From 0885397d105e739d9d37b3851d80da92fcc5e4a4 Mon Sep 17 00:00:00 2001
From: openstack
Date: Sat, 20 Sep 2014 17:01:18 +0800
Subject: [PATCH] Add source code to Tricircle

Initial PoC source code for Tricircle, the project for OpenStack cascading
solution.

Change-Id: I8abc93839a26446cb61c8d9004dfd812bd91de6e
---
 .gitignore | 45 +
 LICENSE | 201 +
 README.md | 397 +
 cinderproxy/README.md | 148 +
 cinderproxy/cinder/volume/cinder_proxy.py | 1099 ++
 cinderproxy/installation/install.sh | 130 +
 cinderproxy/installation/uninstall.sh | 129 +
 glancesync/README.md | 140 +
 glancesync/etc/glance-sync | 10 +
 glancesync/etc/glance/glance-sync-paste.ini | 35 +
 glancesync/etc/glance/glance-sync.conf | 57 +
 glancesync/etc/glance/glance_store.yaml | 29 +
 glancesync/glance/cmd/sync.py | 59 +
 glancesync/glance/sync/__init__.py | 257 +
 glancesync/glance/sync/api/__init__.py | 22 +
 glancesync/glance/sync/api/v1/__init__.py | 59 +
 glancesync/glance/sync/api/v1/images.py | 95 +
 glancesync/glance/sync/base.py | 606 +
 glancesync/glance/sync/client/__init__.py | 46 +
 glancesync/glance/sync/client/v1/__init__.py | 0
 glancesync/glance/sync/client/v1/api.py | 124 +
 glancesync/glance/sync/client/v1/client.py | 106 +
 glancesync/glance/sync/clients.py | 89 +
 glancesync/glance/sync/pool.py | 33 +
 glancesync/glance/sync/store/__init__.py | 0
 .../glance/sync/store/_drivers/__init__.py | 0
 .../glance/sync/store/_drivers/filesystem.py | 171 +
 glancesync/glance/sync/store/driver.py | 63 +
 glancesync/glance/sync/store/glance_store.py | 111 +
 glancesync/glance/sync/store/location.py | 95 +
 glancesync/glance/sync/task/__init__.py | 349 +
 glancesync/glance/sync/utils.py | 215 +
 glancesync/installation/install.sh | 152 +
 icehouse-patches/cinder/README.md | 43 +
 .../cinder/timestamp-query-patch/README.md | 54 +
 .../cinder/db/sqlalchemy/api.py | 2830 +++
 .../installation/install.sh | 87 +
 .../installation/uninstall.sh | 115 +
 .../cinder/uuid-mapping-patch/README.md | 65 +
 .../versions/023_add_mapping_uuid.py | 36 +
 .../024_snapshots_add_mapping_uuid.py | 34 +
 .../versions/025_backup_add_mapping_uuid.py | 34 +
 .../cinder/db/sqlalchemy/models.py | 515 +
 .../installation/install.sh | 92 +
 .../installation/uninstall.sh | 118 +
 .../glance/glance_location_patch/README.md | 23 +
 .../glance.egg-info/entry_points.txt | 21 +
 .../glance/api/v2/images.py | 822 +
 .../glance/common/config.py | 260 +
 .../glance/common/exception.py | 362 +
 .../glance/common/utils.py | 602 +
 .../glance_location_patch/glance/gateway.py | 125 +
 .../glance/store/__init__.py | 814 +
 .../glance/store/http.py | 216 +
 .../installation/install.sh | 111 +
 icehouse-patches/neutron/dvr-patch/README.md | 165 +
 .../neutron/dvr-patch/installation/install.sh | 148 +
 .../neutron/dvr-patch/neutron/__init__.py | 19 +
 .../dvr-patch/neutron/agent/__init__.py | 14 +
 .../neutron/agent/common/__init__.py | 14 +
 .../dvr-patch/neutron/agent/common/config.py | 121 +
 .../dvr-patch/neutron/agent/dhcp_agent.py | 620 +
 .../dvr-patch/neutron/agent/firewall.py | 136 +
 .../dvr-patch/neutron/agent/l3_agent.py | 1579 ++
 .../dvr-patch/neutron/agent/linux/__init__.py | 14 +
 .../neutron/agent/linux/async_process.py | 221 +
 .../dvr-patch/neutron/agent/linux/daemon.py | 149 +
 .../dvr-patch/neutron/agent/linux/dhcp.py | 921 +
 .../neutron/agent/linux/external_process.py | 102 +
 .../neutron/agent/linux/interface.py | 448 +
 .../dvr-patch/neutron/agent/linux/ip_lib.py | 567 +
 .../neutron/agent/linux/iptables_firewall.py | 381 +
 .../neutron/agent/linux/iptables_manager.py | 666 +
.../dvr-patch/neutron/agent/linux/ovs_lib.py | 564 + .../neutron/agent/linux/ovsdb_monitor.py | 105 + .../dvr-patch/neutron/agent/linux/polling.py | 112 + .../dvr-patch/neutron/agent/linux/utils.py | 128 + .../neutron/agent/metadata/__init__.py | 15 + .../dvr-patch/neutron/agent/metadata/agent.py | 390 + .../neutron/agent/metadata/namespace_proxy.py | 182 + .../neutron/agent/netns_cleanup_util.py | 174 + .../neutron/agent/ovs_cleanup_util.py | 110 + .../neutron/dvr-patch/neutron/agent/rpc.py | 134 + .../neutron/agent/securitygroups_rpc.py | 301 + .../dvr-patch/neutron/api/api_common.py | 327 + .../dvr-patch/neutron/api/extensions.py | 684 + .../dvr-patch/neutron/api/rpc/__init__.py | 0 .../api/rpc/agentnotifiers/__init__.py | 0 .../rpc/agentnotifiers/dhcp_rpc_agent_api.py | 177 + .../rpc/agentnotifiers/l3_rpc_agent_api.py | 149 + .../agentnotifiers/metering_rpc_agent_api.py | 99 + .../dvr-patch/neutron/api/rpc/dvr_rpc.py | 122 + .../dvr-patch/neutron/api/v2/__init__.py | 0 .../dvr-patch/neutron/api/v2/attributes.py | 777 + .../neutron/dvr-patch/neutron/api/v2/base.py | 677 + .../neutron/dvr-patch/neutron/api/versions.py | 69 + .../dvr-patch/neutron/api/views/__init__.py | 0 .../dvr-patch/neutron/api/views/versions.py | 58 + .../neutron/dvr-patch/neutron/auth.py | 71 + .../neutron/dvr-patch/neutron/cmd/__init__.py | 14 + .../dvr-patch/neutron/cmd/sanity/__init__.py | 0 .../dvr-patch/neutron/cmd/sanity/checks.py | 44 + .../dvr-patch/neutron/cmd/sanity_check.py | 108 + .../dvr-patch/neutron/cmd/usage_audit.py | 48 + .../dvr-patch/neutron/common/config.py | 190 + .../dvr-patch/neutron/common/constants.py | 131 + .../dvr-patch/neutron/common/exceptions.py | 321 + .../dvr-patch/neutron/common/ipv6_utils.py | 39 + .../neutron/dvr-patch/neutron/common/rpc.py | 278 + .../dvr-patch/neutron/common/test_lib.py | 42 + .../dvr-patch/neutron/common/topics.py | 58 + .../neutron/dvr-patch/neutron/common/utils.py | 310 + .../neutron/dvr-patch/neutron/context.py | 174 + .../neutron/dvr-patch/neutron/db/agents_db.py | 219 + .../neutron/db/agentschedulers_db.py | 226 + .../neutron/db/allowedaddresspairs_db.py | 147 + .../neutron/dvr-patch/neutron/db/api.py | 84 + .../dvr-patch/neutron/db/db_base_plugin_v2.py | 1625 ++ .../dvr-patch/neutron/db/dhcp_rpc_base.py | 287 + .../dvr-patch/neutron/db/dvr_mac_db.py | 156 + .../dvr-patch/neutron/db/external_net_db.py | 163 + .../dvr-patch/neutron/db/extraroute_db.py | 185 + .../dvr-patch/neutron/db/firewall/__init__.py | 14 + .../neutron/db/firewall/firewall_db.py | 479 + .../neutron/db/l3_agentschedulers_db.py | 398 + .../dvr-patch/neutron/db/l3_attrs_db.py | 76 + .../neutron/dvr-patch/neutron/db/l3_db.py | 1057 + .../neutron/dvr-patch/neutron/db/l3_dvr_db.py | 434 + .../neutron/db/l3_dvrscheduler_db.py | 276 + .../dvr-patch/neutron/db/l3_gwmode_db.py | 85 + .../dvr-patch/neutron/db/l3_rpc_base.py | 198 + .../neutron/db/loadbalancer/__init__.py | 13 + .../db/loadbalancer/loadbalancer_db.py | 800 + .../neutron/db/metering/metering_db.py | 239 + .../neutron/db/metering/metering_rpc.py | 55 + .../neutron/db/migration/__init__.py | 53 + .../db/migration/alembic_migrations/env.py | 106 + .../versions/1064e98b7917_nec_pf_port_del.py | 61 + .../versions/10cd28e692e9_nuage_extraroute.py | 68 + .../versions/1149d7de0cfa_port_security.py | 82 + .../11c6e18605c8_pool_monitor_status_.py | 60 + .../versions/128e042a2b68_ext_gw_mode.py | 69 + .../1341ed32cc1e_nvp_netbinding_update.py | 68 + .../versions/13de305df56e_add_nec_pf_name.py | 53 + .../versions/14f24494ca31_arista_ml2.py | 
76 + .../157a5d299379_ml2_binding_profile.py | 53 + .../176a85fc7d79_add_portbindings_db.py | 64 + .../1b693c095aa3_quota_ext_db_grizzly.py | 62 + .../1b837a7125a9_cisco_apic_driver.py | 74 + .../1c33fa3cd1a1_extra_route_config.py | 80 + .../versions/1d76643bcec4_nvp_netbinding.py | 65 + ...5dd1d09b22_set_not_null_fields_lb_stats.py | 66 + .../1efb85914233_allowedaddresspairs.py | 65 + ...c149aca4_agents_unique_by_type_and_host.py | 71 + .../versions/2026156eab2f_l2_dvr_models.py | 78 + .../2032abe8edac_lbaas_add_status_des.py | 55 + .../20ae61555e95_ml2_gre_type_driver.py | 64 + .../2447ad0e9585_add_ipv6_mode_props.py | 81 + .../versions/2528ceb28230_nec_pf_netid_fix.py | 59 + .../263772d65691_cisco_db_cleanup_2.py | 64 + .../27ef74513d33_quota_in_plumgrid_pl.py | 63 + .../versions/2a3bae1ceb8_nec_port_binding.py | 63 + .../2a6d0b51f4bb_cisco_plugin_cleanup.py | 86 + .../versions/2c4af419145b_l3_support.py | 54 + .../versions/2db5203cb7a9_nuage_floatingip.py | 83 + .../2eeaf963a447_floatingip_status.py | 79 + .../versions/32a65f71af51_ml2_portbinding.py | 68 + .../32b517556ec9_remove_tunnelip_mode.py | 56 + .../338d7508968c_vpnaas_peer_address_.py | 53 + ...et_length_of_description_field_metering.py | 58 + .../33dd0a9fa487_embrane_lbaas_driver.py | 59 + ...35c7c198ddea_lbaas_healthmon_del_status.py | 56 + .../versions/363468ac592c_nvp_network_gw.py | 98 + .../versions/38335592a0dc_nvp_portmap.py | 60 + .../38fc1f6789f8_cisco_n1kv_overlay.py | 55 + ...7f7f7c456_l3_extension_distributed_mode.py | 59 + .../39cf3f799352_fwaas_havana_2_model.py | 107 + .../3a520dd165d0_cisco_nexus_multi_switch.py | 57 + .../3b54bf9e29f7_nec_plugin_sharednet.py | 82 + .../3c6e57a23db4_add_multiprovider.py | 101 + .../3cabb850f4a5_table_to_track_port_.py | 61 + .../versions/3cb5d900c5de_security_groups.py | 101 + .../versions/3cbf70257c28_nvp_mac_learning.py | 61 + .../versions/3d6fae8b70b0_nvp_lbaas_plugin.py | 80 + .../versions/3ed8f075e38a_nvp_fwaas_plugin.py | 58 + .../versions/40dffbf4b549_nvp_dist_router.py | 61 + .../versions/45680af419f9_nvp_qos.py | 92 + .../versions/4692d074d587_agent_scheduler.py | 87 + .../46a0efbd8f0_cisco_n1kv_multisegm.py | 78 + .../477a4488d3f4_ml2_vxlan_type_driver.py | 67 + .../versions/48b6f43f7471_service_type.py | 74 + .../49332180ca96_ryu_plugin_update.py | 57 + .../49f5e553f61f_ml2_security_groups.py | 93 + .../4eca4a84f08a_remove_ml2_cisco_cred_db.py | 59 + .../50d5ba354c23_ml2_binding_vif_details.py | 97 + .../versions/50e86cb2637a_nsx_mappings.py | 80 + .../511471cc46b_agent_ext_model_supp.py | 82 + .../52c5e4a18807_lbaas_pool_scheduler.py | 61 + .../52ff27f7567a_support_for_vpnaas.py | 181 + .../53bbd27ec841_extra_dhcp_opts_supp.py | 64 + .../versions/54c2c487e913_lbaas.py | 161 + .../54f7549a0e5f_set_not_null_peer_address.py | 54 + .../557edfc53098_new_service_types.py | 79 + .../versions/569e98a8132b_metering.py | 75 + .../5918cbddab04_add_tables_for_route.py | 69 + .../versions/5a875d0e5c_ryu.py | 72 + .../versions/5ac71e65402c_ml2_initial.py | 82 + ...afba73813_ovs_tunnelendpoints_id_unique.py | 62 + .../66a59a7f516_nec_openflow_router.py | 66 + .../6be312499f9_set_not_null_vlan_id_cisco.py | 54 + .../86cf4d88bd3_remove_bigswitch_por.py | 57 + .../versions/8f682276ee4_ryu_plugin_quota.py | 59 + .../alembic_migrations/versions/HEAD | 1 + .../abc88c33f74f_lb_stats_needs_bigint.py | 65 + ...65aa907aec_set_length_of_protocol_field.py | 52 + .../b7a8863760e_rm_cisco_vlan_bindin.py | 58 + .../versions/c88b6b5fea3_cisco_n1kv_tables.py | 148 + 
...871c0d5_set_admin_state_up_not_null_ml2.py | 54 + .../e197124d4b9_add_unique_constrain.py | 63 + .../e6b16a30d97_cisco_provider_nets.py | 60 + .../versions/ed93525fd003_bigswitch_quota.py | 62 + .../f44ab9871cd6_bsn_security_groups.py | 93 + .../versions/f489cf14a79c_lbaas_havana.py | 160 + .../versions/f9263d6df56_remove_dhcp_lease.py | 44 + .../versions/folsom_initial.py | 561 + .../versions/grizzly_release.py | 40 + .../versions/havana_release.py | 40 + .../versions/icehouse_release.py | 40 + .../dvr-patch/neutron/db/migration/cli.py | 171 + .../neutron/db/migration/migrate_to_ml2.py | 462 + .../dvr-patch/neutron/db/model_base.py | 51 + .../neutron/dvr-patch/neutron/db/models_v2.py | 204 + .../dvr-patch/neutron/db/portbindings_base.py | 41 + .../dvr-patch/neutron/db/portbindings_db.py | 121 + .../neutron/dvr-patch/neutron/db/quota_db.py | 179 + .../neutron/db/routedserviceinsertion_db.py | 106 + .../neutron/db/routerservicetype_db.py | 57 + .../dvr-patch/neutron/db/securitygroups_db.py | 564 + .../neutron/db/securitygroups_rpc_base.py | 374 + .../dvr-patch/neutron/db/servicetype_db.py | 99 + .../dvr-patch/neutron/db/sqlalchemyutils.py | 107 + .../dvr-patch/neutron/db/vpn/__init__.py | 16 + .../dvr-patch/neutron/db/vpn/vpn_db.py | 689 + .../dvr-patch/neutron/debug/__init__.py | 14 + .../dvr-patch/neutron/debug/commands.py | 155 + .../dvr-patch/neutron/debug/debug_agent.py | 196 + .../neutron/dvr-patch/neutron/debug/shell.py | 88 + .../dvr-patch/neutron/extensions/__init__.py | 0 .../dvr-patch/neutron/extensions/agent.py | 163 + .../neutron/extensions/allowedaddresspairs.py | 116 + .../neutron/extensions/dhcpagentscheduler.py | 152 + .../dvr-patch/neutron/extensions/dvr.py | 97 + .../neutron/extensions/external_net.py | 68 + .../neutron/extensions/extraroute.py | 74 + .../dvr-patch/neutron/extensions/firewall.py | 431 + .../dvr-patch/neutron/extensions/flavor.py | 67 + .../dvr-patch/neutron/extensions/l3.py | 254 + .../neutron/extensions/l3agentscheduler.py | 213 + .../extensions/lbaas_agentscheduler.py | 137 + .../neutron/extensions/loadbalancer.py | 506 + .../neutron/extensions/multiprovidernet.py | 114 + .../neutron/extensions/portbindings.py | 133 + .../neutron/extensions/providernet.py | 95 + .../dvr-patch/neutron/extensions/quotasv2.py | 152 + .../extensions/routedserviceinsertion.py | 71 + .../neutron/extensions/routerservicetype.py | 55 + .../neutron/extensions/securitygroup.py | 354 + .../neutron/extensions/servicetype.py | 91 + .../dvr-patch/neutron/extensions/vpnaas.py | 482 + .../dvr-patch/neutron/hacking/__init__.py | 0 .../dvr-patch/neutron/hacking/checks.py | 50 + .../neutron/dvr-patch/neutron/hooks.py | 27 + .../de/LC_MESSAGES/neutron-log-error.po | 170 + .../locale/de/LC_MESSAGES/neutron-log-info.po | 131 + .../de/LC_MESSAGES/neutron-log-warning.po | 57 + .../en_AU/LC_MESSAGES/neutron-log-error.po | 163 + .../en_AU/LC_MESSAGES/neutron-log-info.po | 128 + .../en_GB/LC_MESSAGES/neutron-log-error.po | 163 + .../en_GB/LC_MESSAGES/neutron-log-info.po | 128 + .../locale/en_US/LC_MESSAGES/neutron.po | 16173 ++++++++++++++++ .../es/LC_MESSAGES/neutron-log-error.po | 170 + .../locale/es/LC_MESSAGES/neutron-log-info.po | 128 + .../fr/LC_MESSAGES/neutron-log-critical.po | 23 + .../fr/LC_MESSAGES/neutron-log-error.po | 171 + .../locale/fr/LC_MESSAGES/neutron-log-info.po | 128 + .../locale/it/LC_MESSAGES/neutron-log-info.po | 128 + .../ja/LC_MESSAGES/neutron-log-error.po | 170 + .../locale/ja/LC_MESSAGES/neutron-log-info.po | 128 + .../ko_KR/LC_MESSAGES/neutron-log-error.po | 165 + 
.../ko_KR/LC_MESSAGES/neutron-log-info.po | 128 + .../neutron/locale/neutron-log-critical.pot | 19 + .../neutron/locale/neutron-log-error.pot | 158 + .../neutron/locale/neutron-log-info.pot | 127 + .../neutron/locale/neutron-log-warning.pot | 53 + .../dvr-patch/neutron/locale/neutron.pot | 16172 +++++++++++++++ .../pt_BR/LC_MESSAGES/neutron-log-error.po | 168 + .../pt_BR/LC_MESSAGES/neutron-log-info.po | 128 + .../zh_CN/LC_MESSAGES/neutron-log-error.po | 162 + .../zh_CN/LC_MESSAGES/neutron-log-info.po | 128 + .../zh_TW/LC_MESSAGES/neutron-log-info.po | 128 + .../neutron/dvr-patch/neutron/manager.py | 225 + .../neutron/neutron_plugin_base_v2.py | 352 + .../dvr-patch/neutron/notifiers/nova.py | 249 + .../neutron/openstack/common/gettextutils.py | 498 + .../neutron/openstack/common/importutils.py | 73 + .../neutron/openstack/common/jsonutils.py | 186 + .../dvr-patch/neutron/openstack/common/log.py | 732 + .../neutron/openstack/common/periodic_task.py | 183 + .../neutron/openstack/common/service.py | 512 + .../neutron/openstack/common/strutils.py | 239 + .../neutron/openstack/common/systemd.py | 104 + .../neutron/openstack/common/timeutils.py | 210 + .../neutron/plugins/bigswitch/__init__.py | 14 + .../bigswitch/agent/restproxy_agent.py | 181 + .../neutron/plugins/bigswitch/config.py | 122 + .../neutron/plugins/bigswitch/db/__init__.py | 16 + .../plugins/bigswitch/db/consistency_db.py | 82 + .../plugins/bigswitch/db/porttracker_db.py | 51 + .../plugins/bigswitch/extensions/__init__.py | 16 + .../bigswitch/extensions/routerrule.py | 142 + .../neutron/plugins/bigswitch/plugin.py | 1139 ++ .../plugins/bigswitch/routerrule_db.py | 146 + .../plugins/bigswitch/servermanager.py | 606 + .../plugins/bigswitch/tests/__init__.py | 14 + .../plugins/bigswitch/tests/test_server.py | 186 + .../neutron/plugins/bigswitch/vcsversion.py | 25 + .../neutron/plugins/bigswitch/version.py | 51 + .../neutron/plugins/brocade/NeutronPlugin.py | 506 + .../neutron/plugins/brocade/README.md | 112 + .../neutron/plugins/brocade/__init__.py | 14 + .../neutron/plugins/brocade/db/__init__.py | 14 + .../neutron/plugins/brocade/db/models.py | 149 + .../neutron/plugins/brocade/nos/__init__.py | 14 + .../plugins/brocade/nos/fake_nosdriver.py | 115 + .../plugins/brocade/nos/nctemplates.py | 202 + .../neutron/plugins/brocade/nos/nosdriver.py | 231 + .../neutron/plugins/brocade/vlanbm.py | 58 + .../neutron/plugins/cisco/__init__.py | 16 + .../neutron/plugins/cisco/common/__init__.py | 15 + .../plugins/cisco/common/cisco_constants.py | 109 + .../cisco/common/cisco_credentials_v2.py | 59 + .../plugins/cisco/common/cisco_exceptions.py | 234 + .../plugins/cisco/common/cisco_faults.py | 136 + .../neutron/plugins/cisco/common/config.py | 149 + .../neutron/plugins/cisco/db/__init__.py | 16 + .../neutron/plugins/cisco/db/n1kv_db_v2.py | 1619 ++ .../plugins/cisco/db/n1kv_models_v2.py | 183 + .../neutron/plugins/cisco/db/network_db_v2.py | 288 + .../plugins/cisco/db/network_models_v2.py | 54 + .../neutron/plugins/cisco/db/nexus_db_v2.py | 152 + .../plugins/cisco/db/nexus_models_v2.py | 44 + .../plugins/cisco/extensions/__init__.py | 14 + .../cisco/extensions/_credential_view.py | 50 + .../plugins/cisco/extensions/_qos_view.py | 50 + .../plugins/cisco/extensions/credential.py | 82 + .../neutron/plugins/cisco/extensions/n1kv.py | 104 + .../cisco/extensions/network_profile.py | 101 + .../cisco/extensions/policy_profile.py | 83 + .../neutron/plugins/cisco/extensions/qos.py | 154 + .../plugins/cisco/l2device_plugin_base.py | 173 + 
.../neutron/plugins/cisco/models/__init__.py | 15 + .../plugins/cisco/models/virt_phy_sw_v2.py | 551 + .../neutron/plugins/cisco/n1kv/__init__.py | 16 + .../neutron/plugins/cisco/n1kv/n1kv_client.py | 539 + .../plugins/cisco/n1kv/n1kv_neutron_plugin.py | 1436 ++ .../neutron/plugins/cisco/network_plugin.py | 174 + .../neutron/plugins/cisco/nexus/__init__.py | 19 + .../nexus/cisco_nexus_network_driver_v2.py | 194 + .../cisco/nexus/cisco_nexus_plugin_v2.py | 345 + .../cisco/nexus/cisco_nexus_snippets.py | 178 + .../plugins/cisco/test/nexus/__init__.py | 17 + .../cisco/test/nexus/fake_nexus_driver.py | 99 + .../neutron/plugins/common/__init__.py | 14 + .../neutron/plugins/common/constants.py | 80 + .../dvr-patch/neutron/plugins/common/utils.py | 67 + .../neutron/plugins/embrane/__init__.py | 16 + .../neutron/plugins/embrane/agent/__init__.py | 16 + .../plugins/embrane/agent/dispatcher.py | 132 + .../embrane/agent/operations/__init__.py | 16 + .../agent/operations/router_operations.py | 154 + .../neutron/plugins/embrane/base_plugin.py | 373 + .../plugins/embrane/common/__init__.py | 16 + .../neutron/plugins/embrane/common/config.py | 47 + .../plugins/embrane/common/constants.py | 70 + .../plugins/embrane/common/contexts.py | 38 + .../plugins/embrane/common/exceptions.py | 26 + .../plugins/embrane/common/operation.py | 49 + .../neutron/plugins/embrane/common/utils.py | 71 + .../plugins/embrane/l2base/__init__.py | 16 + .../plugins/embrane/l2base/fake/__init__.py | 16 + .../embrane/l2base/fake/fake_l2_plugin.py | 22 + .../embrane/l2base/fake/fakeplugin_support.py | 43 + .../embrane/l2base/openvswitch/__init__.py | 16 + .../l2base/openvswitch/openvswitch_support.py | 56 + .../plugins/embrane/l2base/support_base.py | 48 + .../embrane/l2base/support_exceptions.py | 23 + .../plugins/embrane/plugins/__init__.py | 16 + .../embrane/plugins/embrane_fake_plugin.py | 32 + .../embrane/plugins/embrane_ovs_plugin.py | 36 + .../neutron/plugins/hyperv/__init__.py | 14 + .../neutron/plugins/hyperv/agent/__init__.py | 14 + .../hyperv/agent/hyperv_neutron_agent.py | 473 + .../hyperv/agent/security_groups_driver.py | 146 + .../neutron/plugins/hyperv/agent/utils.py | 254 + .../plugins/hyperv/agent/utilsfactory.py | 70 + .../neutron/plugins/hyperv/agent/utilsv2.py | 437 + .../plugins/hyperv/agent_notifier_api.py | 78 + .../neutron/plugins/hyperv/common/__init__.py | 14 + .../plugins/hyperv/common/constants.py | 21 + .../dvr-patch/neutron/plugins/hyperv/db.py | 217 + .../plugins/hyperv/hyperv_neutron_plugin.py | 331 + .../dvr-patch/neutron/plugins/hyperv/model.py | 53 + .../neutron/plugins/hyperv/rpc_callbacks.py | 104 + .../plugins/ibm/agent/sdnve_neutron_agent.py | 270 + .../neutron/plugins/ibm/common/config.py | 74 + .../neutron/plugins/ibm/sdnve_api.py | 388 + .../plugins/ibm/sdnve_neutron_plugin.py | 666 + .../agent/linuxbridge_neutron_agent.py | 1023 + .../plugins/linuxbridge/common/__init__.py | 15 + .../plugins/linuxbridge/common/config.py | 76 + .../plugins/linuxbridge/common/constants.py | 40 + .../plugins/linuxbridge/db/__init__.py | 16 + .../plugins/linuxbridge/db/l2network_db_v2.py | 238 + .../plugins/linuxbridge/lb_neutron_plugin.py | 541 + .../neutron/plugins/metaplugin/__init__.py | 14 + .../plugins/metaplugin/common/__init__.py | 14 + .../plugins/metaplugin/common/config.py | 78 + .../neutron/plugins/metaplugin/meta_db_v2.py | 50 + .../plugins/metaplugin/meta_models_v2.py | 41 + .../plugins/metaplugin/meta_neutron_plugin.py | 417 + .../metaplugin/proxy_neutron_plugin.py | 134 + 
.../neutron/plugins/midonet/__init__.py | 15 + .../neutron/plugins/midonet/agent/__init__.py | 14 + .../plugins/midonet/agent/midonet_driver.py | 50 + .../plugins/midonet/common/__init__.py | 14 + .../neutron/plugins/midonet/common/config.py | 44 + .../plugins/midonet/common/net_util.py | 66 + .../neutron/plugins/midonet/midonet_lib.py | 694 + .../neutron/plugins/midonet/plugin.py | 1256 ++ .../dvr-patch/neutron/plugins/ml2/db.py | 200 + .../neutron/plugins/ml2/driver_api.py | 602 + .../neutron/plugins/ml2/driver_context.py | 142 + .../neutron/plugins/ml2/drivers/README.fslsdn | 102 + .../ml2/drivers/brocade/mechanism_brocade.py | 385 + .../ml2/drivers/brocade/nos/nosdriver.py | 236 + .../ml2/drivers/cisco/apic/__init__.py | 0 .../ml2/drivers/cisco/apic/apic_client.py | 416 + .../ml2/drivers/cisco/apic/apic_manager.py | 559 + .../ml2/drivers/cisco/apic/apic_model.py | 177 + .../plugins/ml2/drivers/cisco/apic/config.py | 82 + .../ml2/drivers/cisco/apic/exceptions.py | 59 + .../ml2/drivers/cisco/apic/mechanism_apic.py | 150 + .../plugins/ml2/drivers/cisco/nexus/config.py | 65 + .../drivers/cisco/nexus/mech_cisco_nexus.py | 219 + .../ml2/drivers/cisco/nexus/nexus_db_v2.py | 143 + .../cisco/nexus/nexus_network_driver.py | 171 + .../ml2/drivers/cisco/nexus/nexus_snippets.py | 200 + .../neutron/plugins/ml2/drivers/l2pop/README | 41 + .../neutron/plugins/ml2/drivers/l2pop/db.py | 113 + .../plugins/ml2/drivers/l2pop/mech_driver.py | 302 + .../neutron/plugins/ml2/drivers/l2pop/rpc.py | 86 + .../neutron/plugins/ml2/drivers/mech_agent.py | 149 + .../plugins/ml2/drivers/mech_arista/config.py | 70 + .../ml2/drivers/mech_bigswitch/driver.py | 128 + .../plugins/ml2/drivers/mechanism_fslsdn.py | 288 + .../plugins/ml2/drivers/mechanism_odl.py | 374 + .../plugins/ml2/drivers/mlnx/mech_mlnx.py | 91 + .../neutron/plugins/ml2/drivers/type_gre.py | 190 + .../plugins/ml2/drivers/type_tunnel.py | 132 + .../neutron/plugins/ml2/drivers/type_vlan.py | 267 + .../neutron/plugins/ml2/drivers/type_vxlan.py | 213 + .../dvr-patch/neutron/plugins/ml2/managers.py | 480 + .../dvr-patch/neutron/plugins/ml2/models.py | 111 + .../dvr-patch/neutron/plugins/ml2/plugin.py | 956 + .../dvr-patch/neutron/plugins/ml2/rpc.py | 276 + .../neutron/plugins/mlnx/__init__.py | 14 + .../neutron/plugins/mlnx/agent/__init__.py | 14 + .../mlnx/agent/eswitch_neutron_agent.py | 438 + .../neutron/plugins/mlnx/agent/utils.py | 142 + .../neutron/plugins/mlnx/agent_notify_api.py | 65 + .../neutron/plugins/mlnx/common/__init__.py | 14 + .../neutron/plugins/mlnx/common/comm_utils.py | 64 + .../neutron/plugins/mlnx/common/config.py | 78 + .../neutron/plugins/mlnx/common/constants.py | 26 + .../neutron/plugins/mlnx/common/exceptions.py | 28 + .../neutron/plugins/mlnx/db/__init__.py | 14 + .../neutron/plugins/mlnx/db/mlnx_db_v2.py | 255 + .../neutron/plugins/mlnx/db/mlnx_models_v2.py | 84 + .../neutron/plugins/mlnx/mlnx_plugin.py | 510 + .../neutron/plugins/mlnx/rpc_callbacks.py | 128 + .../dvr-patch/neutron/plugins/nec/README | 13 + .../dvr-patch/neutron/plugins/nec/__init__.py | 13 + .../neutron/plugins/nec/agent/__init__.py | 13 + .../plugins/nec/agent/nec_neutron_agent.py | 252 + .../neutron/plugins/nec/common/__init__.py | 13 + .../neutron/plugins/nec/common/config.py | 82 + .../neutron/plugins/nec/common/constants.py | 22 + .../neutron/plugins/nec/common/exceptions.py | 83 + .../neutron/plugins/nec/common/ofc_client.py | 156 + .../neutron/plugins/nec/db/__init__.py | 13 + .../dvr-patch/neutron/plugins/nec/db/api.py | 184 + 
.../neutron/plugins/nec/db/models.py | 69 + .../neutron/plugins/nec/db/packetfilter.py | 218 + .../neutron/plugins/nec/db/router.py | 90 + .../neutron/plugins/nec/drivers/__init__.py | 38 + .../neutron/plugins/nec/drivers/pfc.py | 372 + .../neutron/plugins/nec/drivers/trema.py | 248 + .../plugins/nec/extensions/__init__.py | 13 + .../plugins/nec/extensions/packetfilter.py | 206 + .../plugins/nec/extensions/router_provider.py | 58 + .../neutron/plugins/nec/nec_plugin.py | 779 + .../neutron/plugins/nec/nec_router.py | 356 + .../neutron/plugins/nec/ofc_driver_base.py | 103 + .../neutron/plugins/nec/ofc_manager.py | 199 + .../neutron/plugins/nec/packet_filter.py | 256 + .../neutron/plugins/nec/router_drivers.py | 222 + .../neutron/plugins/nuage/common/constants.py | 28 + .../plugins/nuage/extensions/netpartition.py | 107 + .../neutron/plugins/nuage/nuage_models.py | 102 + .../neutron/plugins/nuage/nuagedb.py | 202 + .../dvr-patch/neutron/plugins/nuage/plugin.py | 1006 + .../neutron/plugins/ofagent/agent/main.py | 39 + .../ofagent/agent/ofa_neutron_agent.py | 1460 ++ .../neutron/plugins/ofagent/agent/ports.py | 27 + .../agent/nvsd_neutron_agent.py | 176 + .../oneconvergence/lib/plugin_helper.py | 186 + .../neutron/plugins/oneconvergence/plugin.py | 440 + .../agent/ovs_dvr_neutron_agent.py | 745 + .../openvswitch/agent/ovs_neutron_agent.py | 1634 ++ .../plugins/openvswitch/common/__init__.py | 13 + .../plugins/openvswitch/common/config.py | 97 + .../plugins/openvswitch/common/constants.py | 67 + .../neutron/plugins/openvswitch/ovs_db_v2.py | 396 + .../plugins/openvswitch/ovs_models_v2.py | 107 + .../plugins/openvswitch/ovs_neutron_plugin.py | 634 + .../neutron/plugins/plumgrid/__init__.py | 15 + .../plugins/plumgrid/common/__init__.py | 15 + .../plugins/plumgrid/common/exceptions.py | 28 + .../plugins/plumgrid/drivers/__init__.py | 15 + .../plugins/plumgrid/drivers/fake_plumlib.py | 98 + .../plugins/plumgrid/drivers/plumlib.py | 99 + .../plumgrid/plumgrid_plugin/__init__.py | 15 + .../plumgrid/plumgrid_plugin/plugin_ver.py | 17 + .../plumgrid_plugin/plumgrid_plugin.py | 608 + .../plugins/ryu/agent/ryu_neutron_agent.py | 314 + .../neutron/plugins/ryu/common/__init__.py | 13 + .../neutron/plugins/ryu/common/config.py | 50 + .../neutron/plugins/ryu/db/api_v2.py | 214 + .../neutron/plugins/ryu/db/models_v2.py | 40 + .../neutron/plugins/ryu/ryu_neutron_plugin.py | 268 + .../neutron/plugins/vmware/api_client/base.py | 249 + .../vmware/api_client/eventlet_client.py | 155 + .../vmware/api_client/eventlet_request.py | 240 + .../plugins/vmware/api_client/request.py | 287 + .../plugins/vmware/check_nsx_config.py | 161 + .../neutron/plugins/vmware/common/config.py | 196 + .../plugins/vmware/common/exceptions.py | 121 + .../plugins/vmware/common/nsx_utils.py | 247 + .../neutron/plugins/vmware/common/sync.py | 674 + .../neutron/plugins/vmware/common/utils.py | 67 + .../neutron/plugins/vmware/dbexts/db.py | 193 + .../neutron/plugins/vmware/dbexts/lsn_db.py | 131 + .../neutron/plugins/vmware/dbexts/models.py | 135 + .../plugins/vmware/dbexts/networkgw_db.py | 499 + .../neutron/plugins/vmware/dbexts/qos_db.py | 300 + .../neutron/plugins/vmware/dbexts/vcns_db.py | 202 + .../plugins/vmware/dhcp_meta/__init__.py | 14 + .../plugins/vmware/dhcp_meta/lsnmanager.py | 462 + .../plugins/vmware/dhcp_meta/migration.py | 180 + .../neutron/plugins/vmware/dhcp_meta/nsx.py | 321 + .../neutron/plugins/vmware/dhcp_meta/rpc.py | 220 + .../neutron/plugins/vmware/dhcpmeta_modes.py | 163 + .../plugins/vmware/extensions/networkgw.py 
| 249 + .../plugins/vmware/extensions/nvp_qos.py | 40 + .../neutron/plugins/vmware/extensions/qos.py | 229 + .../vmware/extensions/servicerouter.py | 57 + .../neutron/plugins/vmware/nsxlib/__init__.py | 141 + .../plugins/vmware/nsxlib/l2gateway.py | 211 + .../neutron/plugins/vmware/nsxlib/lsn.py | 268 + .../neutron/plugins/vmware/nsxlib/queue.py | 71 + .../neutron/plugins/vmware/nsxlib/router.py | 689 + .../neutron/plugins/vmware/nsxlib/secgroup.py | 141 + .../neutron/plugins/vmware/nsxlib/switch.py | 397 + .../neutron/plugins/vmware/plugins/base.py | 2528 +++ .../neutron/plugins/vmware/plugins/service.py | 1819 ++ .../neutron/plugins/vmware/shell/commands.py | 67 + .../plugins/vmware/vshield/__init__.py | 14 + .../vmware/vshield/edge_appliance_driver.py | 665 + .../vmware/vshield/edge_firewall_driver.py | 352 + .../vshield/edge_loadbalancer_driver.py | 401 + .../plugins/vmware/vshield/tasks/tasks.py | 397 + .../neutron/plugins/vmware/vshield/vcns.py | 302 + .../plugins/vmware/vshield/vcns_driver.py | 51 + .../neutron/dvr-patch/neutron/policy.py | 414 + .../neutron/dvr-patch/neutron/quota.py | 332 + .../dvr-patch/neutron/scheduler/__init__.py | 14 + .../neutron/scheduler/dhcp_agent_scheduler.py | 133 + .../neutron/scheduler/l3_agent_scheduler.py | 235 + .../dvr-patch/neutron/server/__init__.py | 70 + .../neutron/dvr-patch/neutron/service.py | 299 + .../dvr-patch/neutron/services/__init__.py | 14 + .../neutron/services/firewall/__init__.py | 14 + .../services/firewall/agents/__init__.py | 14 + .../firewall/agents/firewall_agent_api.py | 83 + .../firewall/agents/l3reference/__init__.py | 14 + .../agents/l3reference/firewall_l3_agent.py | 293 + .../firewall/agents/varmour/__init__.py | 14 + .../firewall/agents/varmour/varmour_api.py | 145 + .../firewall/agents/varmour/varmour_router.py | 349 + .../firewall/agents/varmour/varmour_utils.py | 72 + .../services/firewall/drivers/__init__.py | 14 + .../services/firewall/drivers/fwaas_base.py | 98 + .../firewall/drivers/linux/__init__.py | 14 + .../firewall/drivers/linux/iptables_fwaas.py | 273 + .../firewall/drivers/varmour/__init__.py | 14 + .../firewall/drivers/varmour/varmour_fwaas.py | 205 + .../neutron/services/firewall/fwaas_plugin.py | 297 + .../neutron/services/l3_router/__init__.py | 14 + .../neutron/services/l3_router/l3_apic.py | 135 + .../services/l3_router/l3_router_plugin.py | 101 + .../neutron/services/loadbalancer/__init__.py | 14 + .../services/loadbalancer/agent/agent.py | 70 + .../services/loadbalancer/agent/agent_api.py | 98 + .../loadbalancer/agent/agent_device_driver.py | 96 + .../loadbalancer/agent/agent_manager.py | 336 + .../services/loadbalancer/agent_scheduler.py | 128 + .../services/loadbalancer/constants.py | 45 + .../services/loadbalancer/drivers/__init__.py | 15 + .../loadbalancer/drivers/abstract_driver.py | 128 + .../drivers/common/agent_driver_base.py | 443 + .../drivers/embrane/agent/lb_operations.py | 179 + .../loadbalancer/drivers/embrane/config.py | 53 + .../loadbalancer/drivers/embrane/models.py | 30 + .../loadbalancer/drivers/haproxy/__init__.py | 15 + .../loadbalancer/drivers/haproxy/cfg.py | 236 + .../drivers/haproxy/namespace_driver.py | 394 + .../drivers/haproxy/plugin_driver.py | 21 + .../loadbalancer/drivers/radware/__init__.py | 15 + .../loadbalancer/drivers/radware/driver.py | 1095 ++ .../drivers/radware/exceptions.py | 42 + .../neutron/services/loadbalancer/plugin.py | 326 + .../metering/agents/metering_agent.py | 296 + .../services/metering/metering_plugin.py | 74 + 
.../services/provider_configuration.py | 161 + .../neutron/services/service_base.py | 101 + .../neutron/services/vpn/__init__.py | 16 + .../dvr-patch/neutron/services/vpn/agent.py | 146 + .../neutron/services/vpn/common/__init__.py | 14 + .../neutron/services/vpn/common/topics.py | 20 + .../services/vpn/device_drivers/__init__.py | 36 + .../device_drivers/cisco_csr_rest_client.py | 258 + .../vpn/device_drivers/cisco_ipsec.py | 858 + .../services/vpn/device_drivers/ipsec.py | 711 + .../dvr-patch/neutron/services/vpn/plugin.py | 105 + .../services/vpn/service_drivers/__init__.py | 90 + .../vpn/service_drivers/cisco_csr_db.py | 239 + .../vpn/service_drivers/cisco_ipsec.py | 245 + .../services/vpn/service_drivers/ipsec.py | 154 + .../neutron/dvr-patch/neutron/version.py | 17 + .../neutron/dvr-patch/neutron/wsgi.py | 1301 ++ .../ml2-mech-driver-cascaded-patch/README.md | 65 + .../installation/install.sh | 92 + .../plugins/ml2/drivers/l2pop/config.py | 33 + .../neutron/plugins/ml2/drivers/l2pop/db.py | 137 + .../plugins/ml2/drivers/l2pop/mech_driver.py | 385 + .../ml2-mech-driver-cascading-patch/README.md | 66 + .../installation/install.sh | 93 + .../neutron/plugins/ml2/drivers/l2pop/db.py | 124 + .../plugins/ml2/drivers/l2pop/mech_driver.py | 307 + .../neutron/openvswitch-agent-patch/README.md | 65 + .../installation/install.sh | 92 + .../agent/ovs_dvr_neutron_agent.py | 764 + .../vlan2vlan/neutron/agent/l3_agent.py | 1697 ++ .../vlan2vlan/neutron/agent/l3_proxy.py | 1992 ++ .../vlan2vlan/neutron/agent/linux/ip_lib.py | 590 + .../vlan2vlan/neutron/agent/linux/ovs_lib.py | 611 + .../vlan2vlan/neutron/common/config.py | 195 + .../vlan2vlan/neutron/common/exceptions.py | 328 + .../vlan2vlan/neutron/db/cascade_db.py | 162 + .../vlan2vlan/neutron/db/dvr_mac_db.py | 191 + .../vlan2vlan/neutron/db/extraroute_db.py | 237 + .../vlan2vlan/neutron/db/l3_rpc_base.py | 232 + .../versions/2026156eab2f_l2_dvr_models.py | 94 + .../vlan2vlan/neutron/plugins/ml2/plugin.py | 993 + .../agent/ovs_dvr_neutron_agent.py | 777 + .../openvswitch/agent/ovs_neutron_agent.py | 1818 ++ .../plugins/openvswitch/common/config.py | 110 + .../plugins/openvswitch/common/constants.py | 70 + .../instance_mapping_uuid_patch/README.md | 63 + .../installation/install.sh | 91 + .../installation/uninstall.sh | 19 + .../nova/conductor/manager.py | 1116 ++ ...234_add_mapping_uuid_column_to_instance.py | 20 + .../nova/db/sqlalchemy/models.py | 1470 ++ .../nova/network/neutronv2/api.py | 1250 ++ .../nova/objects/instance.py | 739 + minimal_setup.png | Bin 0 -> 102649 bytes minimal_setup_with_glance_cascading.png | Bin 0 -> 130466 bytes neutronproxy/l2-proxy/README.md | 163 + .../l2_proxy_agent/l2_cascading_proxy.ini | 47 + neutronproxy/l2-proxy/installation/install.sh | 91 + .../l2-proxy/neutron/plugins/l2_proxy/README | 6 + .../neutron/plugins/l2_proxy/__init__.py | 0 .../plugins/l2_proxy/agent/__init__.py | 0 .../neutron/plugins/l2_proxy/agent/clients.py | 237 + .../plugins/l2_proxy/agent/l2_proxy.py | 1755 ++ .../l2_proxy/agent/neutron_keystoneclient.py | 319 + .../l2_proxy/agent/neutron_proxy_context.py | 203 + .../l2_proxy/agent/ovs_dvr_neutron_agent.py | 763 + .../plugins/l2_proxy/agent/xenapi/README | 16 + .../agent/xenapi/contrib/build-rpm.sh | 34 + .../SPECS/openstack-quantum-xen-plugins.spec | 30 + .../agent/xenapi/etc/xapi.d/plugins/netwrap | 75 + .../plugins/l2_proxy/common/__init__.py | 13 + .../neutron/plugins/l2_proxy/common/config.py | 123 + .../plugins/l2_proxy/common/constants.py | 67 + 
.../neutron/plugins/l2_proxy/ovs_db_v2.py | 396 + .../neutron/plugins/l2_proxy/ovs_models_v2.py | 111 + .../plugins/l2_proxy/ovs_neutron_plugin.py | 635 + neutronproxy/l3-proxy/README.md | 156 + .../l3-proxy/etc/neutron/l3_proxy_agent.ini | 22 + neutronproxy/l3-proxy/installation/install.sh | 91 + .../l3-proxy/neutron/agent/l3_proxy.py | 1782 ++ novaproxy/README.md | 156 + novaproxy/installation/install.sh | 111 + novaproxy/installation/uninstall.sh | 19 + novaproxy/nova/compute/clients.py | 246 + novaproxy/nova/compute/compute_context.py | 199 + .../nova/compute/compute_keystoneclient.py | 314 + novaproxy/nova/compute/manager_proxy.py | 2999 +++ 730 files changed, 187027 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100644 cinderproxy/README.md create mode 100644 cinderproxy/cinder/volume/cinder_proxy.py create mode 100644 cinderproxy/installation/install.sh create mode 100644 cinderproxy/installation/uninstall.sh create mode 100644 glancesync/README.md create mode 100644 glancesync/etc/glance-sync create mode 100644 glancesync/etc/glance/glance-sync-paste.ini create mode 100644 glancesync/etc/glance/glance-sync.conf create mode 100644 glancesync/etc/glance/glance_store.yaml create mode 100644 glancesync/glance/cmd/sync.py create mode 100644 glancesync/glance/sync/__init__.py create mode 100644 glancesync/glance/sync/api/__init__.py create mode 100644 glancesync/glance/sync/api/v1/__init__.py create mode 100644 glancesync/glance/sync/api/v1/images.py create mode 100644 glancesync/glance/sync/base.py create mode 100644 glancesync/glance/sync/client/__init__.py create mode 100644 glancesync/glance/sync/client/v1/__init__.py create mode 100644 glancesync/glance/sync/client/v1/api.py create mode 100644 glancesync/glance/sync/client/v1/client.py create mode 100644 glancesync/glance/sync/clients.py create mode 100644 glancesync/glance/sync/pool.py create mode 100644 glancesync/glance/sync/store/__init__.py create mode 100644 glancesync/glance/sync/store/_drivers/__init__.py create mode 100644 glancesync/glance/sync/store/_drivers/filesystem.py create mode 100644 glancesync/glance/sync/store/driver.py create mode 100644 glancesync/glance/sync/store/glance_store.py create mode 100644 glancesync/glance/sync/store/location.py create mode 100644 glancesync/glance/sync/task/__init__.py create mode 100644 glancesync/glance/sync/utils.py create mode 100644 glancesync/installation/install.sh create mode 100644 icehouse-patches/cinder/README.md create mode 100644 icehouse-patches/cinder/timestamp-query-patch/README.md create mode 100644 icehouse-patches/cinder/timestamp-query-patch/cinder/db/sqlalchemy/api.py create mode 100644 icehouse-patches/cinder/timestamp-query-patch/installation/install.sh create mode 100644 icehouse-patches/cinder/timestamp-query-patch/installation/uninstall.sh create mode 100644 icehouse-patches/cinder/uuid-mapping-patch/README.md create mode 100644 icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/023_add_mapping_uuid.py create mode 100644 icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/024_snapshots_add_mapping_uuid.py create mode 100644 icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/025_backup_add_mapping_uuid.py create mode 100644 icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/models.py create mode 100644 icehouse-patches/cinder/uuid-mapping-patch/installation/install.sh create mode 
100644 icehouse-patches/cinder/uuid-mapping-patch/installation/uninstall.sh create mode 100644 icehouse-patches/glance/glance_location_patch/README.md create mode 100644 icehouse-patches/glance/glance_location_patch/glance.egg-info/entry_points.txt create mode 100644 icehouse-patches/glance/glance_location_patch/glance/api/v2/images.py create mode 100644 icehouse-patches/glance/glance_location_patch/glance/common/config.py create mode 100644 icehouse-patches/glance/glance_location_patch/glance/common/exception.py create mode 100644 icehouse-patches/glance/glance_location_patch/glance/common/utils.py create mode 100644 icehouse-patches/glance/glance_location_patch/glance/gateway.py create mode 100644 icehouse-patches/glance/glance_location_patch/glance/store/__init__.py create mode 100644 icehouse-patches/glance/glance_location_patch/glance/store/http.py create mode 100644 icehouse-patches/glance/glance_location_patch/installation/install.sh create mode 100644 icehouse-patches/neutron/dvr-patch/README.md create mode 100644 icehouse-patches/neutron/dvr-patch/installation/install.sh create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/dhcp_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/firewall.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/l3_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/async_process.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/daemon.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/dhcp.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/external_process.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/interface.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ip_lib.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/iptables_firewall.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/iptables_manager.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ovs_lib.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ovsdb_monitor.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/polling.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/linux/utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/namespace_proxy.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/netns_cleanup_util.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/ovs_cleanup_util.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/rpc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/agent/securitygroups_rpc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/api_common.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/api/extensions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/rpc/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/rpc/dvr_rpc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/v2/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/v2/attributes.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/v2/base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/versions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/views/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/api/views/versions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/auth.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/cmd/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/cmd/sanity/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/cmd/sanity/checks.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/cmd/sanity_check.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/cmd/usage_audit.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/common/exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/common/ipv6_utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/common/rpc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/common/test_lib.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/common/topics.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/common/utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/context.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/agents_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/agentschedulers_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/allowedaddresspairs_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/db_base_plugin_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/dhcp_rpc_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/dvr_mac_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/external_net_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/extraroute_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/firewall/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/firewall/firewall_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/l3_agentschedulers_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/l3_attrs_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/l3_db.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/db/l3_dvr_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/l3_dvrscheduler_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/l3_gwmode_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/l3_rpc_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/loadbalancer/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/loadbalancer/loadbalancer_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/metering/metering_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/metering/metering_rpc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/env.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1341ed32cc1e_nvp_netbinding_update.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/13de305df56e_add_nec_pf_name.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/176a85fc7d79_add_portbindings_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1b693c095aa3_quota_ext_db_grizzly.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1b837a7125a9_cisco_apic_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1c33fa3cd1a1_extra_route_config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/49f5e553f61f_ml2_security_groups.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/8f682276ee4_ryu_plugin_quota.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/HEAD create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/ed93525fd003_bigswitch_quota.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f44ab9871cd6_bsn_security_groups.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f9263d6df56_remove_dhcp_lease.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/folsom_initial.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/grizzly_release.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/havana_release.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/icehouse_release.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/cli.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/migration/migrate_to_ml2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/model_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/models_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/portbindings_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/portbindings_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/quota_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/routedserviceinsertion_db.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/db/routerservicetype_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/securitygroups_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/securitygroups_rpc_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/servicetype_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/sqlalchemyutils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/vpn/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/db/vpn/vpn_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/debug/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/debug/commands.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/debug/debug_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/debug/shell.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/allowedaddresspairs.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/dhcpagentscheduler.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/dvr.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/external_net.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/extraroute.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/firewall.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/flavor.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/l3.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/l3agentscheduler.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/lbaas_agentscheduler.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/loadbalancer.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/multiprovidernet.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/portbindings.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/providernet.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/quotasv2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/routedserviceinsertion.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/routerservicetype.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/securitygroup.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/servicetype.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/extensions/vpnaas.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/hacking/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/hacking/checks.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/hooks.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-error.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-warning.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/en_AU/LC_MESSAGES/neutron-log-error.po create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/locale/en_AU/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/en_GB/LC_MESSAGES/neutron-log-error.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/en_GB/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/en_US/LC_MESSAGES/neutron.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/es/LC_MESSAGES/neutron-log-error.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/es/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-critical.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-error.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/it/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/ja/LC_MESSAGES/neutron-log-error.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-critical.pot create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-error.pot create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-info.pot create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-warning.pot create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/neutron.pot create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-error.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-error.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/manager.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/neutron_plugin_base_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/notifiers/nova.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/gettextutils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/importutils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/jsonutils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/log.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/periodic_task.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/service.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/strutils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/systemd.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/openstack/common/timeutils.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/agent/restproxy_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/consistency_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/porttracker_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/extensions/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/extensions/routerrule.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/routerrule_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/servermanager.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/tests/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/tests/test_server.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/vcsversion.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/version.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/NeutronPlugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/README.md create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/db/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/db/models.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/fake_nosdriver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/nctemplates.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/nosdriver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/vlanbm.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_credentials_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_faults.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/n1kv_db_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/n1kv_models_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/network_db_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/network_models_v2.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/nexus_db_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/nexus_models_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/_credential_view.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/_qos_view.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/credential.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/n1kv.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/network_profile.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/policy_profile.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/qos.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/l2device_plugin_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/models/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/models/virt_phy_sw_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/n1kv_client.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/network_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/test/nexus/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/common/utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/dispatcher.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/operations/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/operations/router_operations.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/base_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/contexts.py create 
mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/operation.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/openvswitch/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/support_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/support_exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/embrane_fake_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/embrane_ovs_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/security_groups_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utilsfactory.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utilsv2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent_notifier_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/hyperv_neutron_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/model.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/rpc_callbacks.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/agent/sdnve_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/sdnve_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/sdnve_neutron_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/config.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/db/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/db/l2network_db_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/lb_neutron_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_db_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_models_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_neutron_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/proxy_neutron_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/agent/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/agent/midonet_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/net_util.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/midonet_lib.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/driver_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/driver_context.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/README.fslsdn create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/README create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/rpc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_arista/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mechanism_fslsdn.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mechanism_odl.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_gre.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_tunnel.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_vlan.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_vxlan.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/managers.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/models.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/rpc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent_notify_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/comm_utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/mlnx_db_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/mlnx_models_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/mlnx_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/rpc_callbacks.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/README create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/__init__.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/agent/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/agent/nec_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/ofc_client.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/models.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/packetfilter.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/router.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/pfc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/trema.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/packetfilter.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/router_provider.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/nec_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/nec_router.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/ofc_driver_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/ofc_manager.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/packet_filter.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/router_drivers.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/extensions/netpartition.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/nuage_models.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/nuagedb.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/main.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/ofa_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/ports.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/lib/plugin_helper.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_db_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_models_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_neutron_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/common/exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/fake_plumlib.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/plumlib.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/agent/ryu_neutron_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/db/api_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/db/models_v2.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/ryu_neutron_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/eventlet_client.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/eventlet_request.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/request.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/check_nsx_config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/nsx_utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/sync.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/lsn_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/models.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/networkgw_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/qos_db.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/vcns_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/lsnmanager.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/migration.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/nsx.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/rpc.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcpmeta_modes.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/networkgw.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/nvp_qos.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/qos.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/servicerouter.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/l2gateway.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/lsn.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/queue.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/router.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/secgroup.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/switch.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/plugins/base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/plugins/service.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/shell/commands.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_appliance_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_firewall_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/tasks/tasks.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/vcns.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/vcns_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/policy.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/quota.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/scheduler/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/scheduler/dhcp_agent_scheduler.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/scheduler/l3_agent_scheduler.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/server/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/service.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/__init__.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/firewall_agent_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/l3reference/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_router.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_utils.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/fwaas_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/linux/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/linux/iptables_fwaas.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/varmour/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/varmour/varmour_fwaas.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/firewall/fwaas_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/l3_apic.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/l3_router_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_api.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_device_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_manager.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent_scheduler.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/constants.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/abstract_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/common/agent_driver_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/config.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/models.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/cfg.py create mode 100644 
icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/driver.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/exceptions.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/metering/agents/metering_agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/metering/metering_plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/provider_configuration.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/service_base.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/agent.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/common/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/common/topics.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/cisco_csr_rest_client.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/cisco_ipsec.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/ipsec.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/plugin.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/__init__.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/cisco_csr_db.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/cisco_ipsec.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/ipsec.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/version.py create mode 100644 icehouse-patches/neutron/dvr-patch/neutron/wsgi.py create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/README.md create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/installation/install.sh create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/config.py create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/db.py create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascading-patch/README.md create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascading-patch/installation/install.sh create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascading-patch/neutron/plugins/ml2/drivers/l2pop/db.py create mode 100644 icehouse-patches/neutron/ml2-mech-driver-cascading-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py create mode 100644 icehouse-patches/neutron/openvswitch-agent-patch/README.md create mode 100644 
icehouse-patches/neutron/openvswitch-agent-patch/installation/install.sh create mode 100644 icehouse-patches/neutron/openvswitch-agent-patch/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/agent/l3_agent.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/agent/l3_proxy.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/agent/linux/ip_lib.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/agent/linux/ovs_lib.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/common/config.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/common/exceptions.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/db/cascade_db.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/db/dvr_mac_db.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/db/extraroute_db.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/db/l3_rpc_base.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/plugins/ml2/plugin.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/common/config.py create mode 100644 icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/common/constants.py create mode 100644 icehouse-patches/nova/instance_mapping_uuid_patch/README.md create mode 100644 icehouse-patches/nova/instance_mapping_uuid_patch/installation/install.sh create mode 100644 icehouse-patches/nova/instance_mapping_uuid_patch/installation/uninstall.sh create mode 100644 icehouse-patches/nova/instance_mapping_uuid_patch/nova/conductor/manager.py create mode 100644 icehouse-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/234_add_mapping_uuid_column_to_instance.py create mode 100644 icehouse-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/models.py create mode 100644 icehouse-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/api.py create mode 100644 icehouse-patches/nova/instance_mapping_uuid_patch/nova/objects/instance.py create mode 100644 minimal_setup.png create mode 100644 minimal_setup_with_glance_cascading.png create mode 100644 neutronproxy/l2-proxy/README.md create mode 100644 neutronproxy/l2-proxy/etc/neutron/plugins/l2_proxy_agent/l2_cascading_proxy.ini create mode 100644 neutronproxy/l2-proxy/installation/install.sh create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/README create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/__init__.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/__init__.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/clients.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/l2_proxy.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/neutron_keystoneclient.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/neutron_proxy_context.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/ovs_dvr_neutron_agent.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/README create 
mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/build-rpm.sh create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/etc/xapi.d/plugins/netwrap create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/__init__.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/config.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/constants.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_db_v2.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_models_v2.py create mode 100644 neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_neutron_plugin.py create mode 100644 neutronproxy/l3-proxy/README.md create mode 100644 neutronproxy/l3-proxy/etc/neutron/l3_proxy_agent.ini create mode 100644 neutronproxy/l3-proxy/installation/install.sh create mode 100644 neutronproxy/l3-proxy/neutron/agent/l3_proxy.py create mode 100644 novaproxy/README.md create mode 100644 novaproxy/installation/install.sh create mode 100644 novaproxy/installation/uninstall.sh create mode 100644 novaproxy/nova/compute/clients.py create mode 100644 novaproxy/nova/compute/compute_context.py create mode 100644 novaproxy/nova/compute/compute_keystoneclient.py create mode 100644 novaproxy/nova/compute/manager_proxy.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..2101ce8a --- /dev/null +++ b/.gitignore @@ -0,0 +1,45 @@ +*.DS_Store +*.egg* +*.log +*.mo +*.pyc +*.swo +*.swp +*.sqlite +*.iml +*~ +.autogenerated +.coverage +.nova-venv +.project +.pydevproject +.ropeproject +.testrepository/ +.tox +.idea +.venv +AUTHORS +Authors +build-stamp +build/* +CA/ +ChangeLog +coverage.xml +cover/* +covhtml +dist/* +doc/source/api/* +doc/build/* +etc/nova/nova.conf.sample +instances +keeper +keys +local_settings.py +MANIFEST +nosetests.xml +nova/tests/cover/* +nova/vcsversion.py +tools/conf/nova.conf* +tools/lintstack.head.py +tools/pylint_exceptions +etc/nova/nova.conf.sample diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..8d968b6c --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 00000000..5db0e72f --- /dev/null +++ b/README.md @@ -0,0 +1,397 @@ +Tricircle +=============================== + +Tricircle is a project for [Openstack cascading solution](https://wiki.openstack.org/wiki/OpenStack_cascading_solution), including the source code of Nova Proxy, Cinder Proxy, Neutron L2/L3 Proxy, Glance sync manager and Ceilometer Proxy(not implemented yet). + +The project name "Tricircle" comes from a fractal. See the blog ["OpenStack cascading and fractal"](https://www.linkedin.com/today/post/article/20140729022031-23841540-openstack-cascading-and-fractal) for more information. + +Important to know +----------- +* the initial source code is for PoC only. Refactory will be done constantly to reach OpenStack acceptance standard. +* the PoC source code is based on IceHouse version, while Neutron is a master branch snapshot on July 1, 2014 which include DVR feature, not IceHouse version. 
The Neutron code was downloaded from GitHub while it was still under development and review. The DVR part of the source code is not stable, and not all DVR features are included; for example, N-S (north-south) functions are not ready.
+* Neutron cascading uses the provider network feature, but Horizon does not support provider networks very well, so you have to use the Neutron CLI to create a network (see the example command after this list). Alternatively, set the default provider network type to VxLAN, or remove the "local", "flat", "VLAN" and "GRE" type drivers from the ML2 plugin configuration.
+* For Neutron L2/L3 features, only VxLAN/L3 across cascaded OpenStacks is supported in the current source code. VLAN2VLAN, VLAN2VxLAN and VxLAN2VxLAN across cascaded OpenStacks have also been implemented on the IceHouse version, but the patch is not ready yet; the source code is in the VLAN2VLAN folder.
+* The tunneling network for the cross-OpenStack piggyback data path uses VxLAN, which requires modifications to the L2 agent and L3 agent. We will refactor it to use GRE for the tunneling network to reduce the patch size for the Juno version.
+* If you want to experience VLAN2VLAN, VLAN2VxLAN and VxLAN2VxLAN across cascaded OpenStacks, please ask the PoC team for help; see the wiki page [Openstack cascading solution](https://wiki.openstack.org/wiki/OpenStack_cascading_solution) for contact information.
+* Glance cascading uses the Glance V2 API. Only the CLI/python client supports the V2 API; Horizon does not support that version. Image management should therefore be done through the CLI, using V2 only. Otherwise, Glance cascading cannot work properly.
+* Glance cascading is not used by default, i.e. a global Glance is used by default. If Glance cascading is required, extra configuration is needed.
+* Refactoring of the Tricircle source code based on the Juno version will start as soon as the Juno release is available.
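+
+Since Horizon does not handle provider networks well (see the note above), networks are typically created with the Neutron CLI. A minimal sketch, where the network name, segmentation ID and subnet range are placeholders only:
+```
+# create a shared VxLAN provider network in the cascading Neutron
+neutron net-create demo-net --shared --provider:network_type vxlan --provider:segmentation_id 1001
+# attach a subnet so that VMs can get addresses from it
+neutron subnet-create demo-net 192.168.10.0/24 --name demo-subnet
+```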
+
+
+Key modules
+-----------
+
+* Nova proxy
+
+    Plays a role similar to Nova-Compute. Forwards VM operations to the cascaded Nova, and is also responsible for attaching volumes and networks to VMs in the cascaded OpenStack.
+
+* Cinder proxy
+
+    Plays a role similar to Cinder-Volume. Forwards volume operations to the cascaded Cinder.
+
+* Neutron proxy
+
+    Consists of an L2 proxy and an L3 proxy, playing a role similar to the OVS agent and L3 agent. Completes L2/L3 networking in the cascaded OpenStack, including cross-OpenStack networking.
+
+* Glance sync
+
+    Synchronizes images between the cascading OpenStack and the cascaded OpenStacks determined by policy.
+
+Patches required
+------------------
+
+* IceHouse-Patches
+
+    Patches for the OpenStack IceHouse version, covering both the cascading level and the cascaded level.
+
+Feature Supported
+------------------
+
+* Nova cascading
+    Launch/Reboot/Terminate/Resize/Rescue/Pause/Un-pause/Suspend/Resume/VNC Console/Attach Volume/Detach Volume/Snapshot/KeyPair/Flavor
+
+* Cinder cascading
+    Create Volume/Delete Volume/Attach Volume/Detach Volume/Extend Volume/Create Snapshot/Delete Snapshot/List Snapshots/Create Volume from Snapshot/Create Volume from Image/Create Volume from Volume (Clone)/Create Image from Volume
+
+* Neutron cascading
+    Network/Subnet/Port/Router
+
+* Glance cascading
+    Only the V2 API is supported. Create Image/Delete Image/List Image/Update Image/Upload Image/Patch Location/VM Snapshot/Image Synchronization
+
+Known Issues
+------------------
+* Use the "admin" role to experience these features first; multi-tenancy has not been tested well.
+* Launching a VM only supports "boot from image", "boot from volume" and "boot from snapshot".
+* For flavors, only newly created flavors are synchronized to the cascaded OpenStack; flavor update synchronization to the cascaded OpenStack is not supported yet.
+* A patch is required for "Create a volume from image"; see https://bugs.launchpad.net/cinder/+bug/1308058
+
+Installation without Glance cascading
+------------
+
+* **Prerequisites**
+    - The minimal installation requires three OpenStack IceHouse deployments to experience the cross-cascaded-OpenStack L2/L3 functions. The minimal setup needs four nodes, see the following picture:
+
+    ![minimal_setup](./minimal_setup.png?raw=true)
+
+    - The cascading OpenStack needs two nodes, Node1 and Node2. Add Node1 to AZ1 and Node2 to AZ2 in the cascading OpenStack, for both Nova and Cinder.
+
+    - It's recommended to name the cascading OpenStack region "Cascading_OpenStack" or "Region1".
+
+    - Node1 is an all-in-one OpenStack installation with KeyStone and Glance. Node1 also functions as the Nova-Compute/Cinder-Volume/Neutron OVS-Agent/L3-Agent node, and will be replaced to become the proxy node for AZ1.
+
+    - Node2 is a general Nova-Compute node with Cinder-Volume and Neutron OVS-Agent/L3-Agent installed, and will be replaced to become the proxy node for AZ2.
+
+    - The all-in-one cascaded OpenStack installed on Node3 functions as AZ1. Node3 will also run Nova-Compute/Cinder-Volume/Neutron OVS-Agent/L3-Agent in order to be able to create VMs/volumes/networking in AZ1. Glance only needs to be installed if Glance cascading is required. Add Node3 to AZ1 in the cascaded OpenStack, both for Nova and Cinder. It's recommended to name the cascaded OpenStack region for Node3 "AZ1".
+
+    - The all-in-one cascaded OpenStack installed on Node4 functions as AZ2. Node4 will also run Nova-Compute/Cinder-Volume/Neutron OVS-Agent/L3-Agent in order to be able to create VMs/volumes/networking in AZ2. Glance only needs to be installed if Glance cascading is required. Add Node4 to AZ2 in the cascaded OpenStack, both for Nova and Cinder. It's recommended to name the cascaded OpenStack region for Node4 "AZ2".
+
+    Make sure the clocks of these four nodes are synchronized. The Nova Proxy/Cinder Proxy/Neutron L2/L3 Proxy query the cascaded OpenStack using timestamps, so incorrect time will prevent VM/Volume/Port status synchronization from working properly.
+
+    Register all service endpoints in the globally shared KeyStone (an example registration sketch is given at the end of this prerequisites section).
+
+    Make sure the three OpenStack deployments work independently before cascading is introduced, e.g. you can boot a VM with a network, create a volume and attach the volume in each OpenStack. After verifying that the three OpenStack deployments work independently, clean up all created resources (VM/Volume/Network).
+
+    After all OpenStack installations are ready, it's time to install the IceHouse patches, both for the cascading OpenStack and the cascaded OpenStacks, and then replace the Nova-Compute/Cinder-Volume/Neutron OVS-Agent/L3-Agent with the Nova Proxy / Cinder Proxy / Neutron L2/L3 Proxy.
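+
+    For example, registering the cascaded Cinder of AZ1 in the shared KeyStone could look roughly like the sketch below; the region name, IP address and service id are placeholders only, and the same pattern is repeated for the other services and regions:
+
+    ```
+    keystone service-create --name=cinder --type=volume --description="Cinder of AZ1"
+    keystone endpoint-create --region=AZ1 --service-id=$SERVICE_ID \
+        --publicurl='http://$NODE3_IP:8776/v2/%(tenant_id)s' \
+        --internalurl='http://$NODE3_IP:8776/v2/%(tenant_id)s' \
+        --adminurl='http://$NODE3_IP:8776/v2/%(tenant_id)s'
+    ```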
+* **IceHouse patches installation step by step**
+
+1. Node1
+    - Patches for Nova - instance_mapping_uuid_patch
+
+        This patch enables the Nova proxy to translate a cascading-level VM's UUID into the cascaded-level VM's UUID.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/nova/instance_mapping_uuid_patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Cinder - Volume/Snapshot/Backup UUID mapping patch
+
+        This patch enables the Cinder proxy to translate a cascading-level Volume/Snapshot/Backup UUID into the cascaded-level Volume/Snapshot/Backup UUID.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/cinder/instance_mapping_uuid_patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Neutron - DVR patch
+
+        This patch adds the DVR (distributed virtual router) feature to Neutron. Through DVR, all L2/L3 proxy nodes at the cascading level can receive the corresponding RPC messages and then convert the commands into RESTful API calls to the cascaded Neutron.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/neutron/dvr-patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Neutron - ml2-mech-driver-cascading patch
+
+        This patch enables the L2 population driver to populate the VM's host IP, which is stored in the port binding profile of one cascaded OpenStack, to another cascaded OpenStack.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/neutron/ml2-mech-driver-cascading-patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+2. Node3
+    - Patches for Nova - port binding profile update bug: https://bugs.launchpad.net/neutron/+bug/1338202.
+
+        The ml2-mech-driver-cascaded-patch updates the binding profile in the port, and the profile will be flushed to null if you don't fix this bug.
+
+        You can also fix the bug via:
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/icehouse-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/
+        cp api.py $python_installation_path/site-packages/nova/network/neutronv2/
+        ```
+        The patch preserves what has been saved in the port binding profile (see the note at the end of this Node3 section on how to determine $python_installation_path).
+
+    - Patches for Cinder - timestamp-query-patch
+
+        This patch enables the cascaded Cinder to execute queries with a timestamp filter instead of returning all objects.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/cinder/timestamp-query-patch_patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Neutron - DVR patch
+
+        This patch adds the DVR (distributed virtual router) feature to Neutron.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/neutron/dvr-patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Neutron - ml2-mech-driver-cascaded patch
+
+        This patch enables the L2 population driver to populate the virtual remote port for a VM located in another OpenStack.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Neutron - openvswitch-agent patch
+
+        This patch obtains the DVR MAC across OpenStacks for cross-OpenStack L3 networking (VLAN-VLAN/VLAN-VxLAN/VxLAN-VxLAN).
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/neutron/openvswitch-agent-patch
+        ```
+        and follow the README.md instructions to install the patch.
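+    - Note on $python_installation_path
+
+        The copy commands above (and in the Node4 steps below) assume you know where the Python packages live. A quick way to find out is to ask Python itself; this is only a convenience hint, and the actual path depends on your distribution:
+        ```
+        # prints e.g. /usr/lib64/python2.6/site-packages/nova/__init__.pyc
+        # in that case $python_installation_path is /usr/lib64/python2.6
+        python -c "import nova; print(nova.__file__)"
+        ```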
+3. Node4
+    - Patches for Nova - port binding profile update bug: https://bugs.launchpad.net/neutron/+bug/1338202.
+
+        The ml2-mech-driver-cascaded-patch updates the binding profile in the port, and the profile will be flushed to null if you don't fix this bug.
+
+        You can also fix the bug via:
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/icehouse-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/
+        cp api.py $python_installation_path/site-packages/nova/network/neutronv2/
+        ```
+        The patch preserves what has been saved in the port binding profile.
+
+    - Patches for Cinder - timestamp-query-patch
+
+        This patch enables the cascaded Cinder to execute queries with a timestamp filter instead of returning all objects.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/cinder/timestamp-query-patch_patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Neutron - DVR patch
+
+        This patch adds the DVR (distributed virtual router) feature to Neutron.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/neutron/dvr-patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Neutron - ml2-mech-driver-cascaded patch
+
+        This patch enables the L2 population driver to populate the virtual remote port for a VM located in another OpenStack.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Patches for Neutron - openvswitch-agent patch
+
+        This patch obtains the DVR MAC across OpenStacks for cross-OpenStack L3 networking (VLAN-VLAN/VLAN-VxLAN/VxLAN-VxLAN).
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/neutron/openvswitch-agent-patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+* **Proxy installation step by step**
+
+1. Node1
+    - Nova proxy
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/novaproxy
+        ```
+        and follow the README.md instructions to install the proxy. Please change the configuration values in install.sh according to your environment settings.
+
+    - Cinder proxy
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/cinderproxy
+        ```
+        and follow the README.md instructions to install the proxy. Please change the configuration values in install.sh according to your environment settings.
+
+    - L2 proxy
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/neutronproxy/l2-proxy
+        ```
+        and follow the README.md instructions to install the proxy. Please change the configuration values in install.sh according to your environment settings.
+
+    - L3 proxy
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/neutronproxy/l3-proxy
+        ```
+        and follow the README.md instructions to install the proxy. Please change the configuration values in install.sh according to your environment settings.
+
+2. Node2
+    - Nova proxy
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/novaproxy
+        ```
+        and follow the README.md instructions to install the proxy. Please change the configuration values in install.sh according to your environment settings.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/nova/instance_mapping_uuid_patch/nova/objects
+        cp instance.py $python_installation_path/site-packages/nova/objects/
+        ```
+        This file is a patch for the instance UUID mapping used on the proxy nodes.
+
+    - Cinder proxy
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/cinderproxy
+        ```
+        and follow the README.md instructions to install the proxy.
+        Please change the configuration values in install.sh according to your environment settings.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy
+        cp models.py $python_installation_path/site-packages/cinder/db/sqlalchemy
+        ```
+        This file is a patch for the instance UUID mapping used on the proxy nodes.
+
+    - L2 proxy
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/neutronproxy/l2-proxy
+        ```
+        and follow the README.md instructions to install the proxy. Please change the configuration values in install.sh according to your environment settings.
+
+    - L3 proxy
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/neutronproxy/l3-proxy
+        ```
+        and follow the README.md instructions to install the proxy. Please change the configuration values in install.sh according to your environment settings.
+
+
+Upgrade to Glance cascading
+------------
+
+* **Prerequisites**
+    - To experience the Glance cascading feature, you can simply upgrade the current installation with a few steps, see the following picture:
+
+    ![minimal_setup_with_glance_cascading](./minimal_setup_with_glance_cascading.png?raw=true)
+
+1. Node1
+    - Patches for Glance - glance_location_patch
+
+        This patch enables Glance to handle HTTP URL locations. The patch also inserts the sync manager into the chain of responsibility.
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/icehouse-patches/glance/glance_location_patch
+        ```
+        and follow the README.md instructions to install the patch.
+
+    - Sync Manager
+
+        Navigate to the folder
+        ```
+        cd ./tricircle/glancesync
+        ```
+
+        Modify the storage scheme configuration for the cascading and cascaded levels:
+        ```
+        vi ./tricircle/glancesync/etc/glance/glance_store.yaml
+        ```
+
+        Follow the README.md instructions to install the sync manager. Please change the configuration values in install.sh according to your environment settings, especially the following options:
+        sync_enabled=True
+        sync_server_port=9595
+        sync_server_host=127.0.0.1
+
+2. Node3
+    - Glance Installation
+
+        Install Glance on Node3 as the cascaded Glance.
+        Register the service endpoint in KeyStone.
+        Change the Glance endpoint in nova.conf and cinder.conf to the Glance located on Node3.
+
+3. Node4
+    - Glance Installation
+
+        Install Glance on Node4 as the cascaded Glance.
+        Register the service endpoint in KeyStone.
+        Change the Glance endpoint in nova.conf and cinder.conf to the Glance located on Node4.
+
+4. Configuration
+    - Change the Nova proxy configuration on Node1: set "cascaded_glance_flag" to True and add the "cascaded_glance_url" of Node3, following the Nova-proxy README.md instructions.
+    - Change the Cinder proxy configuration on Node1: set "glance_cascading_flag" to True and add the "cascaded_glance_url" of Node3, following the Cinder-proxy README.md instructions.
+
+    - Change the Nova proxy configuration on Node2: set "cascaded_glance_flag" to True and add the "cascaded_glance_url" of Node4, following the Nova-proxy README.md instructions.
+    - Change the Cinder proxy configuration on Node2: set "glance_cascading_flag" to True and add the "cascaded_glance_url" of Node4, following the Cinder-proxy README.md instructions.
+
+    (A configuration sketch for these options is shown after step 5 below.)
+
+5. Experience Glance cascading
+    - Restart all related services.
+    - Use the Glance V2 API to create an image, upload an image, or patch a location for an image. The image should be synchronized to the cascaded (distributed) Glance if sync_enabled is set to True.
+    - Synchronizing an image only on first use, instead of on upload or location patching, is still in the testing phase and may not work properly.
+    - Create VMs/volumes/etc. from Horizon.
+
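+
+To illustrate step 4 above: the option names come from the Nova-proxy and Cinder-proxy READMEs, while the URLs below are placeholders only, and the exact configuration file locations depend on how the proxies were installed. On Node1 the result would look roughly like this (Node2 is analogous, pointing at Node4):
+```
+# Nova proxy (Node1)
+cascaded_glance_flag = True
+cascaded_glance_url = http://$NODE3_IP:9292
+
+# Cinder proxy (Node1, /etc/cinder/cinder.conf, [DEFAULT])
+glance_cascading_flag = True
+cascaded_glance_url = http://$NODE3_IP:9292
+```
+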
diff --git a/cinderproxy/README.md b/cinderproxy/README.md new file mode 100644 index 00000000..66ed571c --- /dev/null +++ b/cinderproxy/README.md @@ -0,0 +1,148 @@
+Openstack Cinder Proxy
+===============================
+
+    Cinder-Proxy plays the same role as Cinder-Volume in the cascading OpenStack.
+    Cinder-Proxy treats the cascaded Cinder as its volume back end and converts the internal request messages from the message bus into RESTful API calls to the cascaded Cinder.
+
+
+Key modules
+-----------
+
+* The new cinder proxy module cinder_proxy, which treats the cascaded Cinder as its volume back end and converts the internal request messages from the message bus into RESTful API calls to the cascaded Cinder:
+
+    cinder/volume/cinder_proxy.py
+
+Requirements
+------------
+* openstack-cinder-volume-2014.1-14.1 has been installed
+
+Installation
+------------
+
+We provide two ways to install the cinder proxy code. In this section, we will guide you through installing the cinder proxy with the minimum configuration.
+
+* **Note:**
+
+    - Make sure you have an existing installation of **Openstack Icehouse**.
+    - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified:
+      $CINDER_CONFIG_PARENT_DIR/cinder.conf
+      (replace the $... with actual directory names.)
+
+* **Manual Installation**
+
+    - Make sure you have performed the backups properly.
+
+    - Navigate to the local repository and copy the contents of the 'cinder' sub-directory to the corresponding places in the existing cinder installation, e.g.
+      ```cp -r $LOCAL_REPOSITORY_DIR/cinder $CINDER_PARENT_DIR```
+      (replace the $... with actual directory names.)
+
+    - Update the cinder configuration file (e.g. /etc/cinder/cinder.conf) with the minimum options below. If an option already exists, modify its value; otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide.
+      ```
+      [DEFAULT]
+      ...
+      ###configuration for Cinder cascading ###
+      volume_manager=cinder.volume.cinder_proxy.CinderProxy
+      volume_sync_interval=5
+      cinder_tenant_name=$CASCADED_ADMIN_TENANT
+      cinder_username=$CASCADED_ADMIN_NAME
+      cinder_password=$CASCADED_ADMIN_PASSWORD
+      keystone_auth_url=http://$GLOBAL_KEYSTONE_IP:5000/v2.0/
+      cascading_glance_url=$CASCADING_GLANCE
+      cascaded_glance_url=http://$CASCADED_GLANCE
+      cascaded_available_zone=$CASCADED_AVAILABLE_ZONE
+      cascaded_region_name=$CASCADED_REGION_NAME
+      ```
+
+    - Restart the cinder proxy:
+      ```service openstack-cinder-volume restart```
+
+    - Done. The cinder proxy should now be working with a demo configuration (see the optional verification item below).
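+
+* **Verification (optional)**
+
+    - After restarting, it is worth checking that the proxy actually came up. A quick sketch; the log file location may differ on your distribution:
+      ```
+      # the cinder-volume service entry for this host should be reported as "up"
+      cinder service-list
+      # watch the proxy log while it initializes its volume mapping cache
+      tail -f /var/log/cinder/volume.log
+      ```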
+
+* **Automatic Installation**
+
+    - Make sure you have performed the backups properly.
+
+    - Navigate to the installation directory and run the installation script:
+      ```
+      cd $LOCAL_REPOSITORY_DIR/installation
+      sudo bash ./install.sh
+      ```
+      (replace the $... with actual directory names.)
+
+    - Done. The installation script sets up the cinder proxy with the minimum configuration below. Check the "Configurations" section for a full configuration guide.
+      ```
+      [DEFAULT]
+      ...
+      ###cascade info ###
+      ...
+      ###configuration for Cinder cascading ###
+      volume_manager=cinder.volume.cinder_proxy.CinderProxy
+      volume_sync_interval=5
+      cinder_tenant_name=$CASCADED_ADMIN_TENANT
+      cinder_username=$CASCADED_ADMIN_NAME
+      cinder_password=$CASCADED_ADMIN_PASSWORD
+      keystone_auth_url=http://$GLOBAL_KEYSTONE_IP:5000/v2.0/
+      cascading_glance_url=$CASCADING_GLANCE
+      cascaded_glance_url=http://$CASCADED_GLANCE
+      cascaded_available_zone=$CASCADED_AVAILABLE_ZONE
+      cascaded_region_name=$CASCADED_REGION_NAME
+      ```
+
+* **Troubleshooting**
+
+    In case the automatic installation process does not complete, please check the following:
+
+    - Make sure your OpenStack version is Icehouse.
+
+    - Check the variables at the beginning of the install.sh script. Your installation directories may differ from the default values we provide.
+
+    - The installation script automatically adds the related code to $CINDER_PARENT_DIR/cinder and modifies the related configuration.
+
+    - In case the automatic installation does not work, try to install manually.
+
+Configurations
+--------------
+
+* This is a (default) configuration sample for the cinder proxy. Please add/modify these options in /etc/cinder/cinder.conf.
+* Note:
+    - Please make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new option of the same name.
+    - Please refer to the 'Configuration Details' section below for proper configuration and usage of costs and constraints.
+
+```
+[DEFAULT]
+
+...
+
+#
+# Options defined in cinder.volume.manager
+#
+
+# Default manager to use for the cinder proxy (string value)
+volume_manager=cinder.volume.cinder_proxy.CinderProxy
+
+
+# The cascading level keystone service url, through which the cinder proxy
+# accesses the cascading level keystone service
+keystone_auth_url=$keystone_auth_url
+
+# The cascading level glance service url, through which the cinder proxy
+# accesses the cascading level glance service
+cascading_glance_url=$CASCADING_GLANCE
+
+# The cascaded level glance service url, by which the cinder proxy
+# can judge whether the cascading glance image has a location for this cascaded glance
+cascaded_glance_url=http://$CASCADED_GLANCE
+
+# The cascaded level region name, which is set as a parameter when
+# the cascaded level component services register their endpoints in keystone
+cascaded_region_name=$CASCADED_REGION_NAME
+
+# The cascaded level availability zone name, which is set as a parameter when
+# forwarding requests to the cascaded level cinder. Note that the value of
+# cascaded_available_zone in cinder-proxy must be the same as storage_availability_zone
+# on the cascaded level node. This option may be removed in the future in favour of the
+# cinder-proxy storage_availability_zone option, but for now it is up to the admin to make
+# sure that storage_availability_zone in cinder-proxy and the cascaded cinder keep the same value.
+cascaded_available_zone=$CASCADED_AVAILABLE_ZONE
+```
+
diff --git a/cinderproxy/cinder/volume/cinder_proxy.py b/cinderproxy/cinder/volume/cinder_proxy.py new file mode 100644 index 00000000..10424aa4 --- /dev/null +++ b/cinderproxy/cinder/volume/cinder_proxy.py @@ -0,0 +1,1099 @@
+# Copyright 2014 Huawei Technologies Co., LTD
+# All Rights Reserved.
+# +# @author: z00209472, Huawei Technologies Co., LTD +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Cinder_proxy manages creating, attaching, detaching, and persistent storage. + +Persistent storage volumes keep their state independent of instances. You can +attach to an instance, terminate the instance, spawn a new instance (even +one from a different image) and re-attach the volume with the same data +intact. + +**Related Flags** + +:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`). +:volume_manager: The module name of a class derived from + :class:`manager.Manager` (default: + :class:`cinder.volume.manager.Manager`). +:volume_group: Name of the group that will contain exported volumes (default: + `cinder-volumes`) +:num_shell_tries: Number of times to attempt to run commands (default: 3) + +""" + + +import time +import datetime + +from oslo.config import cfg +from oslo import messaging + +from cinder import compute +from cinder import context +from cinder import exception +from cinder import manager +from cinder import quota +from cinder import utils +from cinder import volume + +from cinder.image import glance +from cinder.openstack.common import excutils +from cinder.openstack.common import log as logging +from cinder.openstack.common import periodic_task +from cinder.openstack.common import timeutils +from cinder.volume.configuration import Configuration +from cinder.volume import rpcapi as volume_rpcapi +from cinder.volume import utils as volume_utils +from cinderclient import service_catalog +from cinderclient.v2 import client as cinder_client +from keystoneclient.v2_0 import client as kc + +from eventlet.greenpool import GreenPool +LOG = logging.getLogger(__name__) + +QUOTAS = quota.QUOTAS + +volume_manager_opts = [ + cfg.IntOpt('migration_create_volume_timeout_secs', + default=300, + help='Timeout for creating the volume to migrate to ' + 'when performing volume migration (seconds)'), + cfg.IntOpt('volume_sync_interval', + default=5, + help='seconds between cascading and cascaded cinders' + 'when synchronizing volume data'), + cfg.BoolOpt('volume_service_inithost_offload', + default=False, + help='Offload pending volume delete during ' + 'volume service startup'), + cfg.StrOpt('cinder_username', + default='cinder_username', + help='username for connecting to cinder in admin context'), + cfg.StrOpt('cinder_password', + default='cinder_password', + help='password for connecting to cinder in admin context', + secret=True), + cfg.StrOpt('cinder_tenant_name', + default='cinder_tenant_name', + help='tenant name for connecting to cinder in admin context'), + cfg.StrOpt('cascaded_available_zone', + default='nova', + help='available zone for cascaded openstack'), + cfg.StrOpt('keystone_auth_url', + default='http://127.0.0.1:5000/v2.0/', + help='value of keystone url'), + cfg.StrOpt('cascaded_cinder_url', + default='http://127.0.0.1:8776/v2/%(project_id)s', + help='value of cascaded cinder url'), + cfg.StrOpt('cascading_cinder_url', + 
default='http://127.0.0.1:8776/v2/%(project_id)s', + help='value of cascading cinder url'), + cfg.BoolOpt('glance_cascading_flag', + default=False, + help='Whether to use glance cescaded'), + cfg.StrOpt('cascading_glance_url', + default='127.0.0.1:9292', + help='value of cascading glance url'), + cfg.StrOpt('cascaded_glance_url', + default='http://127.0.0.1:9292', + help='value of cascaded glance url'), + cfg.StrOpt('cascaded_region_name', + default='RegionOne', + help='Region name of this node'), +] +CONF = cfg.CONF +CONF.register_opts(volume_manager_opts) + + +def locked_volume_operation(f): + """Lock decorator for volume operations. + + Takes a named lock prior to executing the operation. The lock is named with + the operation executed and the id of the volume. This lock can then be used + by other operations to avoid operation conflicts on shared volumes. + + Example use: + + If a volume operation uses this decorator, it will block until the named + lock is free. This is used to protect concurrent operations on the same + volume e.g. delete VolA while create volume VolB from VolA is in progress. + """ + def lvo_inner1(inst, context, volume_id, **kwargs): + @utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True) + def lvo_inner2(*_args, **_kwargs): + return f(*_args, **_kwargs) + return lvo_inner2(inst, context, volume_id, **kwargs) + return lvo_inner1 + + +def locked_snapshot_operation(f): + """Lock decorator for snapshot operations. + + Takes a named lock prior to executing the operation. The lock is named with + the operation executed and the id of the snapshot. This lock can then be + used by other operations to avoid operation conflicts on shared snapshots. + + Example use: + + If a snapshot operation uses this decorator, it will block until the named + lock is free. This is used to protect concurrent operations on the same + snapshot e.g. delete SnapA while create volume VolA from SnapA is in + progress. 
+ """ + def lso_inner1(inst, context, snapshot_id, **kwargs): + @utils.synchronized("%s-%s" % (snapshot_id, f.__name__), external=True) + def lso_inner2(*_args, **_kwargs): + return f(*_args, **_kwargs) + return lso_inner2(inst, context, snapshot_id, **kwargs) + return lso_inner1 + + +class CinderProxy(manager.SchedulerDependentManager): + + """Manages attachable block storage devices.""" + + RPC_API_VERSION = '1.16' + target = messaging.Target(version=RPC_API_VERSION) + + def __init__(self, service_name=None, *args, **kwargs): + """Load the specified in args, or flags.""" + # update_service_capabilities needs service_name to be volume + super(CinderProxy, self).__init__(service_name='volume', + *args, **kwargs) + self.configuration = Configuration(volume_manager_opts, + config_group=service_name) + self._tp = GreenPool() + + self.volume_api = volume.API() + + self._last_info_volume_state_heal = 0 + self._change_since_time = None + self.volumes_mapping_cache = {'volumes': {}, 'snapshots': {}} + self._init_volume_mapping_cache() + self.image_service = glance.get_default_image_service() + + def _init_volume_mapping_cache(self): + + cinderClient = self._get_cinder_cascaded_admin_client() + + try: + search_op = {'all_tenants': True} + volumes = cinderClient.volumes.list(search_opts=search_op) + for volume in volumes: + if 'logicalVolumeId' in volume._info['metadata']: + volumeId = volume._info['metadata']['logicalVolumeId'] + physicalVolumeId = volume._info['id'] + self.volumes_mapping_cache['volumes'][volumeId] = \ + physicalVolumeId + + snapshots = \ + cinderClient.volume_snapshots.list(search_opts=search_op) + for snapshot in snapshots: + if 'logicalSnapshotId' in snapshot._info['metadata']: + snapshotId = \ + snapshot._info['metadata']['logicalSnapshotId'] + physicalSnapshotId = snapshot._info['id'] + self.volumes_mapping_cache['snapshots'][snapshotId] = \ + physicalSnapshotId + + LOG.info(_("Cascade info: cinder proxy: init volumes mapping" + "cache:%s"), self.volumes_mapping_cache) + + except Exception as ex: + LOG.error(_("Failed init volumes mapping cache")) + LOG.exception(ex) + + def _heal_volume_mapping_cache(self, volumeId, physicalVolumeId, action): + if action == 'add': + self.volumes_mapping_cache['volumes'][volumeId] = physicalVolumeId + LOG.info(_("Cascade info: volume mapping cache add record. " + "volumeId:%s,physicalVolumeId:%s"), + (volumeId, physicalVolumeId)) + return True + + elif action == 'remove': + if volumeId in self.volumes_mapping_cache['volumes']: + self.volumes_mapping_cache['volumes'].pop(volumeId) + LOG.info(_("Casecade info: volume mapping cache remove record." + " volumeId:%s, physicalVolumeId:%s"), + (volumeId, physicalVolumeId)) + return True + + def _heal_snapshot_mapping_cache(self, snapshotId, physicalSnapshotId, + action): + if action == 'add': + self.volumes_mapping_cache['snapshots'][snapshotId] = \ + physicalSnapshotId + LOG.info(_("Cascade info: snapshots mapping cache add record. 
" + "snapshotId:%s, physicalSnapshotId:%s"), + (snapshotId, physicalSnapshotId)) + return True + elif action == 'remove': + if snapshotId in self.volumes_mapping_cache['snapshots']: + self.volumes_mapping_cache['snapshots'].pop(snapshotId) + LOG.info(_("Casecade info: volume snapshot mapping cache" + "remove snapshotId:%s,physicalSnapshotId:%s"), + (snapshotId, physicalSnapshotId)) + return True + + def _get_cascaded_volume_id(self, volume_id): + physical_volume_id = None + if volume_id in self.volumes_mapping_cache['volumes']: + physical_volume_id = \ + self.volumes_mapping_cache['volumes'].get(volume_id) + LOG.debug(_('get cascade volume,volume id:%s,physicalVolumeId:%s'), + (volume_id, physical_volume_id)) + + if physical_volume_id is None: + LOG.error(_('can not find volume %s in volumes_mapping_cache %s.'), + volume_id, self.volumes_mapping_cache) + + return physical_volume_id + + def _get_cascaded_snapshot_id(self, snapshot_id): + physical_snapshot_id = None + if snapshot_id in self.volumes_mapping_cache['snapshots']: + physical_snapshot_id = \ + self.volumes_mapping_cache['snapshots'].get('snapshot_id') + LOG.debug(_("get cascade volume,snapshot_id:%s," + "physicalSnapshotId:%s"), + (snapshot_id, physical_snapshot_id)) + + if physical_snapshot_id is None: + LOG.error(_('not find snapshot %s in volumes_mapping_cache %s'), + snapshot_id, self.volumes_mapping_cache) + + return physical_snapshot_id + + def _get_cinder_cascaded_admin_client(self): + + try: + kwargs = {'username': cfg.CONF.cinder_username, + 'password': cfg.CONF.cinder_password, + 'tenant_name': cfg.CONF.cinder_tenant_name, + 'auth_url': cfg.CONF.keystone_auth_url + } + + client_v2 = kc.Client(**kwargs) + sCatalog = getattr(client_v2, 'auth_ref').get('serviceCatalog') + + compat_catalog = { + 'access': {'serviceCatalog': sCatalog} + } + + sc = service_catalog.ServiceCatalog(compat_catalog) + + url = sc.url_for(attr='region', + filter_value=cfg.CONF.cascaded_region_name, + service_type='volume', + service_name='cinder', + endpoint_type='publicURL') + + cinderclient = cinder_client.Client( + username=cfg.CONF.cinder_username, + api_key=cfg.CONF.cinder_password, + tenant_id=cfg.CONF.cinder_tenant_name, + auth_url=cfg.CONF.keystone_auth_url) + + cinderclient.client.auth_token = client_v2.auth_ref.auth_token + cinderclient.client.management_url = url + return cinderclient + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to get cinder python client.')) + + def _get_cinder_cascaded_user_client(self, context): + + try: + ctx_dict = context.to_dict() + cinderclient = cinder_client.Client( + username=ctx_dict.get('user_id'), + api_key=ctx_dict.get('auth_token'), + project_id=ctx_dict.get('project_name'), + auth_url=cfg.CONF.keystone_auth_url) + cinderclient.client.auth_token = ctx_dict.get('auth_token') + cinderclient.client.management_url = \ + cfg.CONF.cascaded_cinder_url % ctx_dict + return cinderclient + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to get cinder python client.')) + + def _get_image_cascaded(self, context, image_id, cascaded_glance_url): + + try: + # direct_url is returned by v2 api + client = glance.GlanceClientWrapper( + context, + netloc=cfg.CONF.cascading_glance_url, + use_ssl=False, + version="2") + image_meta = client.call(context, 'get', image_id) + + except Exception: + glance._reraise_translated_image_exception(image_id) + + if not self.image_service._is_image_available(context, image_meta): + raise 
exception.ImageNotFound(image_id=image_id) + + locations = getattr(image_meta, 'locations', None) + LOG.debug(_("Cascade info: image glance get_image_cascaded," + "locations:%s"), locations) + LOG.debug(_("Cascade info: image glance get_image_cascaded," + "cascaded_glance_url:%s"), cascaded_glance_url) + + cascaded_image_id = None + for loc in locations: + image_url = loc.get('url') + LOG.debug(_("Cascade info: image glance get_image_cascaded," + "image_url:%s"), image_url) + if cascaded_glance_url in image_url: + (cascaded_image_id, glance_netloc, use_ssl) = \ + glance._parse_image_ref(image_url) + LOG.debug(_("Cascade info : Result :image glance " + "get_image_cascaded,%s") % cascaded_image_id) + break + + if cascaded_image_id is None: + raise exception.CinderException( + _("Cascade exception: Cascaded image for image %s not exist ") + % image_id) + + return cascaded_image_id + + def _add_to_threadpool(self, func, *args, **kwargs): + self._tp.spawn_n(func, *args, **kwargs) + + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service. + """ + + ctxt = context.get_admin_context() + + volumes = self.db.volume_get_all_by_host(ctxt, self.host) + LOG.debug(_("Re-exporting %s volumes"), len(volumes)) + + LOG.debug(_('Resuming any in progress delete operations')) + for volume in volumes: + if volume['status'] == 'deleting': + LOG.info(_('Resuming delete on volume: %s') % volume['id']) + if CONF.volume_service_inithost_offload: + # Offload all the pending volume delete operations to the + # threadpool to prevent the main volume service thread + # from being blocked. + self._add_to_threadpool(self.delete_volume(ctxt, + volume['id'])) + else: + # By default, delete volumes sequentially + self.delete_volume(ctxt, volume['id']) + + # collect and publish service capabilities + self.publish_service_capabilities(ctxt) + + def create_volume(self, context, volume_id, request_spec=None, + filter_properties=None, allow_reschedule=True, + snapshot_id=None, image_id=None, source_volid=None): + """Creates and exports the volume.""" + + ctx_dict = context.__dict__ + try: + volume_properties = request_spec.get('volume_properties') + size = volume_properties.get('size') + display_name = volume_properties.get('display_name') + display_description = volume_properties.get('display_description') + volume_type_id = volume_properties.get('volume_type_id') + user_id = ctx_dict.get('user_id') + project_id = ctx_dict.get('project_id') + + cascaded_snapshot_id = None + if snapshot_id is not None: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + cascaded_snapshot_id = snapshot_ref['mapping_uuid'] + LOG.info(_('Cascade info: create volume from snapshot, ' + 'cascade id:%s'), cascaded_snapshot_id) + + cascaded_source_volid = None + if source_volid is not None: + vol_ref = self.db.volume_get(context, source_volid) + cascaded_source_volid = vol_ref['mapping_uuid'] + LOG.info(_('Cascade info: create volume from source volume, ' + 'cascade id:%s'), cascaded_source_volid) + + cascaded_volume_type = None + if volume_type_id is not None: + volume_type_ref = \ + self.db.volume_type_get(context, volume_type_id) + cascaded_volume_type = volume_type_ref['name'] + LOG.info(_('Cascade info: create volume use volume type, ' + 'cascade name:%s'), cascaded_volume_type) + + metadata = volume_properties.get('metadata') + if metadata is None: + metadata = {} + + metadata['logicalVolumeId'] = volume_id + + cascaded_image_id = None + if image_id is not None: + if 
cfg.CONF.glance_cascading_flag: + cascaded_image_id = self._get_image_cascaded( + context, + image_id, + cfg.CONF.cascaded_glance_url) + else: + cascaded_image_id = image_id + LOG.info(_("Cascade info: create volume use image, " + "cascaded image id is %s:"), cascaded_image_id) + + availability_zone = cfg.CONF.cascaded_available_zone + LOG.info(_('Cascade info: create volume with available zone:%s'), + availability_zone) + + cinderClient = self._get_cinder_cascaded_user_client(context) + + bodyResponse = cinderClient.volumes.create( + size=size, + snapshot_id=cascaded_snapshot_id, + source_volid=cascaded_source_volid, + name=display_name, + description=display_description, + volume_type=cascaded_volume_type, + user_id=user_id, + project_id=project_id, + availability_zone=availability_zone, + metadata=metadata, + imageRef=cascaded_image_id) + + if 'logicalVolumeId' in metadata: + metadata.pop('logicalVolumeId') + metadata['mapping_uuid'] = bodyResponse._info['id'] + self.db.volume_metadata_update(context, volume_id, metadata, True) + + if bodyResponse._info['status'] == 'creating': + self._heal_volume_mapping_cache(volume_id, + bodyResponse._info['id'], + 'add') + self.db.volume_update( + context, + volume_id, + {'mapping_uuid': bodyResponse._info['id']}) + + except Exception: + with excutils.save_and_reraise_exception(): + self.db.volume_update(context, + volume_id, + {'status': 'error'}) + + return volume_id + + @periodic_task.periodic_task(spacing=CONF.volume_sync_interval, + run_immediately=True) + def _heal_volume_status(self, context): + + TIME_SHIFT_TOLERANCE = 3 + + heal_interval = CONF.volume_sync_interval + + if not heal_interval: + return + + curr_time = time.time() + LOG.info(_('Cascade info: last volume update time:%s'), + self._last_info_volume_state_heal) + LOG.info(_('Cascade info: heal interval:%s'), heal_interval) + LOG.info(_('Cascade info: curr_time:%s'), curr_time) + + if self._last_info_volume_state_heal + heal_interval > curr_time: + return + self._last_info_volume_state_heal = curr_time + + cinderClient = self._get_cinder_cascaded_admin_client() + + try: + if self._change_since_time is None: + search_opt = {'all_tenants': True} + volumes = cinderClient.volumes.list(search_opts=search_opt) + volumetypes = cinderClient.volume_types.list() + LOG.info(_('Cascade info: change since time is none,' + 'volumes:%s'), volumes) + else: + change_since_isotime = \ + timeutils.parse_isotime(self._change_since_time) + changesine_timestamp = change_since_isotime - \ + datetime.timedelta(seconds=TIME_SHIFT_TOLERANCE) + timestr = time.mktime(changesine_timestamp.timetuple()) + new_change_since_isotime = \ + timeutils.iso8601_from_timestamp(timestr) + + search_op = {'all_tenants': True, + 'changes-since': new_change_since_isotime} + volumes = cinderClient.volumes.list(search_opts=search_op) + volumetypes = cinderClient.volume_types.list() + LOG.info(_('Cascade info: search time is not none,' + 'volumes:%s'), volumes) + + self._change_since_time = timeutils.isotime() + + if len(volumes) > 0: + LOG.debug(_('Updated the volumes %s'), volumes) + + for volume in volumes: + volume_id = volume._info['metadata']['logicalVolumeId'] + volume_status = volume._info['status'] + if volume_status == "in-use": + self.db.volume_update(context, volume_id, + {'status': volume._info['status'], + 'attach_status': 'attached', + 'attach_time': timeutils.strtime() + }) + elif volume_status == "available": + self.db.volume_update(context, volume_id, + {'status': volume._info['status'], + 'attach_status': 
'detached', + 'instance_uuid': None, + 'attached_host': None, + 'mountpoint': None, + 'attach_time': None + }) + else: + self.db.volume_update(context, volume_id, + {'status': volume._info['status']}) + LOG.info(_('Cascade info: Updated the volume %s status from' + 'cinder-proxy'), volume_id) + + vol_types = self.db.volume_type_get_all(context, inactive=False) + for volumetype in volumetypes: + volume_type_name = volumetype._info['name'] + if volume_type_name not in vol_types.keys(): + extra_specs = volumetype._info['extra_specs'] + self.db.volume_type_create( + context, + dict(name=volume_type_name, extra_specs=extra_specs)) + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to sys volume status to db.')) + + @locked_volume_operation + def delete_volume(self, context, volume_id, unmanage_only=False): + """Deletes and unexports volume.""" + context = context.elevated() + + volume_ref = self.db.volume_get(context, volume_id) + + if context.project_id != volume_ref['project_id']: + project_id = volume_ref['project_id'] + else: + project_id = context.project_id + + LOG.info(_("volume %s: deleting"), volume_ref['id']) + if volume_ref['attach_status'] == "attached": + # Volume is still attached, need to detach first + raise exception.VolumeAttached(volume_id=volume_id) + if volume_ref['host'] != self.host: + raise exception.InvalidVolume( + reason=_("volume is not local to this node")) + + self._notify_about_volume_usage(context, volume_ref, "delete.start") + self._reset_stats() + + try: + self._delete_cascaded_volume(context, volume_id) + except Exception: + LOG.exception(_("Failed to deleting volume")) + # Get reservations + try: + reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']} + QUOTAS.add_volume_type_opts(context, + reserve_opts, + volume_ref.get('volume_type_id')) + reservations = QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) + except Exception: + reservations = None + LOG.exception(_("Failed to update usages deleting volume")) + + # Delete glance metadata if it exists + try: + self.db.volume_glance_metadata_delete_by_volume(context, volume_id) + LOG.debug(_("volume %s: glance metadata deleted"), + volume_ref['id']) + except exception.GlanceMetadataNotFound: + LOG.debug(_("no glance metadata found for volume %s"), + volume_ref['id']) + + self.db.volume_destroy(context, volume_id) + LOG.info(_("volume %s: deleted successfully"), volume_ref['id']) + self._notify_about_volume_usage(context, volume_ref, "delete.end") + + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations, project_id=project_id) + + self.publish_service_capabilities(context) + + return True + + def _delete_cascaded_volume(self, context, volume_id): + + try: + + vol_ref = self.db.volume_get(context, volume_id) + casecaded_volume_id = vol_ref['mapping_uuid'] + LOG.info(_('Cascade info: prepare to delete cascaded volume %s.'), + casecaded_volume_id) + + cinderClient = self._get_cinder_cascaded_user_client(context) + + cinderClient.volumes.delete(volume=casecaded_volume_id) + LOG.info(_('Cascade info: finished to delete cascade volume %s'), + casecaded_volume_id) +# self._heal_volume_mapping_cache(volume_id,casecade_volume_id,s'remove') + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Cascade info: failed to delete cascaded' + ' volume %s'), casecaded_volume_id) + + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + + context = 
context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + display_name = snapshot_ref['display_name'] + display_description = snapshot_ref['display_description'] + LOG.info(_("snapshot %s: creating"), snapshot_ref['id']) + + self._notify_about_snapshot_usage( + context, snapshot_ref, "create.start") + + vol_ref = self.db.volume_get(context, volume_id) + LOG.info(_("Cascade info: create snapshot while cascade id is:%s"), + vol_ref['mapping_uuid']) + + try: + vol_ref = self.db.volume_get(context, volume_id) + casecaded_volume_id = vol_ref['mapping_uuid'] + cinderClient = self._get_cinder_cascaded_user_client(context) + bodyResponse = cinderClient.volume_snapshots.create( + volume_id=casecaded_volume_id, + force=False, + name=display_name, + description=display_description) + + LOG.info(_("Cascade info: create snapshot while response is:%s"), + bodyResponse._info) + if bodyResponse._info['status'] == 'creating': + self._heal_snapshot_mapping_cache(snapshot_id, + bodyResponse._info['id'], + "add") + self.db.snapshot_update( + context, + snapshot_ref['id'], + {'mapping_uuid': bodyResponse._info['id']}) + + except Exception: + with excutils.save_and_reraise_exception(): + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error'}) + return + + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'available', + 'progress': '100%'}) +# vol_ref = self.db.volume_get(context, volume_id) + + if vol_ref.bootable: + try: + self.db.volume_glance_metadata_copy_to_snapshot( + context, snapshot_ref['id'], volume_id) + except exception.CinderException as ex: + LOG.exception(_("Failed updating %(snapshot_id)s" + " metadata using the provided volumes" + " %(volume_id)s metadata") % + {'volume_id': volume_id, + 'snapshot_id': snapshot_id}) + raise exception.MetadataCopyFailure(reason=ex) + + LOG.info(_("Cascade info: snapshot %s, created successfully"), + snapshot_ref['id']) + self._notify_about_snapshot_usage(context, snapshot_ref, "create.end") + + return snapshot_id + + @locked_snapshot_operation + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + caller_context = context + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + project_id = snapshot_ref['project_id'] + + LOG.info(_("snapshot %s: deleting"), snapshot_ref['id']) + self._notify_about_snapshot_usage( + context, snapshot_ref, "delete.start") + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id']) + + # Pass context so that drivers that want to use it, can, + # but it is not a requirement for all drivers. 
+ snapshot_ref['context'] = caller_context + + self._delete_snapshot_cascaded(context, snapshot_id) + except exception.SnapshotIsBusy: + LOG.error(_("Cannot delete snapshot %s: snapshot is busy"), + snapshot_ref['id']) + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'available'}) + return True + except Exception: + with excutils.save_and_reraise_exception(): + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + + # Get reservations + try: + if CONF.no_snapshot_gb_quota: + reserve_opts = {'snapshots': -1} + else: + reserve_opts = { + 'snapshots': -1, + 'gigabytes': -snapshot_ref['volume_size'], + } + volume_ref = self.db.volume_get(context, snapshot_ref['volume_id']) + QUOTAS.add_volume_type_opts(context, + reserve_opts, + volume_ref.get('volume_type_id')) + reservations = QUOTAS.reserve(context, + project_id=project_id, + **reserve_opts) + except Exception: + reservations = None + LOG.exception(_("Failed to update usages deleting snapshot")) + self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) + self.db.snapshot_destroy(context, snapshot_id) + LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id']) + self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end") + + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations, project_id=project_id) + return True + + def _delete_snapshot_cascaded(self, context, snapshot_id): + + try: + + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + cascaded_snapshot_id = snapshot_ref['mapping_uuid'] + LOG.info(_("Cascade info: delete casecade snapshot:%s"), + cascaded_snapshot_id) + + cinderClient = self._get_cinder_cascaded_user_client(context) + + cinderClient.volume_snapshots.delete(cascaded_snapshot_id) + LOG.info(_("delete casecade snapshot %s successfully."), + cascaded_snapshot_id) + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("failed to delete cascade snapshot %s"), + cascaded_snapshot_id) + + def attach_volume(self, context, volume_id, instance_uuid, host_name, + mountpoint, mode): + """Updates db to show volume is attached""" + @utils.synchronized(volume_id, external=True) + def do_attach(): + # check the volume status before attaching + volume = self.db.volume_get(context, volume_id) + volume_metadata = self.db.volume_admin_metadata_get( + context.elevated(), volume_id) + if volume['status'] == 'attaching': + if (volume['instance_uuid'] and volume['instance_uuid'] != + instance_uuid): + msg = _("being attached by another instance") + raise exception.InvalidVolume(reason=msg) + if (volume['attached_host'] and volume['attached_host'] != + host_name): + msg = _("being attached by another host") + raise exception.InvalidVolume(reason=msg) + if (volume_metadata.get('attached_mode') and + volume_metadata.get('attached_mode') != mode): + msg = _("being attached by different mode") + raise exception.InvalidVolume(reason=msg) + elif volume['status'] != "available": + msg = _("status must be available") + raise exception.InvalidVolume(reason=msg) + # TODO(jdg): attach_time column is currently varchar + # we should update this to a date-time object + # also consider adding detach_time? 
+ self.db.volume_update(context, volume_id, + {"instance_uuid": instance_uuid, + "mountpoint": mountpoint, + "attached_host": host_name + }) + + self.db.volume_admin_metadata_update(context.elevated(), + volume_id, + {"attached_mode": mode}, + False) + return do_attach() + + @locked_volume_operation + def detach_volume(self, context, volume_id): + """Updates db to show volume is detached""" + # TODO(vish): refactor this into a more general "unreserve" + # TODO(sleepsonthefloor): Is this 'elevated' appropriate? + # self.db.volume_detached(context.elevated(), volume_id) + self.db.volume_admin_metadata_delete(context.elevated(), volume_id, + 'attached_mode') + + def copy_volume_to_image(self, context, volume_id, image_meta): + """Uploads the specified volume to Glance. + + image_meta is a dictionary containing the following keys: + 'id', 'container_format', 'disk_format' + + """ + LOG.info(_("cascade info, copy volume to image, image_meta is:%s"), + image_meta) + force = image_meta.get('force', False) + image_name = image_meta.get("name") + container_format = image_meta.get("container_format") + disk_format = image_meta.get("disk_format") + vol_ref = self.db.volume_get(context, volume_id) + casecaded_volume_id = vol_ref['mapping_uuid'] + cinderClient = self._get_cinder_cascaded_user_client(context) + + resp = cinderClient.volumes.upload_to_image( + volume=casecaded_volume_id, + force=force, + image_name=image_name, + container_format=container_format, + disk_format=disk_format) + + if cfg.CONF.glance_cascading_flag: + cascaded_image_id = resp[1]['os-volume_upload_image']['image_id'] + LOG.debug(_('Cascade info:upload volume to image,get cascaded ' + 'image id is %s'), cascaded_image_id) + url = '%s/v2/images/%s' % (cfg.CONF.cascaded_glance_url, + cascaded_image_id) + locations = [{ + 'url': url, + 'metadata': {'image_id': str(cascaded_image_id), + 'image_from': 'volume' + } + }] + + image_service, image_id = \ + glance.get_remote_image_service(context, image_meta['id']) + LOG.debug(_("Cascade info: image service:%s"), image_service) + glanceClient = glance.GlanceClientWrapper( + context, + netloc=cfg.CONF.cascading_glance_url, + use_ssl=False, + version="2") + glanceClient.call(context, 'update', image_id, + remove_props=None, locations=locations) + LOG.debug(_('Cascade info:upload volume to image,finish update' + 'image %s locations %s.'), (image_id, locations)) + + def accept_transfer(self, context, volume_id, new_user, new_project): + # NOTE(jdg): need elevated context as we haven't "given" the vol + # yet + return + + def _migrate_volume_generic(self, ctxt, volume, host): + rpcapi = volume_rpcapi.VolumeAPI() + + # Create new volume on remote host + new_vol_values = {} + for k, v in volume.iteritems(): + new_vol_values[k] = v + del new_vol_values['id'] + del new_vol_values['_name_id'] + # We don't copy volume_type because the db sets that according to + # volume_type_id, which we do copy + del new_vol_values['volume_type'] + new_vol_values['host'] = host['host'] + new_vol_values['status'] = 'creating' + new_vol_values['migration_status'] = 'target:%s' % volume['id'] + new_vol_values['attach_status'] = 'detached' + new_volume = self.db.volume_create(ctxt, new_vol_values) + rpcapi.create_volume(ctxt, new_volume, host['host'], + None, None, allow_reschedule=False) + + # Wait for new_volume to become ready + starttime = time.time() + deadline = starttime + CONF.migration_create_volume_timeout_secs + new_volume = self.db.volume_get(ctxt, new_volume['id']) + tries = 0 + while 
new_volume['status'] != 'available': + tries = tries + 1 + now = time.time() + if new_volume['status'] == 'error': + msg = _("failed to create new_volume on destination host") + raise exception.VolumeMigrationFailed(reason=msg) + elif now > deadline: + msg = _("timeout creating new_volume on destination host") + raise exception.VolumeMigrationFailed(reason=msg) + else: + time.sleep(tries ** 2) + new_volume = self.db.volume_get(ctxt, new_volume['id']) + + # Copy the source volume to the destination volume + try: + if volume['status'] == 'available': + # The above call is synchronous so we complete the migration + self.migrate_volume_completion(ctxt, volume['id'], + new_volume['id'], error=False) + else: + nova_api = compute.API() + # This is an async call to Nova, which will call the completion + # when it's done + nova_api.update_server_volume(ctxt, volume['instance_uuid'], + volume['id'], new_volume['id']) + except Exception: + with excutils.save_and_reraise_exception(): + msg = _("Failed to copy volume %(vol1)s to %(vol2)s") + LOG.error(msg % {'vol1': volume['id'], + 'vol2': new_volume['id']}) + volume = self.db.volume_get(ctxt, volume['id']) + # If we're in the completing phase don't delete the target + # because we may have already deleted the source! + if volume['migration_status'] == 'migrating': + rpcapi.delete_volume(ctxt, new_volume) + new_volume['migration_status'] = None + + def migrate_volume_completion(self, ctxt, volume_id, new_volume_id, + error=False): + volume = self.db.volume_get(ctxt, volume_id) + new_volume = self.db.volume_get(ctxt, new_volume_id) + rpcapi = volume_rpcapi.VolumeAPI() + + if error: + new_volume['migration_status'] = None + rpcapi.delete_volume(ctxt, new_volume) + self.db.volume_update(ctxt, volume_id, {'migration_status': None}) + return volume_id + + self.db.volume_update(ctxt, volume_id, + {'migration_status': 'completing'}) + + # Delete the source volume (if it fails, don't fail the migration) + try: + self.delete_volume(ctxt, volume_id) + except Exception as ex: + msg = _("Failed to delete migration source vol %(vol)s: %(err)s") + LOG.error(msg % {'vol': volume_id, 'err': ex}) + + self.db.finish_volume_migration(ctxt, volume_id, new_volume_id) + self.db.volume_destroy(ctxt, new_volume_id) + self.db.volume_update(ctxt, volume_id, {'migration_status': None}) + return volume['id'] + + def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False): + """Migrate the volume to the specified host (called on source host).""" + return + + @periodic_task.periodic_task + def _report_driver_status(self, context): + LOG.info(_("Updating fake volume status")) + fake_location_info = 'LVMVolumeDriver:Huawei:cinder-volumes:default:0' + volume_stats = {'QoS_support': False, + 'location_info': fake_location_info, + 'volume_backend_name': 'LVM_iSCSI', + 'free_capacity_gb': 1024, + 'driver_version': '2.0.0', + 'total_capacity_gb': 1024, + 'reserved_percentage': 0, + 'vendor_name': 'Open Source', + 'storage_protocol': 'iSCSI' + } + self.update_service_capabilities(volume_stats) + + def publish_service_capabilities(self, context): + """Collect driver status and then publish.""" + self._report_driver_status(context) + self._publish_service_capabilities(context) + + def _reset_stats(self): + LOG.info(_("Clear capabilities")) + self._last_volume_stats = [] + + def notification(self, context, event): + LOG.info(_("Notification {%s} received"), event) + self._reset_stats() + + def _notify_about_volume_usage(self, + context, + volume, + event_suffix, + 
extra_usage_info=None): + volume_utils.notify_about_volume_usage( + context, volume, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + + def _notify_about_snapshot_usage(self, + context, + snapshot, + event_suffix, + extra_usage_info=None): + volume_utils.notify_about_snapshot_usage( + context, snapshot, event_suffix, + extra_usage_info=extra_usage_info, host=self.host) + + def extend_volume(self, context, volume_id, new_size, reservations): + volume = self.db.volume_get(context, volume_id) + + self._notify_about_volume_usage(context, volume, "resize.start") + try: + LOG.info(_("volume %s: extending"), volume['id']) + + cinderClient = self._get_cinder_cascaded_user_client(context) + + vol_ref = self.db.volume_get(context, volume_id) + cascaded_volume_id = vol_ref['mapping_uuid'] + LOG.info(_("Cascade info: extend volume cascade volume id is:%s"), + cascaded_volume_id) + cinderClient.volumes.extend(cascaded_volume_id, new_size) + LOG.info(_("Cascade info: volume %s: extended successfully"), + volume['id']) + + except Exception: + LOG.exception(_("volume %s: Error trying to extend volume"), + volume_id) + try: + self.db.volume_update(context, volume['id'], + {'status': 'error_extending'}) + finally: + QUOTAS.rollback(context, reservations) + return + + QUOTAS.commit(context, reservations) + self.db.volume_update(context, volume['id'], {'size': int(new_size), + 'status': 'extending'}) + self._notify_about_volume_usage( + context, volume, "resize.end", + extra_usage_info={'size': int(new_size)}) diff --git a/cinderproxy/installation/install.sh b/cinderproxy/installation/install.sh new file mode 100644 index 00000000..27ea8df4 --- /dev/null +++ b/cinderproxy/installation/install.sh @@ -0,0 +1,130 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_CINDER_CONF_DIR="/etc/cinder" +_CINDER_CONF_FILE="cinder.conf" +_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder" +_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log" + +# please set the option list set in cinder configure file +_CINDER_CONF_OPTION=("volume_manager=cinder.volume.cinder_proxy.CinderProxy volume_sync_interval=5 periodic_interval=5 cinder_tenant_name=admin cinder_username=admin cinder_password=1234 keystone_auth_url=http://10.67.148.210:5000/v2.0/ glance_cascading_flag=False cascading_glance_url=10.67.148.210:9292 cascaded_glance_url=http://10.67.148.201:9292 cascaded_cinder_url=http://10.67.148.201:8776/v2/%(project_id)s cascaded_region_name=Region_AZ1 cascaded_available_zone=AZ1") + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../cinder/" +_BACKUP_DIR="${_CINDER_DIR}/cinder-proxy-installation-backup" + + +function log() +{ + if [ ! 
-f "${_CINDER_INSTALL_LOG}" ] ; then + mkdir -p `dirname ${_CINDER_INSTALL_LOG}` + touch $_CINDER_INSTALL_LOG + chmod 777 $_CINDER_INSTALL_LOG + fi + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG +} + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + + +cd `dirname $0` + + +log "checking installation directories..." +if [ ! -d "${_CINDER_DIR}" ] ; then + log "Could not find the cinder installation. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi +if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then + log "Could not find cinder config file. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi + +log "checking previous installation..." +if [ -d "${_BACKUP_DIR}/cinder" ] ; then + log "It seems cinder-proxy has already been installed!" + log "Please check README for solution if this is not true." + exit 1 +fi + +log "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}/cinder" +mkdir -p "${_BACKUP_DIR}/etc/cinder" +cp -r "${_CINDER_DIR}/volume" "${_BACKUP_DIR}/cinder/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/cinder" + log "Error in code backup, aborted." + exit 1 +fi +cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/etc/cinder/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/cinder" + rm -r "${_BACKUP_DIR}/etc" + log "Error in config backup, aborted." + exit 1 +fi + +log "copying in new files..." +cp -r "${_CODE_DIR}" `dirname ${_CINDER_DIR}` +if [ $? -ne 0 ] ; then + log "Error in copying, aborted." + log "Recovering original files..." + cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder" + if [ $? -ne 0 ] ; then + log "Recovering failed! Please install manually." + fi + exit 1 +fi + +log "updating config file..." +sed -i.backup -e "/volume_manager *=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" +sed -i.backup -e "/periodic_interval *=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" +for option in $_CINDER_CONF_OPTION +do +sed -i -e "/\[DEFAULT\]/a \\"$option "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" +done + +if [ $? -ne 0 ] ; then + log "Error in updating, aborted." + log "Recovering original files..." + cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder" + if [ $? -ne 0 ] ; then + log "Recovering /cinder failed! Please install manually." + fi + cp "${_BACKUP_DIR}/etc/cinder/${_CINDER_CONF_FILE}" "${_CINDER_CONF_DIR}" && rm -r "${_BACKUP_DIR}/etc" + if [ $? -ne 0 ] ; then + log "Recovering config failed! Please install manually." + fi + exit 1 +fi + +log "restarting cinder proxy..." +service openstack-cinder-volume restart +if [ $? -ne 0 ] ; then + log "There was an error in restarting the service, please restart cinder proxy manually." + exit 1 +fi + +log "Cinder proxy Completed." +log "See README to get started." + +exit 0 diff --git a/cinderproxy/installation/uninstall.sh b/cinderproxy/installation/uninstall.sh new file mode 100644 index 00000000..57bd5a6c --- /dev/null +++ b/cinderproxy/installation/uninstall.sh @@ -0,0 +1,129 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_CINDER_CONF_DIR="/etc/cinder" +_CINDER_CONF_FILE="cinder.conf" +_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder" +_CINDER_CONF_OPTION=("volume_manager volume_sync_interval periodic_interval cinder_tenant_name cinder_username cinder_password keystone_auth_url glance_cascading_flag cascading_glance_url cascaded_glance_url cascaded_cinder_url cascaded_region_name cascaded_available_zone") + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../cinder" +_BACKUP_DIR="${_CINDER_DIR}/cinder-proxy-installation-backup" +_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/nova-solver-scheduler/installation/${_SCRIPT_NAME}.log" + +function log() +{ + if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then + mkdir -p `dirname ${_CINDER_INSTALL_LOG}` + touch $_CINDER_INSTALL_LOG + chmod 777 $_CINDER_INSTALL_LOG + fi + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG +} + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + +cd `dirname $0` + +log "checking installation directories..." +if [ ! -d "${_CINDER_DIR}" ] ; then + log "Could not find the cinder installation. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi +if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then + log "Could not find cinder config file. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi + +log "checking backup..." +if [ ! -d "${_BACKUP_DIR}/cinder" ] ; then + log "Could not find backup files. It is possible that the cinder-proxy has been uninstalled." + log "If this is not the case, then please uninstall manually." + exit 1 +fi + +log "backing up current files that might be overwritten..." +if [ -d "${_BACKUP_DIR}/uninstall" ] ; then + rm -r "${_BACKUP_DIR}/uninstall" +fi +mkdir -p "${_BACKUP_DIR}/uninstall/cinder" +mkdir -p "${_BACKUP_DIR}/uninstall/etc/cinder" +cp -r "${_CINDER_DIR}/volume" "${_BACKUP_DIR}/uninstall/cinder/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/uninstall/cinder" + log "Error in code backup, aborted." + exit 1 +fi +cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/uninstall/etc/cinder/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/uninstall/cinder" + rm -r "${_BACKUP_DIR}/uninstall/etc" + log "Error in config backup, aborted." + exit 1 +fi + +log "restoring code to the status before installing cinder-proxy..." +cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` +if [ $? -ne 0 ] ; then + log "Error in copying, aborted." + log "Recovering current files..." + cp -r "${_BACKUP_DIR}/uninstall/cinder" `dirname ${_CINDER_DIR}` + if [ $? -ne 0 ] ; then + log "Recovering failed! Please uninstall manually." + fi + exit 1 +fi + +log "updating config file..." +for option in $_CINDER_CONF_OPTION +do +sed -i.uninstall.backup -e "/"$option "*=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" +done +if [ $? -ne 0 ] ; then + log "Error in updating, aborted." 
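+    # sed failed part-way through, so restore the cinder.conf copy saved earlier in this script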
+        log "Recovering current files..."
+        cp "${_BACKUP_DIR}/uninstall/etc/cinder/${_CINDER_CONF_FILE}" "${_CINDER_CONF_DIR}"
+        if [ $? -ne 0 ] ; then
+            log "Recovering failed! Please uninstall manually."
+        fi
+        exit 1
+fi
+
+log "cleaning up backup files..."
+rm -r "${_BACKUP_DIR}/cinder" && rm -r "${_BACKUP_DIR}/etc"
+if [ $? -ne 0 ] ; then
+    log "There was an error when cleaning up the backup files."
+fi
+
+log "restarting cinder volume..."
+service openstack-cinder-volume restart
+if [ $? -ne 0 ] ; then
+    log "There was an error in restarting the service, please restart cinder volume manually."
+    exit 1
+fi
+
+log "Completed."
+
+exit 0
diff --git a/glancesync/README.md b/glancesync/README.md
new file mode 100644
index 00000000..544c7fa7
--- /dev/null
+++ b/glancesync/README.md
@@ -0,0 +1,140 @@
+Glance Sync Manager
+===============================
+
+This is a submodule of the Tricircle project; it adds a sync function to keep Glance images synchronized between the cascading Glance and the cascaded Glances.
+When an instance is launched, Nova picks an image in the same region as the instance to download from, which speeds up the overall launch time of the instance.
+
+Key modules
+-----------
+
+* Glance cascading adds only one new module, Sync, which lives in the glance/sync package:
+
+    glance/sync/__init__.py : Adds an ImageRepoProxy class (like store, policy, etc.) that layers a sync mechanism on top of the API request handling chain.
+    glance/sync/base.py : Contains the SyncManager object, which executes the sync operations.
+    glance/sync/utils.py : Helper functions.
+    glance/sync/api/ : The sync web server.
+    glance/sync/client/ : A client for the sync web server; ImageRepoProxy uses this client to issue sync requests.
+    glance/sync/task/ : Each sync operation is transformed into a task; tasks are stored in a queue and handled concurrently by eventlet.
+    glance/sync/store/ : Implements the independent glance store, separating the handling of image data from image metadata.
+    glance/cmd/sync.py : Entry point for starting the sync server (referenced by /usr/bin/glance-sync).
+
+* **Note:**
+    At present, glance cascading only supports the v2 version of glance-api.
+
+Requirements
+------------
+
+* pexpect>=2.3
+
+Installation
+------------
+* **Note:**
+    - The installation and configuration guidelines below apply only to the cascading layer of glance. For the cascaded layer, glance is installed as normal.
+
+* **Prerequisites**
+    - Install the python package pexpect>=2.3. We use pxssh for SSH logins, and there is a bug in pxssh (see https://mail.python.org/pipermail/python-list/2008-February/510054.html) that you should fix before launching the service.
+
+* **Manual Installation**
+
+    - Make sure you have performed backups properly.
+
+    1. Under the cascading OpenStack, copy these files from the glance-patch directory and the glancesync directory to the appropriate places:
+
+        | DIR | FROM | TO |
+        | ------------- |:-----------------|:-------------------------------------------|
+        | glancesync | glance/ | ${python_install_dir}/glance |
+        | glancesync | etc/glance/ | /etc/glance/ |
+        | glancesync | glance-sync | /usr/bin/ |
+        |${glance-patch}| glance/ | ${python_install_dir}/glance |
+        |${glance-patch}|glance.egg-info/entry_points.txt | ${glance_install_egg.info}/ |
+
+        ${glance-patch} = `icehouse-patches/glance/glance_location_patch`; ${python_install_dir} is where OpenStack is installed, e.g. `/usr/lib64/python2.6/site-packages` .
+    2. Add/modify the config options:
+
+        | CONFIG_FILE | OPTION | ADD or MODIFY |
+        | ----------------|:---------------------------------------------------|:--------------:|
+        |glance-api.conf | show_multiple_locations=True | M |
+        |glance-api.conf | sync_server_host=${sync_mgr_host} | A |
+        |glance-api.conf | sync_server_port=9595 | A |
+        |glance-api.conf | sync_enabled=True | A |
+        |glance-sync.conf | cascading_endpoint_url=${glance_api_endpoint_url} | M |
+        |glance-sync.conf | sync_strategy=ALL | M |
+        |glance-sync.conf | auth_host=${keystone_host} | M |
+
+    3. Re-launch the services on the cascading OpenStack, for example:
+
+        `service openstack-glance-api restart `
+        `service openstack-glance-registry restart `
+        `python /usr/bin/glance-sync --config-file=/etc/glance/glance-sync.conf & `
+
+* **Automatic Installation**
+
+    1. Enter the glance-patch installation dir: `cd ./tricircle/icehouse-patches/glance/glance_location_patch/installation` .
+    2. Optionally, modify the shell script variable `_PYTHON_INSTALL_DIR` .
+    3. Run the install script: `sh install.sh`
+    4. Enter the glancesync installation dir: `cd ./tricircle/glancesync/installation` .
+    5. Modify the cascading and cascaded glances' store scheme configuration in the file `./tricircle/glancesync/etc/glance/glance_store.yaml` .
+    6. Optionally, set the config options in the shell script (`sync_enabled=True`, `sync_server_port=9595`, `sync_server_host=127.0.0.1`) to the proper values.
+    7. Run the install script: `sh install.sh`
+
+Configurations
+--------------
+
+Besides the glance-api.conf file, we add some new config files. They are described separately.
+
+ - In glance-api.conf, three options are added:
+
+        [DEFAULT]
+
+        # Whether to use image sync; the default value is False.
+        # When configuring the cascading layer, this value should be True.
+        sync_enabled = True
+
+        # The sync server's port number, default is 9595.
+        sync_server_port = 9595
+
+        # The sync server's host name (or IP address).
+        sync_server_host = 127.0.0.1
+
+   Besides, the option show_multiple_locations should be set to True.
+
+ - In the newly added glance-sync.conf, the options are similar to those in glance-registry.conf except:
+
+        [DEFAULT]
+
+        # How to sync the image; the value can be ["None", "ALL", "USER"].
+        # "ALL" means sync to all the cascaded glances;
+        # "USER" means sync according to the user's role, project, etc.
+        sync_strategy = ALL
+
+        # The cascading glance endpoint URL. (Note that this value should be
+        # consistent with the one registered in keystone.)
+        cascading_endpoint_url = http://127.0.0.1:9292/
+
+        # For snapshot sync, the timeout (in seconds) for the snapshot's status
+        # to change to 'active'.
+        snapshot_timeout = 300
+
+        # For snapshot sync, the polling interval (in seconds) used to check
+        # the snapshot's status.
+        snapshot_sleep_interval = 10
+
+        # The number of retries when a sync task fails.
+        task_retry_times = 0
+
+        # The timeout (in seconds) when copying image data between filesystems
+        # using 'scp'.
+        scp_copy_timeout = 3600
+
+        # For snapshots, one can set the specific regions to which the snapshot
+        # will be synced. (e.g. physicalOpenstack001, physicalOpenstack002)
+        snapshot_region_names =
+
+ - Last but also important, we add a yaml file, glance_store.yaml, in the cascading glance to configure the store backends used for copying. Its entries correspond to the various store schemes (at present, only filesystem is supported) and their values depend on your environment, so you have to configure it before installation, or restart glance-sync after modifying it.
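+ - As a quick post-configuration sanity check (a suggested example, not part of the original guide; adjust the host and port to your sync_server_host/sync_server_port values), you can confirm that glance-api picked up the new options and that the sync server is listening:
+
+        grep -E 'sync_enabled|sync_server_host|sync_server_port|show_multiple_locations' /etc/glance/glance-api.conf
+        netstat -ntlp | grep 9595
+        ps -ef | grep glance-sync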
+
+
+
diff --git a/glancesync/etc/glance-sync b/glancesync/etc/glance-sync
new file mode 100644
index 00000000..6d35e4b6
--- /dev/null
+++ b/glancesync/etc/glance-sync
@@ -0,0 +1,10 @@
+#!/usr/bin/python
+# PBR Generated from 'console_scripts'
+
+import sys
+
+from glance.cmd.sync import main
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/glancesync/etc/glance/glance-sync-paste.ini b/glancesync/etc/glance/glance-sync-paste.ini
new file mode 100644
index 00000000..bef2656e
--- /dev/null
+++ b/glancesync/etc/glance/glance-sync-paste.ini
@@ -0,0 +1,35 @@
+# Use this pipeline for no auth - DEFAULT
+[pipeline:glance-sync]
+pipeline = versionnegotiation unauthenticated-context rootapp
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+# Use this pipeline for keystone auth
+[pipeline:glance-sync-keystone]
+pipeline = versionnegotiation authtoken context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-sync-trusted-auth]
+pipeline = versionnegotiation context rootapp
+
+[composite:rootapp]
+paste.composite_factory = glance.sync.api:root_app_factory
+/v1: syncv1app
+
+[app:syncv1app]
+paste.app_factory = glance.sync.api.v1:API.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
\ No newline at end of file
diff --git a/glancesync/etc/glance/glance-sync.conf b/glancesync/etc/glance/glance-sync.conf
new file mode 100644
index 00000000..ed371fcd
--- /dev/null
+++ b/glancesync/etc/glance/glance-sync.conf
@@ -0,0 +1,57 @@
+[DEFAULT]
+# Show debugging output in logs (sets DEBUG log level output)
+debug = True
+
+# Address to bind the API server
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+bind_port = 9595
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/sync.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# How to sync the image; the value can be ["None", "ALL", "USER"].
+# "ALL" means sync to all the cascaded glances;
+# "USER" means sync according to the user's role, project, etc.
+sync_strategy = None
+
+# The cascading glance endpoint URL.
+cascading_endpoint_url = http://127.0.0.1:9292/
+
+# For snapshot sync, the timeout (in seconds) for the snapshot's status
+# to change to 'active'.
+snapshot_timeout = 300
+
+# For snapshot sync, the polling interval (in seconds) used to check
+# the snapshot's status.
+snapshot_sleep_interval = 10
+
+# The number of retries when a sync task fails.
+task_retry_times = 0 + +#When copy image data using 'scp' between filesystmes, set the timeout +#time of the copy. +scp_copy_timeout = 3600 + +#When snapshot, one can set the specific regions in which the snapshot +#will sync to. +snapshot_region_names = physicalOpenstack001, physicalOpenstack002 + +[keystone_authtoken] +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http +admin_tenant_name = admin +admin_user = glance +admin_password = glance +[paste_deploy] +config_file = /etc/glance/glance-sync-paste.ini +flavor=keystone diff --git a/glancesync/etc/glance/glance_store.yaml b/glancesync/etc/glance/glance_store.yaml new file mode 100644 index 00000000..b11202e3 --- /dev/null +++ b/glancesync/etc/glance/glance_store.yaml @@ -0,0 +1,29 @@ +--- +glances: + - name: master + service_ip: "127.0.0.1" + schemes: + - name: http + parameters: + netloc: '127.0.0.1:8800' + path: '/' + image_name: 'test.img' + - name: filesystem + parameters: + host: '127.0.0.1' + datadir: '/var/lib/glance/images/' + login_user: 'glance' + login_password: 'glance' + - name: slave1 + service_ip: "0.0.0.0" + schemes: + - name: http + parameters: + netloc: '0.0.0.0:8800' + path: '/' + - name: filesystem + parameters: + host: '0.0.0.0' + datadir: '/var/lib/glance/images/' + login_user: 'glance' + login_password: 'glance' diff --git a/glancesync/glance/cmd/sync.py b/glancesync/glance/cmd/sync.py new file mode 100644 index 00000000..112c43bb --- /dev/null +++ b/glancesync/glance/cmd/sync.py @@ -0,0 +1,59 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Jia Dong, HuaWei + +""" +Reference implementation server for Glance Sync +""" + +import eventlet +import os +import sys + +from oslo.config import cfg + +# Monkey patch socket and time +eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from glance.common import config +from glance.common import exception +from glance.common import wsgi +from glance.openstack.common import log +import glance.sync + + +def main(): + try: + config.parse_args(default_config_files='glance-sync.conf') + log.setup('glance') + + server = wsgi.Server() + server.start(config.load_paste_app('glance-sync'), default_port=9595) + server.wait() + except RuntimeError as e: + sys.exit("ERROR: %s" % e) + + +if __name__ == '__main__': + main() diff --git a/glancesync/glance/sync/__init__.py b/glancesync/glance/sync/__init__.py new file mode 100644 index 00000000..1c9713e9 --- /dev/null +++ b/glancesync/glance/sync/__init__.py @@ -0,0 +1,257 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Jia Dong, HuaWei + +from oslo.config import cfg + +import glance.context +import glance.domain.proxy +import glance.openstack.common.log as logging +from glance.sync.clients import Clients as clients +from glance.sync import utils + + +LOG = logging.getLogger(__name__) + +_V2_IMAGE_CREATE_PROPERTIES = ['container_format', 'disk_format', 'min_disk', + 'min_ram', 'name', 'virtual_size', 'visibility', + 'protected'] + +_V2_IMAGE_UPDATE_PROPERTIES = ['container_format', 'disk_format', 'min_disk', + 'min_ram', 'name'] + + +def _check_trigger_sync(pre_image, image): + """ + check if it is the case that the cascaded glance has upload or first patch + location. + """ + return pre_image.status in ('saving', 'queued') and image.size and \ + [l for l in image.locations if not utils.is_glance_location(l['url'])] + + +def _from_snapshot_request(pre_image, image): + """ + when patch location, check if it's snapshot-sync case. + """ + if pre_image.status == 'queued' and len(image.locations) == 1: + loc_meta = image.locations[0]['metadata'] + return loc_meta and loc_meta.get('image_from', None) in ['snapshot', + 'volume'] + + +def get_adding_image_properties(image): + _tags = list(image.tags) or [] + kwargs = {} + kwargs['body'] = {} + for key in _V2_IMAGE_CREATE_PROPERTIES: + try: + value = getattr(image, key, None) + if value and value != 'None': + kwargs['body'][key] = value + except KeyError: + pass + _properties = getattr(image, 'extra_properties') or None + + if _properties: + extra_keys = _properties.keys() + for _key in extra_keys: + kwargs['body'][_key] = _properties[_key] + if _tags: + kwargs['body']['tags'] = _tags + return kwargs + + +def get_existing_image_locations(image): + return {'locations': image.locations} + + +class ImageRepoProxy(glance.domain.proxy.Repo): + + def __init__(self, image_repo, context, sync_api): + self.image_repo = image_repo + self.context = context + self.sync_client = sync_api.get_sync_client(context) + proxy_kwargs = {'context': context, 'sync_api': sync_api} + super(ImageRepoProxy, self).__init__(image_repo, + item_proxy_class=ImageProxy, + item_proxy_kwargs=proxy_kwargs) + + def _sync_saving_metadata(self, pre_image, image): + kwargs = {} + remove_keys = [] + changes = {} + """ + image base properties + """ + for key in _V2_IMAGE_UPDATE_PROPERTIES: + pre_value = getattr(pre_image, key, None) + my_value = getattr(image, key, None) + + if not my_value and not pre_value or my_value == pre_value: + continue + if not my_value and pre_value: + remove_keys.append(key) + else: + changes[key] = my_value + + """ + image extra_properties + """ + pre_props = pre_image.extra_properties or {} + _properties = image.extra_properties or {} + addset = set(_properties.keys()).difference(set(pre_props.keys())) + removeset = set(pre_props.keys()).difference(set(_properties.keys())) + mayrepset = set(pre_props.keys()).intersection(set(_properties.keys())) + + for key in addset: + changes[key] = _properties[key] + + for key in 
removeset: + remove_keys.append(key) + + for key in mayrepset: + if _properties[key] == pre_props[key]: + continue + changes[key] = _properties[key] + + """ + image tags + """ + tag_dict = {} + pre_tags = pre_image.tags + new_tags = image.tags + + added_tags = set(new_tags) - set(pre_tags) + removed_tags = set(pre_tags) - set(new_tags) + if added_tags: + tag_dict['add'] = added_tags + if removed_tags: + tag_dict['delete'] = removed_tags + if tag_dict: + kwargs['tags'] = tag_dict + + kwargs['changes'] = changes + kwargs['removes'] = remove_keys + if not changes and not remove_keys and not tag_dict: + return + LOG.debug(_('In image %s, some properties changed, sync...') + % (image.image_id)) + self.sync_client.update_image_matedata(image.image_id, **kwargs) + + def _try_sync_locations(self, pre_image, image): + image_id = image.image_id + """ + image locations + """ + locations_dict = {} + pre_locs = pre_image.locations + _locs = image.locations + + """ + if all locations of cascading removed, the image status become 'queued' + so the cascaded images should be 'queued' too. we replace all locations + with '[]' + """ + if pre_locs and not _locs: + LOG.debug(_('The image %s all locations removed, sync...') + % (image_id)) + self.sync_client.sync_locations(image_id, + action='CLEAR', + locs=pre_locs) + return + + added_locs = [] + removed_locs = [] + for _loc in pre_locs: + if _loc in _locs: + continue + removed_locs.append(_loc) + + for _loc in _locs: + if _loc in pre_locs: + continue + added_locs.append(_loc) + + if added_locs: + if _from_snapshot_request(pre_image, image): + add_kwargs = get_adding_image_properties(image) + else: + add_kwargs = {} + LOG.debug(_('The image %s add locations, sync...') % (image_id)) + self.sync_client.sync_locations(image_id, + action='INSERT', + locs=added_locs, + **add_kwargs) + elif removed_locs: + LOG.debug(_('The image %s remove some locations, sync...') + % (image_id)) + self.sync_client.sync_locations(image_id, + action='DELETE', + locs=removed_locs) + + def save(self, image): + pre_image = self.get(image.image_id) + result = super(ImageRepoProxy, self).save(image) + + image_id = image.image_id + if _check_trigger_sync(pre_image, image): + add_kwargs = get_adding_image_properties(image) + self.sync_client.sync_data(image_id, **add_kwargs) + LOG.debug(_('Sync data when image status changes ACTIVE, the ' + 'image id is %s.' % (image_id))) + else: + """ + In case of add/remove/replace locations property. 
+ """ + self._try_sync_locations(pre_image, image) + """ + In case of sync the glance's properties + """ + if image.status == 'active': + self._sync_saving_metadata(pre_image, image) + + return result + + def remove(self, image): + result = super(ImageRepoProxy, self).remove(image) + LOG.debug(_('Image %s removed, sync...') % (image.image_id)) + delete_kwargs = get_existing_image_locations(image) + self.sync_client.remove_image(image.image_id, **delete_kwargs) + return result + + +class ImageFactoryProxy(glance.domain.proxy.ImageFactory): + + def __init__(self, factory, context, sync_api): + self.context = context + self.sync_api = sync_api + proxy_kwargs = {'context': context, 'sync_api': sync_api} + super(ImageFactoryProxy, self).__init__(factory, + proxy_class=ImageProxy, + proxy_kwargs=proxy_kwargs) + + def new_image(self, **kwargs): + return super(ImageFactoryProxy, self).new_image(**kwargs) + + +class ImageProxy(glance.domain.proxy.Image): + + def __init__(self, image, context, sync_api=None): + self.image = image + self.sync_api = sync_api + self.context = context + super(ImageProxy, self).__init__(image) diff --git a/glancesync/glance/sync/api/__init__.py b/glancesync/glance/sync/api/__init__.py new file mode 100644 index 00000000..52b5a85c --- /dev/null +++ b/glancesync/glance/sync/api/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Jia Dong, HuaWei + +import paste.urlmap + + +def root_app_factory(loader, global_conf, **local_conf): + return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/glancesync/glance/sync/api/v1/__init__.py b/glancesync/glance/sync/api/v1/__init__.py new file mode 100644 index 00000000..e37c9929 --- /dev/null +++ b/glancesync/glance/sync/api/v1/__init__.py @@ -0,0 +1,59 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Jia Dong, HuaWei + +from glance.common import wsgi +from glance.sync.api.v1 import images + + +def init(mapper): + + images_resource = images.create_resource() + + mapper.connect("/cascaded-eps", + controller=images_resource, + action="endpoints", + conditions={'method': ['POST']}) + + mapper.connect("/images/{id}", + controller=images_resource, + action="update", + conditions={'method': ['PATCH']}) + + mapper.connect("/images/{id}", + controller=images_resource, + action="remove", + conditions={'method': ['DELETE']}) + + mapper.connect("/images/{id}", + controller=images_resource, + action="upload", + conditions={'method': ['PUT']}) + + mapper.connect("/images/{id}/location", + controller=images_resource, + action="sync_loc", + conditions={'method': ['PUT']}) + + +class API(wsgi.Router): + + """WSGI entry point for all Registry requests.""" + + def __init__(self, mapper): + mapper = mapper or wsgi.APIMapper() + init(mapper) + super(API, self).__init__(mapper) diff --git a/glancesync/glance/sync/api/v1/images.py b/glancesync/glance/sync/api/v1/images.py new file mode 100644 index 00000000..1239945a --- /dev/null +++ b/glancesync/glance/sync/api/v1/images.py @@ -0,0 +1,95 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Jia Dong, HuaWei + +from oslo.config import cfg + +from glance.common import exception +from glance.common import wsgi +import glance.openstack.common.log as logging +from glance.sync.base import SyncManagerV2 as sync_manager +from glance.sync import utils as utils + + +LOG = logging.getLogger(__name__) + + +class Controller(object): + + def __init__(self): + self.sync_manager = sync_manager() + self.sync_manager.start() + + def test(self, req): + return {'body': 'for test'} + + def update(self, req, id, body): + LOG.debug(_('sync client start run UPDATE metadata operation for' + 'image_id: %s' % (id))) + self.sync_manager.sync_image_metadata(id, req.context.auth_tok, 'SAVE', + **body) + return dict({'body': id}) + + def remove(self, req, id, body): + LOG.debug(_('sync client start run DELETE operation for image_id: %s' + % (id))) + self.sync_manager.sync_image_metadata(id, req.context.auth_tok, + 'DELETE', **body) + return dict({'body': id}) + + def upload(self, req, id, body): + LOG.debug(_('sync client start run UPLOAD operation for image_id: %s' + % (id))) + self.sync_manager.sync_image_data(id, req.context.auth_tok, **body) + return dict({'body': id}) + + def sync_loc(self, req, id, body): + action = body['action'] + locs = body['locations'] + LOG.debug(_('sync client start run SYNC-LOC operation for image_id: %s' + % (id))) + if action == 'INSERT': + self.sync_manager.adding_locations(id, req.context.auth_tok, locs, + **body) + elif action == 'DELETE': + self.sync_manager.removing_locations(id, + req.context.auth_tok, + locs) + elif action == 'CLEAR': + self.sync_manager.clear_all_locations(id, + req.context.auth_tok, + locs) + + return dict({'body': id}) + + def endpoints(self, req, body): + regions = req.params.get('regions', []) + if not regions: + regions = body.pop('regions', []) + if not isinstance(regions, list): + regions = [regions] + LOG.debug(_('get cacaded endpoints of user/tenant: %s' + % (req.context.user or req.context.tenant or 'NONE'))) + return dict(eps=utils.get_endpoints(req.context.auth_tok, + req.context.tenant, + region_names=regions) or []) + + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/glancesync/glance/sync/base.py b/glancesync/glance/sync/base.py new file mode 100644 index 00000000..cbbf0196 --- /dev/null +++ b/glancesync/glance/sync/base.py @@ -0,0 +1,606 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Jia Dong, HuaWei + +import copy +import httplib +import Queue +import threading +import time + +import eventlet +from oslo.config import cfg +import six.moves.urllib.parse as urlparse + +from glance.common import exception +from glance.common import utils +from glance.openstack.common import importutils +from glance.openstack.common import jsonutils +from glance.openstack.common import threadgroup +from glance.openstack.common import timeutils +import glance.openstack.common.log as logging + +from glance.sync import utils as s_utils +from glance.sync.clients import Clients as clients +from glance.sync.store.driver import StoreFactory as s_factory +from glance.sync.store.location import LocationFactory as l_factory +import glance.sync.store.glance_store as glance_store +from glance.sync.task import TaskObject +from glance.sync.task import PeriodicTask + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_opt('sync_strategy', 'glance.common.config', group='sync') +CONF.import_opt('task_retry_times', 'glance.common.config', group='sync') +CONF.import_opt('snapshot_timeout', 'glance.common.config', group='sync') +CONF.import_opt('snapshot_sleep_interval', 'glance.common.config', + group='sync') + + +def get_image_servcie(): + return ImageService + + +def create_glance_client(auth_token, url): + return clients(auth_token).glance(url=url) + + +def create_self_glance_client(auth_token): + return create_glance_client(auth_token, + s_utils.get_cascading_endpoint_url()) + + +def create_restful_client(auth_token, url): + pieces = urlparse.urlparse(url) + return _create_restful_client(auth_token, pieces.netloc) + + +def create_self_restful_client(auth_token): + return create_restful_client(auth_token, + s_utils.get_cascading_endpoint_url()) + + +def _create_restful_client(auth_token, url): + server, port = url.split(':') + conn = httplib.HTTPConnection(server.encode(), port.encode()) + image_service = get_image_servcie() + glance_client = image_service(conn, auth_token) + return glance_client + + +def get_mappings_from_image(auth_token, image_id): + client = create_self_glance_client(auth_token) + image = client.images.get(image_id) + locations = image.locations + if not locations: + return {} + return get_mappings_from_locations(locations) + + +def get_mappings_from_locations(locations): + mappings = {} + for loc in locations: + if s_utils.is_glance_location(loc['url']): + id = loc['metadata'].get('image_id') + if not id: + continue + ep_url = s_utils.create_ep_by_loc(loc) + mappings[ep_url] = id +# endpoints.append(utils.create_ep_by_loc(loc)) + return mappings + + +class AuthenticationException(Exception): + pass + + +class ImageAlreadyPresentException(Exception): + pass + + +class ServerErrorException(Exception): + pass + + +class UploadException(Exception): + pass + + +class ImageService(object): + + def __init__(self, conn, auth_token): + """Initialize the ImageService. + + conn: a httplib.HTTPConnection to the glance server + auth_token: authentication token to pass in the x-auth-token header + """ + self.auth_token = auth_token + self.conn = conn + + def _http_request(self, method, url, headers, body, + ignore_result_body=False): + """Perform an HTTP request against the server. 
+ + method: the HTTP method to use + url: the URL to request (not including server portion) + headers: headers for the request + body: body to send with the request + ignore_result_body: the body of the result will be ignored + + Returns: a httplib response object + """ + if self.auth_token: + headers.setdefault('x-auth-token', self.auth_token) + + LOG.debug(_('Request: %(method)s http://%(server)s:%(port)s' + '%(url)s with headers %(headers)s') + % {'method': method, + 'server': self.conn.host, + 'port': self.conn.port, + 'url': url, + 'headers': repr(headers)}) + self.conn.request(method, url, body, headers) + + response = self.conn.getresponse() + headers = self._header_list_to_dict(response.getheaders()) + code = response.status + code_description = httplib.responses[code] + LOG.debug(_('Response: %(code)s %(status)s %(headers)s') + % {'code': code, + 'status': code_description, + 'headers': repr(headers)}) + + if code in [400, 500]: + raise ServerErrorException(response.read()) + + if code in [401, 403]: + raise AuthenticationException(response.read()) + + if code == 409: + raise ImageAlreadyPresentException(response.read()) + + if ignore_result_body: + # NOTE: because we are pipelining requests through a single HTTP + # connection, httplib requires that we read the response body + # before we can make another request. If the caller knows they + # don't care about the body, they can ask us to do that for them. + response.read() + return response + + @staticmethod + def _header_list_to_dict(headers): + """Expand a list of headers into a dictionary. + + headers: a list of [(key, value), (key, value), (key, value)] + + Returns: a dictionary representation of the list + """ + d = {} + for (header, value) in headers: + if header.startswith('x-image-meta-property-'): + prop = header.replace('x-image-meta-property-', '') + d.setdefault('properties', {}) + d['properties'][prop] = value + else: + d[header.replace('x-image-meta-', '')] = value + return d + + @staticmethod + def _dict_to_headers(d): + """Convert a dictionary into one suitable for a HTTP request. + + d: a dictionary + + Returns: the same dictionary, with x-image-meta added to every key + """ + h = {} + for key in d: + if key == 'properties': + for subkey in d[key]: + if d[key][subkey] is None: + h['x-image-meta-property-%s' % subkey] = '' + else: + h['x-image-meta-property-%s' % subkey] = d[key][subkey] + + else: + h['x-image-meta-%s' % key] = d[key] + return h + + def add_location(self, image_uuid, path_val, metadata=None): + """ + add an actual location + """ + LOG.debug(_('call restful api to add location: url is %s' % path_val)) + metadata = metadata or {} + url = '/v2/images/%s' % image_uuid + hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'} + body = [] + value = {'url': path_val, 'metadata': metadata} + body.append({'op': 'add', 'path': '/locations/-', 'value': value}) + return self._http_request('PATCH', url, hdrs, jsonutils.dumps(body)) + + def clear_locations(self, image_uuid): + """ + clear all the location infos, make the image status be 'queued'. 
+ """ + LOG.debug(_('call restful api to clear image location: image id is %s' + % image_uuid)) + url = '/v2/images/%s' % image_uuid + hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'} + body = [] + body.append({'op': 'replace', 'path': '/locations', 'value': []}) + return self._http_request('PATCH', url, hdrs, jsonutils.dumps(body)) + + +class MetadataHelper(object): + + def execute(self, auth_token, endpoint, action_name='CREATE', + image_id=None, **kwargs): + + glance_client = create_glance_client(auth_token, endpoint) + if action_name.upper() == 'CREATE': + return self._do_create_action(glance_client, **kwargs) + if action_name.upper() == 'SAVE': + return self._do_save_action(glance_client, image_id, **kwargs) + if action_name.upper() == 'DELETE': + return self._do_delete_action(glance_client, image_id, **kwargs) + + return None + + @staticmethod + def _fetch_params(keys, **kwargs): + return tuple([kwargs.get(key, None) for key in keys]) + + def _do_create_action(self, glance_client, **kwargs): + body = kwargs['body'] + new_image = glance_client.images.create(**body) + return new_image.id + + def _do_save_action(self, glance_client, image_id, **kwargs): + keys = ['changes', 'removes', 'tags'] + changes, removes, tags = self._fetch_params(keys, **kwargs) + if changes or removes: + glance_client.images.update(image_id, + remove_props=removes, + **changes) + if tags: + if tags.get('add', None): + added = tags.get('add') + for tag in added: + glance_client.image_tags.update(image_id, tag) + elif tags.get('delete', None): + removed = tags.get('delete') + for tag in removed: + glance_client.image_tags.delete(image_id, tag) + return glance_client.images.get(image_id) + + def _do_delete_action(self, glance_client, image_id, **kwargs): + return glance_client.images.delete(image_id) + + +_task_queue = Queue.Queue(maxsize=150) + + +class SyncManagerV2(): + + MAX_TASK_RETRY_TIMES = 1 + + def __init__(self): + global _task_queue + self.mete_helper = MetadataHelper() + self.location_factory = l_factory() + self.store_factory = s_factory() + self.task_queue = _task_queue + self.task_handler = None + self.unhandle_task_list = [] + self.periodic_add_id_list = [] + self.periodic_add_done = True + self._load_glance_store_cfg() + self.ks_client = clients().keystone() + self.create_new_periodic_task = False + + def _load_glance_store_cfg(self): + glance_store.setup_glance_stores() + + def sync_image_metadata(self, image_id, auth_token, action, **kwargs): + if not action or CONF.sync.sync_strategy == 'None': + return + kwargs['image_id'] = image_id + if action == 'SAVE': + self.task_queue.put_nowait(TaskObject.get_instance('meta_update', + kwargs)) + elif action == 'DELETE': + self.task_queue.put_nowait(TaskObject.get_instance('meta_remove', + kwargs)) + + def sync_image_data(self, image_id, auth_token, eps=None, **kwargs): + if CONF.sync.sync_strategy == 'None': + return + + kwargs['image_id'] = image_id + cascading_ep = s_utils.get_cascading_endpoint_url() + kwargs['cascading_ep'] = cascading_ep + self.task_queue.put_nowait(TaskObject.get_instance('sync', kwargs)) + + def adding_locations(self, image_id, auth_token, locs, **kwargs): + if CONF.sync.sync_strategy == 'None': + return + for loc in locs: + if s_utils.is_glance_location(loc['url']): + if s_utils.is_snapshot_location(loc): + snapshot_ep = s_utils.create_ep_by_loc(loc) + snapshot_id = s_utils.get_id_from_glance_loc(loc) + snapshot_client = create_glance_client(auth_token, + snapshot_ep) + snapshot_image = 
snapshot_client.images.get(snapshot_id) + _pre_check_time = timeutils.utcnow() + _timout = CONF.sync.snapshot_timeout + while not timeutils.is_older_than(_pre_check_time, + _timout): + if snapshot_image.status == 'active': + break + LOG.debug(_('Check snapshot not active, wait for %i' + 'second.' + % CONF.sync.snapshot_sleep_interval)) + time.sleep(CONF.sync.snapshot_sleep_interval) + snapshot_image = snapshot_client.images.get( + snapshot_id) + + if snapshot_image.status != 'active': + LOG.error(_('Snapshot status to active Timeout')) + return + kwargs['image_id'] = image_id + kwargs['snapshot_ep'] = snapshot_ep + kwargs['snapshot_id'] = snapshot_id + snapshot_task = TaskObject.get_instance('snapshot', kwargs) + self.task_queue.put_nowait(snapshot_task) + else: + LOG.debug(_('patch a normal location %s to image %s' + % (loc['url'], image_id))) + input = {'image_id': image_id, 'location': loc} + self.task_queue.put_nowait(TaskObject.get_instance('patch', + input)) + + def removing_locations(self, image_id, auth_token, locs): + if CONF.sync.sync_strategy == 'None': + return + locs = filter(lambda loc: s_utils.is_glance_location(loc['url']), locs) + if not locs: + return + input = {'image_id': image_id, 'locations': locs} + remove_locs_task = TaskObject.get_instance('locs_remove', input) + self.task_queue.put_nowait(remove_locs_task) + + def clear_all_locations(self, image_id, auth_token, locs): + locs = filter(lambda loc: not s_utils.is_snapshot_location(loc), locs) + self.removing_locations(image_id, auth_token, locs) + + def create_new_cascaded_task(self, last_run_time=None): + LOG.debug(_('new_cascaded periodic task has been created.')) + glance_client = create_self_glance_client(self.ks_client.auth_token) + filters = {'status': 'active'} + image_list = glance_client.images.list(filters=filters) + input = {} + run_images = {} + cascading_ep = s_utils.get_cascading_endpoint_url() + input['cascading_ep'] = cascading_ep + input['image_id'] = 'ffffffff-ffff-ffff-ffff-ffffffffffff' + all_ep_urls = s_utils.get_endpoints() + for image in image_list: + glance_urls = [loc['url'] for loc in image.locations + if s_utils.is_glance_location(loc['url'])] + lack_ep_urls = s_utils.calculate_lack_endpoints(all_ep_urls, + glance_urls) + if lack_ep_urls: + image_core_props = s_utils.get_core_properties(image) + run_images[image.id] = {'body': image_core_props, + 'locations': lack_ep_urls} + if not run_images: + LOG.debug(_('No images need to sync to new cascaded glances.')) + input['images'] = run_images + return TaskObject.get_instance('periodic_add', input, + last_run_time=last_run_time) + + @staticmethod + def _fetch_params(keys, **kwargs): + return tuple([kwargs.get(key, None) for key in keys]) + + def _get_candidate_path(self, auth_token, from_ep, image_id, + scheme='file'): + g_client = create_glance_client(auth_token, from_ep) + image = g_client.images.get(image_id) + locs = image.locations or [] + for loc in locs: + if s_utils.is_glance_location(loc['url']): + continue + if loc['url'].startswith(scheme): + if scheme == 'file': + return loc['url'][len('file://'):] + return loc['url'] + return None + + def _do_image_data_copy(self, s_ep, d_ep, from_image_id, to_image_id, + candidate_path=None): + from_scheme, to_scheme = glance_store.choose_best_store_schemes(s_ep, + d_ep) + store_driver = self.store_factory.get_instance(from_scheme['name'], + to_scheme['name']) + from_params = from_scheme['parameters'] + from_params['image_id'] = from_image_id + to_params = to_scheme['parameters'] + 
to_params['image_id'] = to_image_id + from_location = self.location_factory.get_instance(from_scheme['name'], + **from_params) + to_location = self.location_factory.get_instance(to_scheme['name'], + **to_params) + return store_driver.copy_to(from_location, to_location, + candidate_path=candidate_path) + + def _patch_cascaded_location(self, auth_token, image_id, + cascaded_ep, cascaded_id, action=None): + self_restful_client = create_self_restful_client(auth_token) + path = s_utils.generate_glance_location(cascaded_ep, cascaded_id) + # add the auth_token, so this url can be visited, otherwise 404 error + path += '?auth_token=' + auth_token + metadata = {'image_id': cascaded_id} + if action: + metadata['action'] = action + self_restful_client.add_location(image_id, path, metadata) + + def meta_update(self, auth_token, cascaded_ep, image_id, **kwargs): + + return self.mete_helper.execute(auth_token, cascaded_ep, 'SAVE', + image_id, **kwargs) + + def meta_delete(self, auth_token, cascaded_ep, image_id): + + return self.mete_helper.execute(auth_token, cascaded_ep, 'DELETE', + image_id) + + def sync_image(self, auth_token, copy_ep, cascaded_ep, copy_image_id, + cascading_image_id, **kwargs): + # Firstly, crate an image object with cascading image's properties. + cascaded_id = self.mete_helper.execute(auth_token, cascaded_ep, + **kwargs) + try: + c_path = self._get_candidate_path(auth_token, copy_ep, + copy_image_id) + # execute copy operation to copy the image data. + copy_image_loc = self._do_image_data_copy(copy_ep, + cascaded_ep, + copy_image_id, + cascaded_id, + candidate_path=c_path) + # patch the copied image_data to the image + glance_client = create_restful_client(auth_token, cascaded_ep) + glance_client.add_location(cascaded_id, copy_image_loc) + # patch the glance location to cascading glance + + msg = _("patch glance location to cascading image, with cascaded " + "endpoint : %s, cascaded id: %s, cascading image id: %s." % + (cascaded_ep, cascaded_id, cascading_image_id)) + LOG.debug(msg) + self._patch_cascaded_location(auth_token, + cascading_image_id, + cascaded_ep, + cascaded_id, + action='upload') + return cascaded_id + except exception.SyncStoreCopyError as e: + LOG.error(_("Exception occurs when syncing store copy.")) + raise exception.SyncServiceOperationError(reason=e.msg) + + def do_snapshot(self, auth_token, snapshot_ep, cascaded_ep, + snapshot_image_id, cascading_image_id, **kwargs): + + return self.sync_image(auth_token, snapshot_ep, cascaded_ep, + snapshot_image_id, cascading_image_id, **kwargs) + + def patch_location(self, image_id, cascaded_id, auth_token, cascaded_ep, + location): + g_client = create_glance_client(auth_token, cascaded_ep) + cascaded_image = g_client.images.get(cascaded_id) + glance_client = create_restful_client(auth_token, cascaded_ep) + try: + glance_client.add_location(cascaded_id, location['url']) + if cascaded_image.status == 'queued': + self._patch_cascaded_location(auth_token, + image_id, + cascaded_ep, + cascaded_id, + action='patch') + except: + pass + + def remove_loc(self, cascaded_id, auth_token, cascaded_ep): + glance_client = create_glance_client(auth_token, cascaded_ep) + glance_client.images.delete(cascaded_id) + + def start(self): + # lanuch a new thread to read the task_task to handle. 
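+        # A single daemon thread drains the task queue so the blocking
+        # queue.get() never stalls request handling or process shutdown;
+        # each task is then executed in its own eventlet green thread
+        # (see tasks_handle below).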
+ _thread = threading.Thread(target=self.tasks_handle) + _thread.setDaemon(True) + _thread.start() + + def tasks_handle(self): + while True: + _task = self.task_queue.get() + if not isinstance(_task, TaskObject): + LOG.error(_('task type valid.')) + continue + LOG.debug(_('Task start to runs, task id is %s' % _task.id)) + _task.start_time = timeutils.strtime() + self.unhandle_task_list.append(copy.deepcopy(_task)) + + eventlet.spawn(_task.execute, self, self.ks_client.auth_token) + + def handle_tasks(self, task_result): + t_image_id = task_result.get('image_id') + t_type = task_result.get('type') + t_start_time = task_result.get('start_time') + t_status = task_result.get('status') + + handling_tasks = filter(lambda t: t.image_id == t_image_id and + t.start_time == t_start_time, + self.unhandle_task_list) + if not handling_tasks or len(handling_tasks) > 1: + LOG.error(_('The task not exist or duplicate, can not go handle. ' + 'Info is image: %(id)s, op_type: %(type)s, run time: ' + '%(time)s' + % {'id': t_image_id, + 'type': t_type, + 'time': t_start_time} + )) + return + + task = handling_tasks[0] + self.unhandle_task_list.remove(task) + + if isinstance(task, PeriodicTask): + LOG.debug(_('The periodic task executed done, with op %(type)s ' + 'runs at time: %(start_time)s, the status is ' + '%(status)s.' % + {'type': t_type, + 'start_time': t_start_time, + 'status': t_status + })) + + else: + if t_status == 'terminal': + LOG.debug(_('The task executed successful for image:' + '%(image_id)s with op %(type)s, which runs ' + 'at time: %(start_time)s' % + {'image_id': t_image_id, + 'type': t_type, + 'start_time': t_start_time + })) + elif t_status == 'param_error': + LOG.error(_('The task executed failed for params error. Image:' + '%(image_id)s with op %(type)s, which runs ' + 'at time: %(start_time)s' % + {'image_id': t_image_id, + 'type': t_type, + 'start_time': t_start_time + })) + elif t_status == 'error': + LOG.error(_('The task failed to execute. Detail info is: ' + '%(image_id)s with op %(op_type)s run_time:' + '%(start_time)s' % + {'image_id': t_image_id, + 'op_type': t_type, + 'start_time': t_start_time + })) diff --git a/glancesync/glance/sync/client/__init__.py b/glancesync/glance/sync/client/__init__.py new file mode 100644 index 00000000..114dad93 --- /dev/null +++ b/glancesync/glance/sync/client/__init__.py @@ -0,0 +1,46 @@ +from oslo.config import cfg + +sync_client_opts = [ + cfg.StrOpt('sync_client_protocol', default='http', + help=_('The protocol to use for communication with the ' + 'sync server. Either http or https.')), + cfg.StrOpt('sync_client_key_file', + help=_('The path to the key file to use in SSL connections ' + 'to the sync server.')), + cfg.StrOpt('sync_client_cert_file', + help=_('The path to the cert file to use in SSL connections ' + 'to the sync server.')), + cfg.StrOpt('sync_client_ca_file', + help=_('The path to the certifying authority cert file to ' + 'use in SSL connections to the sync server.')), + cfg.BoolOpt('sync_client_insecure', default=False, + help=_('When using SSL in connections to the sync server, ' + 'do not require validation via a certifying ' + 'authority.')), + cfg.IntOpt('sync_client_timeout', default=600, + help=_('The period of time, in seconds, that the API server ' + 'will wait for a sync request to complete. 
A ' + 'value of 0 implies no timeout.')), +] + +sync_client_ctx_opts = [ + cfg.BoolOpt('sync_use_user_token', default=True, + help=_('Whether to pass through the user token when ' + 'making requests to the sync.')), + cfg.StrOpt('sync_admin_user', secret=True, + help=_('The administrators user name.')), + cfg.StrOpt('sync_admin_password', secret=True, + help=_('The administrators password.')), + cfg.StrOpt('sync_admin_tenant_name', secret=True, + help=_('The tenant name of the administrative user.')), + cfg.StrOpt('sync_auth_url', + help=_('The URL to the keystone service.')), + cfg.StrOpt('sync_auth_strategy', default='noauth', + help=_('The strategy to use for authentication.')), + cfg.StrOpt('sync_auth_region', + help=_('The region for the authentication service.')), +] + +CONF = cfg.CONF +CONF.register_opts(sync_client_opts) +CONF.register_opts(sync_client_ctx_opts) diff --git a/glancesync/glance/sync/client/v1/__init__.py b/glancesync/glance/sync/client/v1/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/glancesync/glance/sync/client/v1/api.py b/glancesync/glance/sync/client/v1/api.py new file mode 100644 index 00000000..736df07b --- /dev/null +++ b/glancesync/glance/sync/client/v1/api.py @@ -0,0 +1,124 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Jia Dong, HuaWei + +import os + +from oslo.config import cfg + +from glance.common import exception +from glance.openstack.common import jsonutils +import glance.openstack.common.log as logging +from glance.sync.client.v1 import client + +CONF = cfg.CONF +CONF.import_opt('sync_server_host', 'glance.common.config') +CONF.import_opt('sync_server_port', 'glance.common.config') + +sync_client_ctx_opts = [ + cfg.BoolOpt('sync_send_identity_headers', default=False, + help=_("Whether to pass through headers containing user " + "and tenant information when making requests to " + "the sync. This allows the sync to use the " + "context middleware without the keystoneclients' " + "auth_token middleware, removing calls to the keystone " + "auth service. 
It is recommended that when using this " + "option, secure communication between glance api and " + "glance sync is ensured by means other than " + "auth_token middleware.")), +] +CONF.register_opts(sync_client_ctx_opts) + +_sync_client = 'glance.sync.client' +CONF.import_opt('sync_client_protocol', _sync_client) +CONF.import_opt('sync_client_key_file', _sync_client) +CONF.import_opt('sync_client_cert_file', _sync_client) +CONF.import_opt('sync_client_ca_file', _sync_client) +CONF.import_opt('sync_client_insecure', _sync_client) +CONF.import_opt('sync_client_timeout', _sync_client) +CONF.import_opt('sync_use_user_token', _sync_client) +CONF.import_opt('sync_admin_user', _sync_client) +CONF.import_opt('sync_admin_password', _sync_client) +CONF.import_opt('sync_admin_tenant_name', _sync_client) +CONF.import_opt('sync_auth_url', _sync_client) +CONF.import_opt('sync_auth_strategy', _sync_client) +CONF.import_opt('sync_auth_region', _sync_client) +CONF.import_opt('metadata_encryption_key', 'glance.common.config') + +_CLIENT_CREDS = None +_CLIENT_HOST = None +_CLIENT_PORT = None +_CLIENT_KWARGS = {} + + +def get_sync_client(cxt): + global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT + kwargs = _CLIENT_KWARGS.copy() + if CONF.sync_use_user_token: + kwargs['auth_tok'] = cxt.auth_tok + if _CLIENT_CREDS: + kwargs['creds'] = _CLIENT_CREDS + + if CONF.sync_send_identity_headers: + identity_headers = { + 'X-User-Id': cxt.user, + 'X-Tenant-Id': cxt.tenant, + 'X-Roles': ','.join(cxt.roles), + 'X-Identity-Status': 'Confirmed', + 'X-Service-Catalog': jsonutils.dumps(cxt.service_catalog), + } + kwargs['identity_headers'] = identity_headers + return client.SyncClient(_CLIENT_HOST, _CLIENT_PORT, **kwargs) + + +def configure_sync_client(): + + global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT + host, port = CONF.sync_server_host, CONF.sync_server_port + + _CLIENT_HOST = host + _CLIENT_PORT = port + _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key + _CLIENT_KWARGS = { + 'use_ssl': CONF.sync_client_protocol.lower() == 'https', + 'key_file': CONF.sync_client_key_file, + 'cert_file': CONF.sync_client_cert_file, + 'ca_file': CONF.sync_client_ca_file, + 'insecure': CONF.sync_client_insecure, + 'timeout': CONF.sync_client_timeout, + } + + if not CONF.sync_use_user_token: + configure_sync_admin_creds() + + +def configure_sync_admin_creds(): + global _CLIENT_CREDS + + if CONF.sync_auth_url or os.getenv('OS_AUTH_URL'): + strategy = 'keystone' + else: + strategy = CONF.sync_auth_strategy + + _CLIENT_CREDS = { + 'user': CONF.sync_admin_user, + 'password': CONF.sync_admin_password, + 'username': CONF.sync_admin_user, + 'tenant': CONF.sync_admin_tenant_name, + 'auth_url': CONF.sync_auth_url, + 'strategy': strategy, + 'region': CONF.sync_auth_region, + } diff --git a/glancesync/glance/sync/client/v1/client.py b/glancesync/glance/sync/client/v1/client.py new file mode 100644 index 00000000..05ef0486 --- /dev/null +++ b/glancesync/glance/sync/client/v1/client.py @@ -0,0 +1,106 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Jia Dong, HuaWei + +from glance.common.client import BaseClient +from glance.openstack.common import jsonutils +import glance.openstack.common.log as logging + + +LOG = logging.getLogger(__name__) + + +class SyncClient(BaseClient): + + DEFAULT_PORT = 9595 + + def __init__(self, host=None, port=DEFAULT_PORT, identity_headers=None, + **kwargs): + self.identity_headers = identity_headers + BaseClient.__init__(self, host, port, configure_via_auth=False, + **kwargs) + + def do_request(self, method, action, **kwargs): + try: + kwargs['headers'] = kwargs.get('headers', {}) + res = super(SyncClient, self).do_request(method, action, **kwargs) + status = res.status + request_id = res.getheader('x-openstack-request-id') + msg = (_("Sync request %(method)s %(action)s HTTP %(status)s" + " request id %(request_id)s") % + {'method': method, 'action': action, + 'status': status, 'request_id': request_id}) + LOG.debug(msg) + + except Exception as exc: + exc_name = exc.__class__.__name__ + LOG.info(_("Sync client request %(method)s %(action)s " + "raised %(exc_name)s"), + {'method': method, 'action': action, + 'exc_name': exc_name}) + raise + return res + + def _add_common_params(self, id, kwargs): + pass + + def update_image_matedata(self, image_id, **kwargs): + headers = { + 'Content-Type': 'application/json', + } + body = jsonutils.dumps(kwargs) + res = self.do_request("PATCH", "/v1/images/%s" % (image_id), body=body, + headers=headers) + return res + + def remove_image(self, image_id, **kwargs): + headers = { + 'Content-Type': 'application/json', + } + body = jsonutils.dumps(kwargs) + res = self.do_request("DELETE", "/v1/images/%s" % + (image_id), body=body, headers=headers) + return res + + def sync_data(self, image_id, **kwargs): + headers = { + 'Content-Type': 'application/json', + } + body = jsonutils.dumps(kwargs) + res = self.do_request("PUT", "/v1/images/%s" % (image_id), body=body, + headers=headers) + return res + + def sync_locations(self, image_id, action=None, locs=None, **kwargs): + headers = { + 'Content-Type': 'application/json', + } + kwargs['action'] = action + kwargs['locations'] = locs + body = jsonutils.dumps(kwargs) + res = self.do_request("PUT", "/v1/images/%s/location" % (image_id), + body=body, headers=headers) + return res + + def get_cascaded_endpoints(self, regions=[]): + headers = { + 'Content-Type': 'application/json', + } + + body = jsonutils.dumps({'regions': regions}) + res = self.do_request('POST', '/v1/cascaded-eps', body=body, + headers=headers) + return jsonutils.loads(res.read())['eps'] diff --git a/glancesync/glance/sync/clients.py b/glancesync/glance/sync/clients.py new file mode 100644 index 00000000..cadc8f4a --- /dev/null +++ b/glancesync/glance/sync/clients.py @@ -0,0 +1,89 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Jia Dong, HuaWei + +from oslo.config import cfg + +from keystoneclient.v2_0 import client as ksclient +import glance.openstack.common.log as logging +from glanceclient.v2 import client as gclient2 + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class Clients(object): + + def __init__(self, auth_token=None, tenant_id=None): + self._keystone = None + self._glance = None + self._cxt_token = auth_token + self._tenant_id = tenant_id + self._ks_conf = cfg.CONF.keystone_authtoken + + @property + def auth_token(self, token=None): + return token or self.keystone().auth_token + + @property + def ks_url(self): + protocol = self._ks_conf.auth_protocol or 'http' + auth_host = self._ks_conf.auth_host or '127.0.0.1' + auth_port = self._ks_conf.auth_port or '35357' + return protocol + '://' + auth_host + ':' + str(auth_port) + '/v2.0/' + + def url_for(self, **kwargs): + return self.keystone().service_catalog.url_for(**kwargs) + + def get_urls(self, **kwargs): + return self.keystone().service_catalog.get_urls(**kwargs) + + def keystone(self): + if self._keystone: + return self._keystone + + if self._cxt_token and self._tenant_id: + creds = {'token': self._cxt_token, + 'auth_url': self.ks_url, + 'project_id': self._tenant_id + } + else: + creds = {'username': self._ks_conf.admin_user, + 'password': self._ks_conf.admin_password, + 'auth_url': self.ks_url, + 'project_name': self._ks_conf.admin_tenant_name} + try: + self._keystone = ksclient.Client(**creds) + except Exception as e: + LOG.error(_('create keystone client error: reason: %s') % (e)) + return None + + return self._keystone + + def glance(self, auth_token=None, url=None): + gclient = gclient2 + if gclient is None: + return None + if self._glance: + return self._glance + args = { + 'token': auth_token or self.auth_token, + 'endpoint': url or self.url_for(service_type='image') + } + self._glance = gclient.Client(**args) + + return self._glance diff --git a/glancesync/glance/sync/pool.py b/glancesync/glance/sync/pool.py new file mode 100644 index 00000000..1e8fb938 --- /dev/null +++ b/glancesync/glance/sync/pool.py @@ -0,0 +1,33 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Jia Dong, HuaWei + +from concurrent.futures import ThreadPoolExecutor + +import glance.openstack.common.log as logging + + +LOG = logging.getLogger(__name__) + + +class ThreadPool(object): + + def __init__(self): + self.pool = ThreadPoolExecutor(128) + + def execute(self, func, *args, **kwargs): + LOG.info(_('execute %s in a thread pool') % (func.__name__)) + self.pool.submit(func, *args, **kwargs) diff --git a/glancesync/glance/sync/store/__init__.py b/glancesync/glance/sync/store/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/glancesync/glance/sync/store/_drivers/__init__.py b/glancesync/glance/sync/store/_drivers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/glancesync/glance/sync/store/_drivers/filesystem.py b/glancesync/glance/sync/store/_drivers/filesystem.py new file mode 100644 index 00000000..0ef3d1ff --- /dev/null +++ b/glancesync/glance/sync/store/_drivers/filesystem.py @@ -0,0 +1,171 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Jia Dong, HuaWei + +""" +A simple filesystem-backed store +""" + +import logging +import os +import sys + +from oslo.config import cfg +import pxssh +import pexpect + +from glance.common import exception +import glance.sync.store.driver +import glance.sync.store.location +from glance.sync.store.location import Location +from glance.sync import utils as s_utils + +LOG = logging.getLogger(__name__) + + +CONF = cfg.CONF +CONF.import_opt('scp_copy_timeout', 'glance.common.config', group='sync') + + +def _login_ssh(host, passwd): + child_ssh = pexpect.spawn('ssh -p 22 %s' % (host)) + child_ssh.logfile = sys.stdout + login_flag = True + while True: + ssh_index = child_ssh.expect(['.yes/no.', '.assword:.', + pexpect.TIMEOUT]) + if ssh_index == 0: + child_ssh.sendline('yes') + elif ssh_index == 1: + child_ssh.sendline(passwd) + break + else: + login_flag = False + break + if not login_flag: + return None + + return child_ssh + + +def _get_ssh(hostname, username, password): + s = pxssh.pxssh() + s.login(hostname, username, password, original_prompt='[#$>]') + s.logfile = sys.stdout + return s + + +class LocationCreator(glance.sync.store.location.LocationCreator): + + def __init__(self): + self.scheme = 'file' + + def create(self, **kwargs): + image_id = kwargs.get('image_id') + image_file_name = kwargs.get('image_name', None) or image_id + datadir = kwargs.get('datadir') + path = os.path.join(datadir, str(image_file_name)) + login_user = kwargs.get('login_user') + login_password = kwargs.get('login_password') + host = kwargs.get('host') + store_specs = {'scheme': self.scheme, 'path': path, 'host': host, + 'login_user': login_user, + 'login_password': login_password} + return Location(self.scheme, StoreLocation, image_id=image_id, + store_specs=store_specs) + + +class StoreLocation(glance.sync.store.location.StoreLocation): + + def process_specs(self): + self.scheme = self.specs.get('scheme', 'file') + self.path = 
self.specs.get('path') + self.host = self.specs.get('host') + self.login_user = self.specs.get('login_user') + self.login_password = self.specs.get('login_password') + + +class Store(glance.sync.store.driver.Store): + + def copy_to(self, from_location, to_location, candidate_path=None): + + from_store_loc = from_location.store_location + to_store_loc = to_location.store_location + + if from_store_loc.host == to_store_loc.host and \ + from_store_loc.path == to_store_loc.path: + + LOG.info(_('The from_loc is same to to_loc, no need to copy. the ' + 'host:path is %s:%s') % (from_store_loc.host, + from_store_loc.path)) + return 'file://%s' % to_store_loc.path + + from_host = r"""{username}@{host}""".format( + username=from_store_loc.login_user, + host=from_store_loc.host) + + to_host = r"""{username}@{host}""".format( + username=to_store_loc.login_user, + host=to_store_loc.host) + + to_path = r"""{to_host}:{path}""".format(to_host=to_host, + path=to_store_loc.path) + + copy_path = from_store_loc.path + + try: + from_ssh = _get_ssh(from_store_loc.host, + from_store_loc.login_user, + from_store_loc.login_password) + except Exception: + raise exception.SyncStoreCopyError(reason="ssh login failed.") + + from_ssh.sendline('ls %s' % copy_path) + from_ssh.prompt() + if 'cannot access' in from_ssh.before or \ + 'No such file' in from_ssh.before: + if candidate_path: + from_ssh.sendline('ls %s' % candidate_path) + from_ssh.prompt() + if 'cannot access' not in from_ssh.before and \ + 'No such file' not in from_ssh.before: + copy_path = candidate_path + else: + msg = _("the image path for copy to is not exists, file copy" + "failed: path is %s" % (copy_path)) + raise exception.SyncStoreCopyError(reason=msg) + + from_ssh.sendline('scp -P 22 %s %s' % (copy_path, to_path)) + while True: + scp_index = from_ssh.expect(['.yes/no.', '.assword:.', + pexpect.TIMEOUT]) + if scp_index == 0: + from_ssh.sendline('yes') + from_ssh.prompt() + elif scp_index == 1: + from_ssh.sendline(to_store_loc.login_password) + from_ssh.prompt(timeout=CONF.sync.scp_copy_timeout) + break + else: + msg = _("scp commond execute failed, with copy_path %s and " + "to_path %s" % (copy_path, to_path)) + raise exception.SyncStoreCopyError(reason=msg) + break + + if from_ssh: + from_ssh.logout() + + return 'file://%s' % to_store_loc.path diff --git a/glancesync/glance/sync/store/driver.py b/glancesync/glance/sync/store/driver.py new file mode 100644 index 00000000..e1275187 --- /dev/null +++ b/glancesync/glance/sync/store/driver.py @@ -0,0 +1,63 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Jia Dong, HuaWei + +"""Base class for all storage backends""" + +from oslo.config import cfg +from stevedore import extension + +from glance.common import exception +import glance.openstack.common.log as logging +from glance.openstack.common.gettextutils import _ +from glance.openstack.common import importutils +from glance.openstack.common import strutils + +LOG = logging.getLogger(__name__) + + +class StoreFactory(object): + + SYNC_STORE_NAMESPACE = "glance.sync.store.driver" + + def __init__(self): + self._stores = {} + self._load_store_drivers() + + def _load_store_drivers(self): + extension_manager = extension.ExtensionManager( + namespace=self.SYNC_STORE_NAMESPACE, + invoke_on_load=True, + ) + for ext in extension_manager: + if ext.name in self._stores: + continue + ext.obj.name = ext.name + self._stores[ext.name] = ext.obj + + def get_instance(self, from_scheme='filesystem', to_scheme=None): + _store_driver = self._stores.get(from_scheme) + if to_scheme and to_scheme != from_scheme and _store_driver: + func_name = 'copy_to_%s' % to_scheme + if not getattr(_store_driver, func_name, None): + return None + return _store_driver + + +class Store(object): + + def copy_to(self, source_location, dest_location, candidate_path=None): + pass diff --git a/glancesync/glance/sync/store/glance_store.py b/glancesync/glance/sync/store/glance_store.py new file mode 100644 index 00000000..480365e8 --- /dev/null +++ b/glancesync/glance/sync/store/glance_store.py @@ -0,0 +1,111 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Jia Dong, HuaWei + +import fnmatch +import operator +import os + +from oslo.config import cfg +import yaml + +from glance.sync import utils as s_utils + + +OPTS = [ + cfg.StrOpt('glance_store_cfg_file', + default="glance_store.yaml", + help="Configuration file for glance's store location " + "definition." 
+ ), +] + +PRIOR_SOTRE_SCHEMES = ['filesystem', 'http', 'swift'] + +cfg.CONF.register_opts(OPTS) + + +def choose_best_store_schemes(source_endpoint, dest_endpoint): + global GLANCE_STORES + source_host = s_utils.get_host_from_ep(source_endpoint) + dest_host = s_utils.get_host_from_ep(dest_endpoint) + source_store = GLANCE_STORES.get_glance_store(source_host) + dest_store = GLANCE_STORES.get_glance_store(dest_host) + tmp_dict = {} + for s_scheme in source_store.schemes: + s_scheme_name = s_scheme['name'] + for d_scheme in dest_store.schemes: + d_scheme_name = d_scheme['name'] + if s_scheme_name == d_scheme_name: + tmp_dict[s_scheme_name] = (s_scheme, d_scheme) + if tmp_dict: + return tmp_dict[sorted(tmp_dict, key=lambda scheme: + PRIOR_SOTRE_SCHEMES.index(scheme))[0]] + + return (source_store.schemes[0], dest_store.schemes[0]) + + +class GlanceStore(object): + + def __init__(self, service_ip, name, schemes): + self.service_ip = service_ip + self.name = name + self.schemes = schemes + + +class ImageObject(object): + + def __init__(self, image_id, glance_store): + self.image_id = image_id + self.glance_store = glance_store + + +class GlanceStoreManager(object): + + def __init__(self, cfg): + self.cfg = cfg + self.g_stores = [] + + cfg_items = cfg['glances'] + for item in cfg_items: + self.g_stores.append(GlanceStore(item['service_ip'], + item['name'], + item['schemes'])) + + def get_glance_store(self, service_ip): + for g_store in self.g_stores: + if service_ip == g_store.service_ip: + return g_store + return None + + def generate_Image_obj(self, image_id, endpoint): + g_store = self.get_glance_store(s_utils.get_host_from_ep(endpoint)) + return ImageObject(image_id, g_store) + + +GLANCE_STORES = None + + +def setup_glance_stores(): + global GLANCE_STORES + cfg_file = cfg.CONF.glance_store_cfg_file + if not os.path.exists(cfg_file): + cfg_file = cfg.CONF.find_file(cfg_file) + with open(cfg_file) as fap: + data = fap.read() + + locs_cfg = yaml.safe_load(data) + GLANCE_STORES = GlanceStoreManager(locs_cfg) diff --git a/glancesync/glance/sync/store/location.py b/glancesync/glance/sync/store/location.py new file mode 100644 index 00000000..1c1e3346 --- /dev/null +++ b/glancesync/glance/sync/store/location.py @@ -0,0 +1,95 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Jia Dong, HuaWei + +import logging +import urlparse + +from stevedore import extension + +LOG = logging.getLogger(__name__) + + +class LocationCreator(object): + + def __init__(self): + self.scheme = None + + def creator(self, **kwargs): + pass + + +class Location(object): + + """ + Class describing the location of an image that Glance knows about + """ + + def __init__(self, store_name, store_location_class, + uri=None, image_id=None, store_specs=None): + """ + Create a new Location object. 
+ + :param store_name: The string identifier/scheme of the storage backend + :param store_location_class: The store location class to use + for this location instance. + :param image_id: The identifier of the image in whatever storage + backend is used. + :param uri: Optional URI to construct location from + :param store_specs: Dictionary of information about the location + of the image that is dependent on the backend + store + """ + self.store_name = store_name + self.image_id = image_id + self.store_specs = store_specs or {} + self.store_location = store_location_class(self.store_specs) + + +class StoreLocation(object): + + """ + Base class that must be implemented by each store + """ + + def __init__(self, store_specs): + self.specs = store_specs + if self.specs: + self.process_specs() + + +class LocationFactory(object): + + SYNC_LOCATION_NAMESPACE = "glance.sync.store.location" + + def __init__(self): + self._locations = {} + self._load_locations() + + def _load_locations(self): + extension_manager = extension.ExtensionManager( + namespace=self.SYNC_LOCATION_NAMESPACE, + invoke_on_load=True, + ) + for ext in extension_manager: + if ext.name in self._locations: + continue + ext.obj.name = ext.name + self._locations[ext.name] = ext.obj + + def get_instance(self, scheme, **kwargs): + loc_creator = self._locations.get(scheme, None) + return loc_creator.create(**kwargs) diff --git a/glancesync/glance/sync/task/__init__.py b/glancesync/glance/sync/task/__init__.py new file mode 100644 index 00000000..85a0d319 --- /dev/null +++ b/glancesync/glance/sync/task/__init__.py @@ -0,0 +1,349 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Jia Dong, HuaWei + +import threading +import Queue +import uuid + +import eventlet +from oslo.config import cfg + +import glance.openstack.common.log as logging +from glance.openstack.common import timeutils +from glance.sync import utils as s_utils + +LOG = logging.getLogger(__name__) + + +snapshot_opt = [ + cfg.ListOpt('snapshot_region_names', + default=[], + help=_("for what regions the snapshot sync to"), + deprecated_opts=[cfg.DeprecatedOpt('snapshot_region_names', + group='DEFAULT')]), +] + +CONF = cfg.CONF +CONF.register_opts(snapshot_opt) + + +class TaskObject(object): + + def __init__(self, type, input, retry_times=0): + self.id = str(uuid.uuid4()) + self.type = type + self.input = input + self.image_id = self.input.get('image_id') + self.status = 'new' + self.retry_times = retry_times + self.start_time = None + + @classmethod + def get_instance(cls, type, input, **kwargs): + _type_cls_dict = {'meta_update': MetaUpdateTask, + 'meta_remove': MetaDeleteTask, + 'sync': ImageActiveTask, + 'snapshot': PatchSnapshotLocationTask, + 'patch': PatchLocationTask, + 'locs_remove': RemoveLocationsTask, + 'periodic_add': ChkNewCascadedsPeriodicTask} + + if _type_cls_dict.get(type): + return _type_cls_dict[type](input, **kwargs) + + return None + + def _handle_result(self, sync_manager): + return sync_manager.handle_tasks({'image_id': self.image_id, + 'type': self.type, + 'start_time': self.start_time, + 'status': self.status + }) + + def execute(self, sync_manager, auth_token): + if not self.checkInput(): + self.status = 'param_error' + LOG.error(_('the input content not valid: %s.' % (self.input))) + return self._handle_result(sync_manager) + + try: + self.status = 'running' + green_threads = self.create_green_threads(sync_manager, auth_token) + for gt in green_threads: + gt.wait() + except Exception as e: + msg = _("Unable to execute task of image %(image_id)s: %(e)s") % \ + {'image_id': self.image_id, 'e': unicode(e)} + LOG.exception(msg) + self.status = 'error' + else: + self.status = 'terminal' + + return self._handle_result(sync_manager) + + def checkInput(self): + if not self.input.pop('image_id', None): + LOG.warn(_('No cascading image_id specified.')) + return False + return self.do_checkInput() + + +class MetaUpdateTask(TaskObject): + + def __init__(self, input): + super(MetaUpdateTask, self).__init__('meta_update', input) + + def do_checkInput(self): + params = self.input + changes = params.get('changes') + removes = params.get('removes') + tags = params.get('tags') + if not changes and not removes and not tags: + LOG.warn(_('No changes and removes and tags with the glance.')) + return True + + def create_green_threads(self, sync_manager, auth_token): + green_threads = [] + cascaded_mapping = s_utils.get_mappings_from_image(auth_token, + self.image_id) + for cascaded_ep in cascaded_mapping: + cascaded_id = cascaded_mapping[cascaded_ep] + green_threads.append(eventlet.spawn(sync_manager.meta_update, + auth_token, + cascaded_ep, + image_id=cascaded_id, + **self.input)) + + return green_threads + + +class MetaDeleteTask(TaskObject): + + def __init__(self, input): + super(MetaDeleteTask, self).__init__('meta_remove', input) + + def do_checkInput(self): + self.locations = self.input.get('locations') + return self.locations is not None + + def create_green_threads(self, sync_manager, auth_token): + green_threads = [] + cascaded_mapping = s_utils.get_mappings_from_locations(self.locations) + for cascaded_ep in cascaded_mapping: + cascaded_id = cascaded_mapping[cascaded_ep] + 
green_threads.append(eventlet.spawn(sync_manager.meta_delete, + auth_token, + cascaded_ep, + image_id=cascaded_id)) + + return green_threads + + +class ImageActiveTask(TaskObject): + + def __init__(self, input): + super(ImageActiveTask, self).__init__('sync', input) + + def do_checkInput(self): + image_data = self.input.get('body') + self.cascading_endpoint = self.input.get('cascading_ep') + return image_data and self.cascading_endpoint + + def create_green_threads(self, sync_manager, auth_token): + green_threads = [] + cascaded_eps = s_utils.get_endpoints(auth_token) + for cascaded_ep in cascaded_eps: + green_threads.append(eventlet.spawn(sync_manager.sync_image, + auth_token, + self.cascading_endpoint, + cascaded_ep, + self.image_id, + self.image_id, + **self.input)) + + return green_threads + + +class PatchSnapshotLocationTask(TaskObject): + + def __init__(self, input): + super(PatchSnapshotLocationTask, self).__init__('snapshot', input) + + def do_checkInput(self): + image_metadata = self.input.get('body') + self.snapshot_endpoint = self.input.pop('snapshot_ep', None) + self.snapshot_id = self.input.pop('snapshot_id', None) + return image_metadata and self.snapshot_endpoint and self.snapshot_id + + def create_green_threads(self, sync_manager, auth_token): + green_threads = [] + _region_names = CONF.snapshot_region_names + cascaded_mapping = s_utils.get_endpoints(auth_token, + region_names=_region_names) + try: + if self.snapshot_endpoint in cascaded_mapping: + cascaded_mapping.remove(self.snapshot_endpoint) + except TypeError: + pass + for cascaded_ep in cascaded_mapping: + green_threads.append(eventlet.spawn(sync_manager.do_snapshot, + auth_token, + self.snapshot_endpoint, + cascaded_ep, + self.snapshot_id, + self.image_id, + **self.input)) + + return green_threads + + +class PatchLocationTask(TaskObject): + + def __init__(self, input): + super(PatchLocationTask, self).__init__('patch', input) + + def do_checkInput(self): + self.location = self.input.get('location') + return self.location is not None + + def create_green_threads(self, sync_manager, auth_token): + green_threads = [] + cascaded_mapping = s_utils.get_mappings_from_image(auth_token, + self.image_id) + for cascaded_ep in cascaded_mapping: + cascaded_id = cascaded_mapping[cascaded_ep] + green_threads.append(eventlet.spawn(sync_manager.patch_location, + self.image_id, + cascaded_id, + auth_token, + cascaded_ep, + self.location)) + return green_threads + + +class RemoveLocationsTask(TaskObject): + + def __init__(self, input): + super(RemoveLocationsTask, self).__init__('locs_remove', input) + + def do_checkInput(self): + self.locations = self.input.get('locations') + return self.locations is not None + + def create_green_threads(self, sync_manager, auth_token): + green_threads = [] + cascaded_mapping = s_utils.get_mappings_from_locations(self.locations) + for cascaded_ep in cascaded_mapping: + cascaded_id = cascaded_mapping[cascaded_ep] + green_threads.append(eventlet.spawn(sync_manager.remove_loc, + cascaded_id, + auth_token, + cascaded_ep)) + return green_threads + + +class PeriodicTask(TaskObject): + + MAX_SLEEP_SECONDS = 15 + + def __init__(self, type, input, interval, last_run_time, run_immediately): + super(PeriodicTask, self).__init__(type, input) + self.interval = interval + self.last_run_time = last_run_time + self.run_immediately = run_immediately + + def do_checkInput(self): + if not self.interval or self.interval < 0: + LOG.error(_('The Periodic Task interval invaild.')) + return False + + return True + + def 
ready(self): + # first time to run + if self.last_run_time is None: + self.last_run_time = timeutils.strtime() + return self.run_immediately + return timeutils.is_older_than(self.last_run_time, self.interval) + + def execute(self, sync_manager, auth_token): + while not self.ready(): + LOG.debug(_('the periodic task has not ready yet, sleep a while.' + 'current_start_time is %s, last_run_time is %s, and ' + 'the interval is %i.' % (self.start_time, + self.last_run_time, + self.interval))) + _max_sleep_time = self.MAX_SLEEP_SECONDS + eventlet.sleep(seconds=max(self.interval / 10, _max_sleep_time)) + + super(PeriodicTask, self).execute(sync_manager, auth_token) + + +class ChkNewCascadedsPeriodicTask(PeriodicTask): + + def __init__(self, input, interval=60, last_run_time=None, + run_immediately=False): + + super(ChkNewCascadedsPeriodicTask, self).__init__('periodic_add', + input, interval, + last_run_time, + run_immediately) + LOG.debug(_('create ChkNewCascadedsPeriodicTask.')) + + def do_checkInput(self): + self.images = self.input.get('images') + self.cascading_endpoint = self.input.get('cascading_ep') + if self.images is None or not self.cascading_endpoint: + return False + return super(ChkNewCascadedsPeriodicTask, self).do_checkInput() + + def _stil_need_synced(self, cascaded_ep, image_id, auth_token): + g_client = s_utils.create_self_glance_client(auth_token) + try: + image = g_client.images.get(image_id) + except Exception: + LOG.warn(_('The add cascaded periodic task checks that the image ' + 'has deleted, no need to sync. id is %s' % image_id)) + return False + else: + if image.status != 'active': + LOG.warn(_('The add cascaded period task checks image status ' + 'not active, no need to sync.' + 'image id is %s.' % image_id)) + return False + ep_list = [loc['url'] for loc in image.locations + if s_utils.is_glance_location(loc['url'])] + return not s_utils.is_ep_contains(cascaded_ep, ep_list) + + def create_green_threads(self, sync_manager, auth_token): + green_threads = [] + for image_id in self.images: + cascaded_eps = self.images[image_id].get('locations') + kwargs = {'body': self.images[image_id].get('body')} + for cascaded_ep in cascaded_eps: + if not self._stil_need_synced(cascaded_ep, + image_id, auth_token): + continue + green_threads.append(eventlet.spawn(sync_manager.sync_image, + auth_token, + self.cascading_endpoint, + cascaded_ep, + image_id, + image_id, + **kwargs)) + + return green_threads diff --git a/glancesync/glance/sync/utils.py b/glancesync/glance/sync/utils.py new file mode 100644 index 00000000..3dcef7b4 --- /dev/null +++ b/glancesync/glance/sync/utils.py @@ -0,0 +1,215 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Jia Dong, HuaWei + +import re + +from oslo.config import cfg +import six.moves.urllib.parse as urlparse + +from glance.sync.clients import Clients as clients + +CONF = cfg.CONF +CONF.import_opt('cascading_endpoint_url', 'glance.common.config', group='sync') +CONF.import_opt('sync_strategy', 'glance.common.config', group='sync') + + +def create_glance_client(auth_token, url): + """ + create glance clients + """ + return clients(auth_token).glance(url=url) + + +def create_self_glance_client(auth_token): + return create_glance_client(auth_token, get_cascading_endpoint_url()) + + +def get_mappings_from_image(auth_token, image_id): + """ + get image's patched glance-locations + """ + client = create_self_glance_client(auth_token) + image = client.images.get(image_id) + locations = image.locations + if not locations: + return {} + return get_mappings_from_locations(locations) + + +def get_mappings_from_locations(locations): + mappings = {} + for loc in locations: + if is_glance_location(loc['url']): + id = loc['metadata'].get('image_id') + if not id: + continue + ep_url = create_ep_by_loc(loc) + mappings[ep_url] = id + return mappings + + +def get_cascading_endpoint_url(): + return CONF.sync.cascading_endpoint_url + + +def get_host_from_ep(ep_url): + if not ep_url: + return None + pieces = urlparse.urlparse(ep_url) + return pieces.netloc.split(':')[0] + +pattern = re.compile(r'^https?://\S+/v2/images/\S+$') + + +def get_default_location(locations): + for location in locations: + if is_default_location(location): + return location + return None + + +def is_glance_location(loc_url): + return pattern.match(loc_url) + + +def is_snapshot_location(location): + l_meta = location['metadata'] + return l_meta and l_meta.get('image_from', None) in['snapshot', 'volume'] + + +def get_id_from_glance_loc(location): + if not is_glance_location(location['url']): + return None + loc_meta = location['metadata'] + if not loc_meta: + return None + return loc_meta.get('image_id', None) + + +def is_default_location(location): + try: + return not is_glance_location(location['url']) \ + and location['metadata']['is_default'] == 'true' + except: + return False + + +def get_snapshot_glance_loc(locations): + for location in locations: + if is_snapshot_location(location): + return location + return None + + +def create_ep_by_loc(location): + loc_url = location['url'] + if not is_glance_location(loc_url): + return None + piece = urlparse.urlparse(loc_url) + return piece.scheme + '://' + piece.netloc + '/' + + +def generate_glance_location(ep, image_id, port=None): + default_port = port or '9292' + piece = urlparse.urlparse(ep) + paths = [] + paths.append(piece.scheme) + paths.append('://') + paths.append(piece.netloc.split(':')[0]) + paths.append(':') + paths.append(default_port) + paths.append('/v2/images/') + paths.append(image_id) + return ''.join(paths) + + +def get_endpoints(auth_token=None, tenant_id=None, **kwargs): + """ + find which glance should be sync by strategy config + """ + strategy = CONF.sync.sync_strategy + if strategy not in ['All', 'User']: + return None + + openstack_clients = clients(auth_token, tenant_id) + ksclient = openstack_clients.keystone() + + ''' + suppose that the cascading glance is 'public' endpoint type, and the + cascaded glacne endpoints are 'internal' + ''' + regions = kwargs.pop('region_names', []) + if strategy == 'All' and not regions: + urls = ksclient.service_catalog.get_urls(service_type='image', + endpoint_type='publicURL') + if urls: + result = [u for u in urls if 
u != get_cascading_endpoint_url()] + else: + result = [] + return result + else: + user_urls = [] + for region_name in regions: + urls = ksclient.service_catalog.get_urls(service_type='image', + endpoint_type='publicURL', + region_name=region_name) + if urls: + user_urls.extend(urls) + result = [u for u in set(user_urls) if u != + get_cascading_endpoint_url()] + return result + + +_V2_IMAGE_CREATE_PROPERTIES = ['container_format', + 'disk_format', 'min_disk', 'min_ram', 'name', + 'virtual_size', 'visibility', 'protected'] + + +def get_core_properties(image): + """ + when sync, create image object, get the sync info + """ + _tags = list(image.tags) or [] + kwargs = {} + for key in _V2_IMAGE_CREATE_PROPERTIES: + try: + value = getattr(image, key, None) + if value and value != 'None': + kwargs[key] = value + except KeyError: + pass + if _tags: + kwargs['tags'] = _tags + return kwargs + + +def calculate_lack_endpoints(all_ep_urls, glance_urls): + """ + calculate endpoints which exists in all_eps but not in glance_eps + """ + if not glance_urls: + return all_ep_urls + + def _contain(ep): + _hosts = [urlparse.urlparse(_ep).netloc for _ep in glance_urls] + return not urlparse.urlparse(ep).netloc in _hosts + return filter(_contain, all_ep_urls) + + +def is_ep_contains(ep_url, glance_urls): + _hosts = [urlparse.urlparse(_ep).netloc for _ep in glance_urls] + return urlparse.urlparse(ep_url) in _hosts diff --git a/glancesync/installation/install.sh b/glancesync/installation/install.sh new file mode 100644 index 00000000..87f99ae8 --- /dev/null +++ b/glancesync/installation/install.sh @@ -0,0 +1,152 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +CURPATH=$(cd "$(dirname "$0")"; pwd) +_GLANCE_CONF_DIR="/etc/glance" +_GLANCE_API_CONF_FILE="glance-api.conf" +_GLANCE_SYNC_CMD_FILE="glance-sync" +_PYTHON_INSTALL_DIR="/usr/lib64/python2.6/site-packages" +_GLANCE_DIR="${_PYTHON_INSTALL_DIR}/glance" + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="${CURPATH}/../glance" +_CONF_DIR="${CURPATH}/../etc" +_BACKUP_DIR="${_GLANCE_DIR}/glance-sync-backup" + +_SCRIPT_LOGFILE="/var/log/glance/installation/install.log" + +api_config_option_list="sync_enabled=True sync_server_port=9595 sync_server_host=127.0.0.1" + +export PS4='+{$LINENO:${FUNCNAME[0]}}' + +ERRTRAP() +{ + echo "[LINE:$1] Error: Command or function exited with status $?" +} + +function log() +{ + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE +} + + +function process_stop +{ + PID=`ps -efw|grep "$1"|grep -v grep|awk '{print $2}'` + echo "PID is: $PID">>$_SCRIPT_LOGFILE + if [ "x${PID}" != "x" ]; then + for kill_id in $PID + do + kill -9 ${kill_id} + if [ $? 
-ne 0 ]; then + echo "[[stop glance-sync]]$1 stop failed.">>$_SCRIPT_LOGFILE + exit 1 + fi + done + echo "[[stop glance-sync]]$1 stop ok.">>$_SCRIPT_LOGFILE + fi +} + + +trap 'ERRTRAP $LINENO' ERR + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + +if [ ! -d "/var/log/glance/installation" ]; then + mkdir /var/log/glance/installation + touch _SCRIPT_LOGFILE +fi + +cd `dirname $0` + +log "checking installation directories..." +if [ ! -d "${_GLANCE_DIR}" ] ; then + log "Could not find the glance installation. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi +if [ ! -f "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}" ] ; then + log "Could not find glance-api config file. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi +if [ ! -f "${_CONF_DIR}/${_GLANCE_SYNC_CMD_FILE}" ]; then + log "Could not find the glance-sync file. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi + +log "checking previous installation..." +if [ -d "${_BACKUP_DIR}/glance" ] ; then + log "It seems glance cascading has already been installed!" + log "Please check README for solution if this is not true." + exit 1 +fi + +log "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}/glance" +mkdir -p "${_BACKUP_DIR}/etc" +mkdir -p "${_BACKUP_DIR}/etc/glance" +cp -rf "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}" "${_BACKUP_DIR}/etc/glance/" + +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/glance" + rm -r "${_BACKUP_DIR}/etc" + log "Error in config backup, aborted." + exit 1 +fi + +log "copying in new files..." +cp -r "${_CODE_DIR}" `dirname ${_GLANCE_DIR}` +cp -r "${_CONF_DIR}/glance" "/etc" +cp "${_CONF_DIR}/${_GLANCE_SYNC_CMD_FILE}" "/usr/bin/" +if [ $? -ne 0 ] ; then + log "Error in copying, aborted." + log "Recovering original files..." + cp -r "${_BACKUP_DIR}/glance" `dirname ${_GLANCE_DIR}` && rm -r "${_BACKUP_DIR}/glance" + cp "${_BACKUP_DIR}/etc/glance/*.conf" `dirname ${_GLANCE_CONF_DIR}` && rm -r "${_BACKUP_DIR}/etc" + if [ $? -ne 0 ] ; then + log "Recovering failed! Please install manually." + fi + exit 1 +fi + +log "updating config file..." +for option in $api_config_option_list +do + sed -i -e "/$option/d" "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}" + sed -i -e "/DEFAULT/a $option" "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}" +done + + +log "restarting glance ..." +service openstack-glance-api restart +service openstack-glance-registry restart +process_stop "glance-sync" +python /usr/bin/glance-sync --config-file=/etc/glance/glance-sync.conf & +if [ $? -ne 0 ] ; then + log "There was an error in restarting the service, please restart glance manually." + exit 1 +fi + +log "Completed." +log "See README to get started." + +exit 0 diff --git a/icehouse-patches/cinder/README.md b/icehouse-patches/cinder/README.md new file mode 100644 index 00000000..98b91e80 --- /dev/null +++ b/icehouse-patches/cinder/README.md @@ -0,0 +1,43 @@ +Cinder create volume from image bug +=============================== +Openstack cascade current is developed based on Icehouse version. While +in Icehouse version there is a bug about creating volume from image and uploading volume to image. +Please referer to the http links https://bugs.launchpad.net/cinder/+bug/1308058 for details. +This bug is recommended to fix in cascaded cinder. 
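For illustration, the fix described below amounts to supplying a default to `getattr` so that attributes missing from the glance image object (such as `checksum`) no longer raise. The sketch that follows is a simplified stand-in for `_extract_attributes` with an abbreviated, illustrative attribute list; it is not the exact upstream cinder code:

```
# Simplified sketch of the tolerant attribute extraction; the attribute
# list here is an illustrative subset, not the full list used by cinder.
def _extract_attributes(image):
    image_attributes = ['id', 'size', 'disk_format',
                        'container_format', 'checksum']
    output = {}
    for attr in image_attributes:
        # A default of None avoids AttributeError when the image object
        # returned by glance does not carry the attribute.
        output[attr] = getattr(image, attr, None)
    return output
```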
+ +Key modules +----------- + +* when create volume from image or upload volume to image, cinder will call a function in glance.py +to check image metadata, while not all the metadata will be included from glance image information. +As a result, in the function _extract_attributes included in file,not all the element such as "cheksum" +will be validated : + + cinder/image/glance.py + + +Requirements +------------ +* openstack icehouse has been installed + +Installation +------------ + +We suggest a way to fix the cinder-image-metadata bug. In this section, +we will guide you through fix this image metadata bug. + +* **Note:** + + - Make sure you have an existing installation of **Openstack Icehouse**. + - We recommend that you Do backup at least the following files before installation, + because they are to be overwritten or modified. + +* **Manual Installation as the OpenStack Community suggest** + + mofify "output[attr] = getattr(image, attr)" to "output[attr] = getattr(image, attr, None)" + in _extract_attributes cinder/image/glance.py,Line 434 around + + + + + diff --git a/icehouse-patches/cinder/timestamp-query-patch/README.md b/icehouse-patches/cinder/timestamp-query-patch/README.md new file mode 100644 index 00000000..4a46947e --- /dev/null +++ b/icehouse-patches/cinder/timestamp-query-patch/README.md @@ -0,0 +1,54 @@ +Cinder timestamp-query-patch +=============================== +it will be patched in cascaded level's control node + +cinder icehouse version database has update_at attribute for change_since +query filter function, however cinder db api this version don't support +timestamp query function. So it is needed to make this patch in cascaded level +while syncronization state between cascading and cascaded openstack level + +Key modules +----------- + +* adding timestamp query function while list volumes: + + cinder\db\sqlalchemy\api.py + + +Requirements +------------ +* openstack icehouse has been installed + +Installation +------------ + +We provide two ways to install the timestamp query patch code. In this section, we will guide you through installing the timestamp query patch. + +* **Note:** + + - Make sure you have an existing installation of **Openstack Icehouse**. + - We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified: + +* **Manual Installation** + + - Make sure you have performed backups properly. + + - Navigate to the local repository and copy the contents in 'cinder' sub-directory to the corresponding places in existing cinder, e.g. + ```cp -r $LOCAL_REPOSITORY_DIR/cinder $CINDER_PARENT_DIR``` + (replace the $... with actual directory name.) + + - restart cinder api service + + - Done. The cinder proxy should be working with a demo configuration. + +* **Automatic Installation** + + - Make sure you have performed backups properly. + + - Navigate to the installation directory and run installation script. + ``` + cd $LOCAL_REPOSITORY_DIR/installation + sudo bash ./install.sh + ``` + (replace the $... with actual directory name.) + diff --git a/icehouse-patches/cinder/timestamp-query-patch/cinder/db/sqlalchemy/api.py b/icehouse-patches/cinder/timestamp-query-patch/cinder/db/sqlalchemy/api.py new file mode 100644 index 00000000..8b4617ec --- /dev/null +++ b/icehouse-patches/cinder/timestamp-query-patch/cinder/db/sqlalchemy/api.py @@ -0,0 +1,2830 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. 
+# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2014 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of SQLAlchemy backend.""" + + +import sys +import uuid +import warnings + +from oslo.config import cfg +from sqlalchemy.exc import IntegrityError +from sqlalchemy import or_ +from sqlalchemy.orm import joinedload, joinedload_all +from sqlalchemy.orm import RelationshipProperty +from sqlalchemy.sql.expression import literal_column +from sqlalchemy.sql import func + +from cinder.common import sqlalchemyutils +from cinder.db.sqlalchemy import models +from cinder import exception +from cinder.openstack.common.db import exception as db_exc +from cinder.openstack.common.db.sqlalchemy import session as db_session +from cinder.openstack.common import log as logging +from cinder.openstack.common import timeutils +from cinder.openstack.common import uuidutils + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + +db_session.set_defaults(sql_connection='sqlite:///$state_path/$sqlite_db', + sqlite_db='cinder.sqlite') + +get_engine = db_session.get_engine +get_session = db_session.get_session + +_DEFAULT_QUOTA_NAME = 'default' + + +def get_backend(): + """The backend is this module itself.""" + + return sys.modules[__name__] + + +def is_admin_context(context): + """Indicates if the request context is an administrator.""" + if not context: + warnings.warn(_('Use of empty request context is deprecated'), + DeprecationWarning) + raise Exception('die') + return context.is_admin + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True + + +def authorize_project_context(context, project_id): + """Ensures a request has permission to access the given project.""" + if is_user_context(context): + if not context.project_id: + raise exception.NotAuthorized() + elif context.project_id != project_id: + raise exception.NotAuthorized() + + +def authorize_user_context(context, user_id): + """Ensures a request has permission to access the given user.""" + if is_user_context(context): + if not context.user_id: + raise exception.NotAuthorized() + elif context.user_id != user_id: + raise exception.NotAuthorized() + + +def authorize_quota_class_context(context, class_name): + """Ensures a request has permission to access the given quota class.""" + if is_user_context(context): + if not context.quota_class: + raise exception.NotAuthorized() + elif context.quota_class != class_name: + raise exception.NotAuthorized() + + +def require_admin_context(f): + """Decorator to require admin request context. + + The first argument to the wrapped function must be the context. 
+ + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]): + raise exception.AdminRequired() + return f(*args, **kwargs) + return wrapper + + +def require_context(f): + """Decorator to require *any* user or admin context. + + This does no authorization for user or project access matching, see + :py:func:`authorize_project_context` and + :py:func:`authorize_user_context`. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]) and not is_user_context(args[0]): + raise exception.NotAuthorized() + return f(*args, **kwargs) + return wrapper + + +def require_volume_exists(f): + """Decorator to require the specified volume to exist. + + Requires the wrapped function to use context and volume_id as + their first two arguments. + """ + + def wrapper(context, volume_id, *args, **kwargs): + volume_get(context, volume_id) + return f(context, volume_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + +def require_snapshot_exists(f): + """Decorator to require the specified snapshot to exist. + + Requires the wrapped function to use context and snapshot_id as + their first two arguments. + """ + + def wrapper(context, snapshot_id, *args, **kwargs): + snapshot_get(context, snapshot_id) + return f(context, snapshot_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + +def model_query(context, *args, **kwargs): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + :param session: if present, the session to use + :param read_deleted: if present, overrides context's read_deleted field. + :param project_only: if present and context is user-type, then restrict + query to match the context's project_id. 
+ """ + session = kwargs.get('session') or get_session() + read_deleted = kwargs.get('read_deleted') or context.read_deleted + project_only = kwargs.get('project_only') + + query = session.query(*args) + + if read_deleted == 'no': + query = query.filter_by(deleted=False) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter_by(deleted=True) + else: + raise Exception( + _("Unrecognized read_deleted value '%s'") % read_deleted) + + if project_only and is_user_context(context): + query = query.filter_by(project_id=context.project_id) + + return query + + +def _sync_volumes(context, project_id, session, volume_type_id=None, + volume_type_name=None): + (volumes, gigs) = _volume_data_get_for_project( + context, project_id, volume_type_id=volume_type_id, session=session) + key = 'volumes' + if volume_type_name: + key += '_' + volume_type_name + return {key: volumes} + + +def _sync_snapshots(context, project_id, session, volume_type_id=None, + volume_type_name=None): + (snapshots, gigs) = _snapshot_data_get_for_project( + context, project_id, volume_type_id=volume_type_id, session=session) + key = 'snapshots' + if volume_type_name: + key += '_' + volume_type_name + return {key: snapshots} + + +def _sync_gigabytes(context, project_id, session, volume_type_id=None, + volume_type_name=None): + (_junk, vol_gigs) = _volume_data_get_for_project( + context, project_id, volume_type_id=volume_type_id, session=session) + key = 'gigabytes' + if volume_type_name: + key += '_' + volume_type_name + if CONF.no_snapshot_gb_quota: + return {key: vol_gigs} + (_junk, snap_gigs) = _snapshot_data_get_for_project( + context, project_id, volume_type_id=volume_type_id, session=session) + return {key: vol_gigs + snap_gigs} + + +QUOTA_SYNC_FUNCTIONS = { + '_sync_volumes': _sync_volumes, + '_sync_snapshots': _sync_snapshots, + '_sync_gigabytes': _sync_gigabytes, +} + + +################### + + +@require_admin_context +def service_destroy(context, service_id): + session = get_session() + with session.begin(): + service_ref = _service_get(context, service_id, session=session) + service_ref.delete(session=session) + + +@require_admin_context +def _service_get(context, service_id, session=None): + result = model_query( + context, + models.Service, + session=session).\ + filter_by(id=service_id).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=service_id) + + return result + + +@require_admin_context +def service_get(context, service_id): + return _service_get(context, service_id) + + +@require_admin_context +def service_get_all(context, disabled=None): + query = model_query(context, models.Service) + + if disabled is not None: + query = query.filter_by(disabled=disabled) + + return query.all() + + +@require_admin_context +def service_get_all_by_topic(context, topic): + return model_query( + context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(topic=topic).\ + all() + + +@require_admin_context +def service_get_by_host_and_topic(context, host, topic): + result = model_query( + context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=None) + return result + + +@require_admin_context +def service_get_all_by_host(context, host): + return model_query( + context, models.Service, read_deleted="no").\ + filter_by(host=host).\ + all() + + 
+@require_admin_context +def _service_get_all_topic_subquery(context, session, topic, subq, label): + sort_value = getattr(subq.c, label) + return model_query(context, models.Service, + func.coalesce(sort_value, 0), + session=session, read_deleted="no").\ + filter_by(topic=topic).\ + filter_by(disabled=False).\ + outerjoin((subq, models.Service.host == subq.c.host)).\ + order_by(sort_value).\ + all() + + +@require_admin_context +def service_get_all_volume_sorted(context): + session = get_session() + with session.begin(): + topic = CONF.volume_topic + label = 'volume_gigabytes' + subq = model_query(context, models.Volume.host, + func.sum(models.Volume.size).label(label), + session=session, read_deleted="no").\ + group_by(models.Volume.host).\ + subquery() + return _service_get_all_topic_subquery(context, + session, + topic, + subq, + label) + + +@require_admin_context +def service_get_by_args(context, host, binary): + result = model_query(context, models.Service).\ + filter_by(host=host).\ + filter_by(binary=binary).\ + first() + + if not result: + raise exception.HostBinaryNotFound(host=host, binary=binary) + + return result + + +@require_admin_context +def service_create(context, values): + service_ref = models.Service() + service_ref.update(values) + if not CONF.enable_new_services: + service_ref.disabled = True + service_ref.save() + return service_ref + + +@require_admin_context +def service_update(context, service_id, values): + session = get_session() + with session.begin(): + service_ref = _service_get(context, service_id, session=session) + service_ref.update(values) + service_ref.save(session=session) + + +################### + + +def _metadata_refs(metadata_dict, meta_class): + metadata_refs = [] + if metadata_dict: + for k, v in metadata_dict.iteritems(): + metadata_ref = meta_class() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs + + +def _dict_with_extra_specs(inst_type_query): + """Convert type query result to dict with extra_spec and rate_limit. + + Takes a volume type query returned by sqlalchemy and returns it + as a dictionary, converting the extra_specs entry from a list + of dicts: + + 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
+ to a single dict: + 'extra_specs' : {'k1': 'v1'} + """ + inst_type_dict = dict(inst_type_query) + extra_specs = dict([(x['key'], x['value']) + for x in inst_type_query['extra_specs']]) + inst_type_dict['extra_specs'] = extra_specs + return inst_type_dict + + +################### + + +@require_admin_context +def iscsi_target_count_by_host(context, host): + return model_query(context, models.IscsiTarget).\ + filter_by(host=host).\ + count() + + +@require_admin_context +def iscsi_target_create_safe(context, values): + iscsi_target_ref = models.IscsiTarget() + + for (key, value) in values.iteritems(): + iscsi_target_ref[key] = value + try: + iscsi_target_ref.save() + return iscsi_target_ref + except IntegrityError: + return None + + +################### + + +@require_context +def _quota_get(context, project_id, resource, session=None): + result = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.ProjectQuotaNotFound(project_id=project_id) + + return result + + +@require_context +def quota_get(context, project_id, resource): + return _quota_get(context, project_id, resource) + + +@require_context +def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.Quota, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + result = {'project_id': project_id} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_create(context, project_id, resource, limit): + quota_ref = models.Quota() + quota_ref.project_id = project_id + quota_ref.resource = resource + quota_ref.hard_limit = limit + quota_ref.save() + return quota_ref + + +@require_admin_context +def quota_update(context, project_id, resource, limit): + session = get_session() + with session.begin(): + quota_ref = _quota_get(context, project_id, resource, session=session) + quota_ref.hard_limit = limit + quota_ref.save(session=session) + + +@require_admin_context +def quota_destroy(context, project_id, resource): + session = get_session() + with session.begin(): + quota_ref = _quota_get(context, project_id, resource, session=session) + quota_ref.delete(session=session) + + +################### + + +@require_context +def _quota_class_get(context, class_name, resource, session=None): + result = model_query(context, models.QuotaClass, session=session, + read_deleted="no").\ + filter_by(class_name=class_name).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.QuotaClassNotFound(class_name=class_name) + + return result + + +@require_context +def quota_class_get(context, class_name, resource): + return _quota_class_get(context, class_name, resource) + + +def quota_class_get_default(context): + rows = model_query(context, models.QuotaClass, + read_deleted="no").\ + filter_by(class_name=_DEFAULT_QUOTA_NAME).all() + + result = {'class_name': _DEFAULT_QUOTA_NAME} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_context +def quota_class_get_all_by_name(context, class_name): + authorize_quota_class_context(context, class_name) + + rows = model_query(context, models.QuotaClass, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + result = {'class_name': class_name} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def 
quota_class_create(context, class_name, resource, limit): + quota_class_ref = models.QuotaClass() + quota_class_ref.class_name = class_name + quota_class_ref.resource = resource + quota_class_ref.hard_limit = limit + quota_class_ref.save() + return quota_class_ref + + +@require_admin_context +def quota_class_update(context, class_name, resource, limit): + session = get_session() + with session.begin(): + quota_class_ref = _quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.hard_limit = limit + quota_class_ref.save(session=session) + + +@require_admin_context +def quota_class_destroy(context, class_name, resource): + session = get_session() + with session.begin(): + quota_class_ref = _quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.delete(session=session) + + +@require_admin_context +def quota_class_destroy_all_by_name(context, class_name): + session = get_session() + with session.begin(): + quota_classes = model_query(context, models.QuotaClass, + session=session, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + for quota_class_ref in quota_classes: + quota_class_ref.delete(session=session) + + +################### + + +@require_context +def quota_usage_get(context, project_id, resource): + result = model_query(context, models.QuotaUsage, read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.QuotaUsageNotFound(project_id=project_id) + + return result + + +@require_context +def quota_usage_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.QuotaUsage, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + result = {'project_id': project_id} + for row in rows: + result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) + + return result + + +@require_admin_context +def _quota_usage_create(context, project_id, resource, in_use, reserved, + until_refresh, session=None): + + quota_usage_ref = models.QuotaUsage() + quota_usage_ref.project_id = project_id + quota_usage_ref.resource = resource + quota_usage_ref.in_use = in_use + quota_usage_ref.reserved = reserved + quota_usage_ref.until_refresh = until_refresh + quota_usage_ref.save(session=session) + + return quota_usage_ref + + +################### + + +@require_context +def _reservation_get(context, uuid, session=None): + result = model_query(context, models.Reservation, session=session, + read_deleted="no").\ + filter_by(uuid=uuid).first() + + if not result: + raise exception.ReservationNotFound(uuid=uuid) + + return result + + +@require_context +def reservation_get(context, uuid): + return _reservation_get(context, uuid) + + +@require_context +def reservation_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.Reservation, read_deleted="no").\ + filter_by(project_id=project_id).all() + + result = {'project_id': project_id} + for row in rows: + result.setdefault(row.resource, {}) + result[row.resource][row.uuid] = row.delta + + return result + + +@require_admin_context +def _reservation_create(context, uuid, usage, project_id, resource, delta, + expire, session=None): + reservation_ref = models.Reservation() + reservation_ref.uuid = uuid + reservation_ref.usage_id = usage['id'] + reservation_ref.project_id = project_id + reservation_ref.resource = resource + reservation_ref.delta = delta + 
reservation_ref.expire = expire + reservation_ref.save(session=session) + return reservation_ref + + +@require_admin_context +def reservation_create(context, uuid, usage, project_id, resource, delta, + expire): + return _reservation_create(context, uuid, usage, project_id, resource, + delta, expire) + + +@require_admin_context +def reservation_destroy(context, uuid): + session = get_session() + with session.begin(): + reservation_ref = _reservation_get(context, uuid, session=session) + reservation_ref.delete(session=session) + + +################### + + +# NOTE(johannes): The quota code uses SQL locking to ensure races don't +# cause under or over counting of resources. To avoid deadlocks, this +# code always acquires the lock on quota_usages before acquiring the lock +# on reservations. + +def _get_quota_usages(context, session, project_id): + # Broken out for testability + rows = model_query(context, models.QuotaUsage, + read_deleted="no", + session=session).\ + filter_by(project_id=project_id).\ + with_lockmode('update').\ + all() + return dict((row.resource, row) for row in rows) + + +@require_context +def quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age, project_id=None): + elevated = context.elevated() + session = get_session() + with session.begin(): + if project_id is None: + project_id = context.project_id + + # Get the current usages + usages = _get_quota_usages(context, session, project_id) + + # Handle usage refresh + work = set(deltas.keys()) + while work: + resource = work.pop() + + # Do we need to refresh the usage? + refresh = False + if resource not in usages: + usages[resource] = _quota_usage_create(elevated, + project_id, + resource, + 0, 0, + until_refresh or None, + session=session) + refresh = True + elif usages[resource].in_use < 0: + # Negative in_use count indicates a desync, so try to + # heal from that... + refresh = True + elif usages[resource].until_refresh is not None: + usages[resource].until_refresh -= 1 + if usages[resource].until_refresh <= 0: + refresh = True + elif max_age and usages[resource].updated_at is not None and ( + (usages[resource].updated_at - + timeutils.utcnow()).seconds >= max_age): + refresh = True + + # OK, refresh the usage + if refresh: + # Grab the sync routine + sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] + volume_type_id = getattr(resources[resource], + 'volume_type_id', None) + volume_type_name = getattr(resources[resource], + 'volume_type_name', None) + updates = sync(elevated, project_id, + volume_type_id=volume_type_id, + volume_type_name=volume_type_name, + session=session) + for res, in_use in updates.items(): + # Make sure we have a destination for the usage! + if res not in usages: + usages[res] = _quota_usage_create( + elevated, + project_id, + res, + 0, 0, + until_refresh or None, + session=session + ) + + # Update the usage + usages[res].in_use = in_use + usages[res].until_refresh = until_refresh or None + + # Because more than one resource may be refreshed + # by the call to the sync routine, and we don't + # want to double-sync, we make sure all refreshed + # resources are dropped from the work set. + work.discard(res) + + # NOTE(Vek): We make the assumption that the sync + # routine actually refreshes the + # resources that it is the sync routine + # for. We don't check, because this is + # a best-effort mechanism. 
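+
+        # The checks below rely on the usage records gathered (and possibly
+        # refreshed) above: in_use for the "unders" test and
+        # usages[r].total, which the QuotaUsage model defines as
+        # in_use + reserved, for the quota test.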
+ + # Check for deltas that would go negative + unders = [r for r, delta in deltas.items() + if delta < 0 and delta + usages[r].in_use < 0] + + # Now, let's check the quotas + # NOTE(Vek): We're only concerned about positive increments. + # If a project has gone over quota, we want them to + # be able to reduce their usage without any + # problems. + overs = [r for r, delta in deltas.items() + if quotas[r] >= 0 and delta >= 0 and + quotas[r] < delta + usages[r].total] + + # NOTE(Vek): The quota check needs to be in the transaction, + # but the transaction doesn't fail just because + # we're over quota, so the OverQuota raise is + # outside the transaction. If we did the raise + # here, our usage updates would be discarded, but + # they're not invalidated by being over-quota. + + # Create the reservations + if not overs: + reservations = [] + for resource, delta in deltas.items(): + reservation = _reservation_create(elevated, + str(uuid.uuid4()), + usages[resource], + project_id, + resource, delta, expire, + session=session) + reservations.append(reservation.uuid) + + # Also update the reserved quantity + # NOTE(Vek): Again, we are only concerned here about + # positive increments. Here, though, we're + # worried about the following scenario: + # + # 1) User initiates resize down. + # 2) User allocates a new instance. + # 3) Resize down fails or is reverted. + # 4) User is now over quota. + # + # To prevent this, we only update the + # reserved value if the delta is positive. + if delta > 0: + usages[resource].reserved += delta + + # Apply updates to the usages table + for usage_ref in usages.values(): + usage_ref.save(session=session) + + if unders: + LOG.warning(_("Change will make usage less than 0 for the following " + "resources: %s") % unders) + if overs: + usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved'])) + for k, v in usages.items()) + raise exception.OverQuota(overs=sorted(overs), quotas=quotas, + usages=usages) + + return reservations + + +def _quota_reservations(session, context, reservations): + """Return the relevant reservations.""" + + # Get the listed reservations + return model_query(context, models.Reservation, + read_deleted="no", + session=session).\ + filter(models.Reservation.uuid.in_(reservations)).\ + with_lockmode('update').\ + all() + + +@require_context +def reservation_commit(context, reservations, project_id=None): + session = get_session() + with session.begin(): + usages = _get_quota_usages(context, session, project_id) + + for reservation in _quota_reservations(session, context, reservations): + usage = usages[reservation.resource] + if reservation.delta >= 0: + usage.reserved -= reservation.delta + usage.in_use += reservation.delta + + reservation.delete(session=session) + + for usage in usages.values(): + usage.save(session=session) + + +@require_context +def reservation_rollback(context, reservations, project_id=None): + session = get_session() + with session.begin(): + usages = _get_quota_usages(context, session, project_id) + + for reservation in _quota_reservations(session, context, reservations): + usage = usages[reservation.resource] + if reservation.delta >= 0: + usage.reserved -= reservation.delta + + reservation.delete(session=session) + + for usage in usages.values(): + usage.save(session=session) + + +@require_admin_context +def quota_destroy_all_by_project(context, project_id): + session = get_session() + with session.begin(): + quotas = model_query(context, models.Quota, session=session, + read_deleted="no").\ + 
filter_by(project_id=project_id).\ + all() + + for quota_ref in quotas: + quota_ref.delete(session=session) + + quota_usages = model_query(context, models.QuotaUsage, + session=session, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + for quota_usage_ref in quota_usages: + quota_usage_ref.delete(session=session) + + reservations = model_query(context, models.Reservation, + session=session, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + for reservation_ref in reservations: + reservation_ref.delete(session=session) + + +@require_admin_context +def reservation_expire(context): + session = get_session() + with session.begin(): + current_time = timeutils.utcnow() + results = model_query(context, models.Reservation, session=session, + read_deleted="no").\ + filter(models.Reservation.expire < current_time).\ + all() + + if results: + for reservation in results: + if reservation.delta >= 0: + reservation.usage.reserved -= reservation.delta + reservation.usage.save(session=session) + + reservation.delete(session=session) + + +################### + + +@require_admin_context +def volume_allocate_iscsi_target(context, volume_id, host): + session = get_session() + with session.begin(): + iscsi_target_ref = model_query(context, models.IscsiTarget, + session=session, read_deleted="no").\ + filter_by(volume=None).\ + filter_by(host=host).\ + with_lockmode('update').\ + first() + + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not iscsi_target_ref: + raise exception.NoMoreTargets() + + iscsi_target_ref.volume_id = volume_id + session.add(iscsi_target_ref) + + return iscsi_target_ref.target_num + + +@require_admin_context +def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint): + if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): + raise exception.InvalidUUID(uuid=instance_uuid) + + session = get_session() + with session.begin(): + volume_ref = _volume_get(context, volume_id, session=session) + volume_ref['status'] = 'in-use' + volume_ref['mountpoint'] = mountpoint + volume_ref['attach_status'] = 'attached' + volume_ref['instance_uuid'] = instance_uuid + volume_ref['attached_host'] = host_name + volume_ref.save(session=session) + return volume_ref + + +@require_context +def volume_create(context, values): + values['volume_metadata'] = _metadata_refs(values.get('metadata'), + models.VolumeMetadata) + if is_admin_context(context): + values['volume_admin_metadata'] = \ + _metadata_refs(values.get('admin_metadata'), + models.VolumeAdminMetadata) + elif values.get('volume_admin_metadata'): + del values['volume_admin_metadata'] + + volume_ref = models.Volume() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + volume_ref.update(values) + + session = get_session() + with session.begin(): + volume_ref.save(session=session) + + return _volume_get(context, values['id'], session=session) + + +@require_admin_context +def volume_data_get_for_host(context, host): + result = model_query(context, + func.count(models.Volume.id), + func.sum(models.Volume.size), + read_deleted="no").\ + filter_by(host=host).\ + first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_admin_context +def _volume_data_get_for_project(context, project_id, volume_type_id=None, + session=None): + query = model_query(context, + func.count(models.Volume.id), + func.sum(models.Volume.size), + read_deleted="no", + session=session).\ + 
filter_by(project_id=project_id) + + if volume_type_id: + query = query.filter_by(volume_type_id=volume_type_id) + + result = query.first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_admin_context +def volume_data_get_for_project(context, project_id, volume_type_id=None): + return _volume_data_get_for_project(context, project_id, volume_type_id) + + +@require_admin_context +def finish_volume_migration(context, src_vol_id, dest_vol_id): + """Copy almost all columns from dest to source.""" + session = get_session() + with session.begin(): + src_volume_ref = _volume_get(context, src_vol_id, session=session) + dest_volume_ref = _volume_get(context, dest_vol_id, session=session) + + # NOTE(rpodolyaka): we should copy only column values, while model + # instances also have relationships attributes, which + # should be ignored + def is_column(inst, attr): + return attr in inst.__class__.__table__.columns + + for key, value in dest_volume_ref.iteritems(): + if key == 'id' or not is_column(dest_volume_ref, key): + continue + elif key == 'migration_status': + value = None + elif key == '_name_id': + value = dest_volume_ref['_name_id'] or dest_volume_ref['id'] + + setattr(src_volume_ref, key, value) + + +@require_admin_context +def volume_destroy(context, volume_id): + session = get_session() + now = timeutils.utcnow() + with session.begin(): + model_query(context, models.Volume, session=session).\ + filter_by(id=volume_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) + model_query(context, models.IscsiTarget, session=session).\ + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) + model_query(context, models.VolumeMetadata, session=session).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) + model_query(context, models.VolumeAdminMetadata, session=session).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) + model_query(context, models.Transfer, session=session).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': now, + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +def volume_detached(context, volume_id): + session = get_session() + with session.begin(): + volume_ref = _volume_get(context, volume_id, session=session) + # Hide status update from user if we're performing a volume migration + if not volume_ref['migration_status']: + volume_ref['status'] = 'available' + volume_ref['mountpoint'] = None + volume_ref['attach_status'] = 'detached' + volume_ref['instance_uuid'] = None + volume_ref['attached_host'] = None + volume_ref['attach_time'] = None + volume_ref.save(session=session) + + +@require_context +def _volume_get_query(context, session=None, project_only=False): + if is_admin_context(context): + return model_query(context, models.Volume, session=session, + project_only=project_only).\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_admin_metadata')).\ + options(joinedload('volume_type')) + else: + return model_query(context, models.Volume, session=session, + project_only=project_only).\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_type')) + + +@require_context +def _volume_get(context, volume_id, session=None): + result = _volume_get_query(context, session=session, project_only=True).\ + 
filter_by(id=volume_id).\ + first() + + if not result: + raise exception.VolumeNotFound(volume_id=volume_id) + + return result + + +@require_context +def volume_get(context, volume_id): + return _volume_get(context, volume_id) + + +@require_admin_context +def volume_get_all(context, marker, limit, sort_key, sort_dir, + filters=None): + """Retrieves all volumes. + + :param context: context to query under + :param marker: the last item of the previous page, used to determine the + next page of results to return + :param limit: maximum number of items to return + :param sort_key: single attributes by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + :param filters: Filters for the query. A filter key/value of + 'no_migration_targets'=True causes volumes with either + a NULL 'migration_status' or a 'migration_status' that + does not start with 'target:' to be retrieved. + :returns: list of matching volumes + """ + session = get_session() + with session.begin(): + # Generate the query + query = _generate_paginate_query(context, session, marker, limit, + sort_key, sort_dir, filters) + # No volumes would match, return empty list + if query is None: + return [] + return query.all() + + +@require_admin_context +def volume_get_all_by_host(context, host): + return _volume_get_query(context).filter_by(host=host).all() + + +@require_admin_context +def volume_get_all_by_instance_uuid(context, instance_uuid): + result = model_query(context, models.Volume, read_deleted="no").\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_admin_metadata')).\ + options(joinedload('volume_type')).\ + filter_by(instance_uuid=instance_uuid).\ + all() + + if not result: + return [] + + return result + + +@require_context +def volume_get_all_by_project(context, project_id, marker, limit, sort_key, + sort_dir, filters=None): + """"Retrieves all volumes in a project. + + :param context: context to query under + :param project_id: project for all volumes being retrieved + :param marker: the last item of the previous page, used to determine the + next page of results to return + :param limit: maximum number of items to return + :param sort_key: single attributes by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + :param filters: Filters for the query. A filter key/value of + 'no_migration_targets'=True causes volumes with either + a NULL 'migration_status' or a 'migration_status' that + does not start with 'target:' to be retrieved. + :returns: list of matching volumes + """ + session = get_session() + with session.begin(): + authorize_project_context(context, project_id) + # Add in the project filter without modifying the given filters + filters = filters.copy() if filters else {} + filters['project_id'] = project_id + # Generate the query + query = _generate_paginate_query(context, session, marker, limit, + sort_key, sort_dir, filters) + # No volumes would match, return empty list + if query is None: + return [] + return query.all() + + +def _generate_paginate_query(context, session, marker, limit, sort_key, + sort_dir, filters): + """Generate the query to include the filters and the paginate options. + + Returns a query with sorting / pagination criteria added or None + if the given filters will not yield any results. 
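+
+    For example, filters={'status': 'available', 'metadata': {'k': 'v'}}
+    yields an exact match on the status column plus an any() filter
+    against the volume_metadata relationship.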
+ + :param context: context to query under + :param session: the session to use + :param marker: the last item of the previous page; we returns the next + results after this value. + :param limit: maximum number of items to return + :param sort_key: single attributes by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + :param filters: dictionary of filters; values that are lists, + tuples, sets, or frozensets cause an 'IN' test to + be performed, while exact matching ('==' operator) + is used for other values + :returns: updated query or None + """ + query = _volume_get_query(context, session=session) + + if filters: + filters = filters.copy() + + # 'no_migration_targets' is unique, must be either NULL or + # not start with 'target:' + if ('no_migration_targets' in filters and + filters['no_migration_targets']): + filters.pop('no_migration_targets') + try: + column_attr = getattr(models.Volume, 'migration_status') + conditions = [column_attr is None, + column_attr.op('NOT LIKE')('target:%')] + query = query.filter(or_(*conditions)) + except AttributeError: + log_msg = _("'migration_status' column could not be found.") + LOG.debug(log_msg) + return None + + if ('changes-since' in filters and + filters['changes-since'] is not None): + try: + timeStr = filters['changes-since'] + change_since_isotime = timeutils.parse_isotime(timeStr) + changes_since = timeutils.normalize_time(change_since_isotime) + column_attr = getattr(models.Volume, 'updated_at') + query = query.filter(column_attr >= changes_since) + filters.pop('changes-since') + except AttributeError: + log_msg = _("'update_at' column could not be found.") + LOG.debug(log_msg) + return None + + # Apply exact match filters for everything else, ensure that the + # filter value exists on the model + for key in filters.keys(): + # metadata is unique, must be a dict + if key == 'metadata': + if not isinstance(filters[key], dict): + log_msg = _("'metadata' filter value is not valid.") + LOG.debug(log_msg) + return None + continue + try: + column_attr = getattr(models.Volume, key) + # Do not allow relationship properties since those require + # schema specific knowledge + prop = getattr(column_attr, 'property') + if isinstance(prop, RelationshipProperty): + log_msg = (_("'%s' filter key is not valid, " + "it maps to a relationship.")) % key + LOG.debug(log_msg) + return None + except AttributeError: + log_msg = _("'%s' filter key is not valid.") % key + LOG.debug(log_msg) + return None + + # Holds the simple exact matches + filter_dict = {} + + # Iterate over all filters, special case the filter is necessary + for key, value in filters.iteritems(): + if key == 'metadata': + # model.VolumeMetadata defines the backref to Volumes as + # 'volume_metadata', use that column attribute key + key = 'volume_metadata' + column_attr = getattr(models.Volume, key) + for k, v in value.iteritems(): + query = query.filter(column_attr.any(key=k, value=v)) + elif isinstance(value, (list, tuple, set, frozenset)): + # Looking for values in a list; apply to query directly + column_attr = getattr(models.Volume, key) + query = query.filter(column_attr.in_(value)) + else: + # OK, simple exact match; save for later + filter_dict[key] = value + + # Apply simple exact matches + if filter_dict: + query = query.filter_by(**filter_dict) + + marker_volume = None + if marker is not None: + marker_volume = _volume_get(context, marker, session) + + return sqlalchemyutils.paginate_query(query, models.Volume, limit, + 
[sort_key, 'created_at', 'id'], + marker=marker_volume, + sort_dir=sort_dir) + + +@require_admin_context +def volume_get_iscsi_target_num(context, volume_id): + result = model_query(context, models.IscsiTarget, read_deleted="yes").\ + filter_by(volume_id=volume_id).\ + first() + + if not result: + raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) + + return result.target_num + + +@require_context +def volume_update(context, volume_id, values): + session = get_session() + with session.begin(): + metadata = values.get('metadata') + if metadata is not None: + _volume_user_metadata_update(context, + volume_id, + values.pop('metadata'), + delete=True, + session=session) + + admin_metadata = values.get('admin_metadata') + if is_admin_context(context) and admin_metadata is not None: + _volume_admin_metadata_update(context, + volume_id, + values.pop('admin_metadata'), + delete=True, + session=session) + + volume_ref = _volume_get(context, volume_id, session=session) + volume_ref.update(values) + volume_ref.save(session=session) + return volume_ref + + +#################### + +def _volume_x_metadata_get_query(context, volume_id, model, session=None): + return model_query(context, model, session=session, read_deleted="no").\ + filter_by(volume_id=volume_id) + + +def _volume_x_metadata_get(context, volume_id, model, session=None): + rows = _volume_x_metadata_get_query(context, volume_id, model, + session=session).all() + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec, + session=None): + result = _volume_x_metadata_get_query(context, volume_id, + model, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise notfound_exec(metadata_key=key, volume_id=volume_id) + return result + + +def _volume_x_metadata_update(context, volume_id, metadata, delete, + model, notfound_exec, session=None): + if not session: + session = get_session() + + with session.begin(subtransactions=True): + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = _volume_x_metadata_get(context, volume_id, + model, session=session) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = _volume_x_metadata_get_item(context, volume_id, + meta_key, model, + notfound_exec, + session=session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + # Now update all existing items with new values, or create new meta + # objects + for meta_key, meta_value in metadata.items(): + + # update the value whether it exists or not + item = {"value": meta_value} + + try: + meta_ref = _volume_x_metadata_get_item(context, volume_id, + meta_key, model, + notfound_exec, + session=session) + except notfound_exec: + meta_ref = model() + item.update({"key": meta_key, "volume_id": volume_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return _volume_x_metadata_get(context, volume_id, model) + + +def _volume_user_metadata_get_query(context, volume_id, session=None): + return _volume_x_metadata_get_query(context, volume_id, + models.VolumeMetadata, session=session) + + +@require_context +@require_volume_exists +def _volume_user_metadata_get(context, volume_id, session=None): + return _volume_x_metadata_get(context, volume_id, + models.VolumeMetadata, session=session) + + +@require_context +def _volume_user_metadata_get_item(context, volume_id, key, 
session=None): + return _volume_x_metadata_get_item(context, volume_id, key, + models.VolumeMetadata, + exception.VolumeMetadataNotFound, + session=session) + + +@require_context +@require_volume_exists +def _volume_user_metadata_update(context, volume_id, metadata, delete, + session=None): + return _volume_x_metadata_update(context, volume_id, metadata, delete, + models.VolumeMetadata, + exception.VolumeMetadataNotFound, + session=session) + + +@require_context +@require_volume_exists +def volume_metadata_get_item(context, volume_id, key): + return _volume_user_metadata_get_item(context, volume_id, key) + + +@require_context +@require_volume_exists +def volume_metadata_get(context, volume_id): + return _volume_user_metadata_get(context, volume_id) + + +@require_context +@require_volume_exists +def volume_metadata_delete(context, volume_id, key): + _volume_user_metadata_get_query(context, volume_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +@require_volume_exists +def volume_metadata_update(context, volume_id, metadata, delete): + return _volume_user_metadata_update(context, volume_id, metadata, delete) + + +################### + + +def _volume_admin_metadata_get_query(context, volume_id, session=None): + return _volume_x_metadata_get_query(context, volume_id, + models.VolumeAdminMetadata, + session=session) + + +@require_admin_context +@require_volume_exists +def _volume_admin_metadata_get(context, volume_id, session=None): + return _volume_x_metadata_get(context, volume_id, + models.VolumeAdminMetadata, session=session) + + +@require_admin_context +@require_volume_exists +def _volume_admin_metadata_update(context, volume_id, metadata, delete, + session=None): + return _volume_x_metadata_update(context, volume_id, metadata, delete, + models.VolumeAdminMetadata, + exception.VolumeAdminMetadataNotFound, + session=session) + + +@require_admin_context +@require_volume_exists +def volume_admin_metadata_get(context, volume_id): + return _volume_admin_metadata_get(context, volume_id) + + +@require_admin_context +@require_volume_exists +def volume_admin_metadata_delete(context, volume_id, key): + _volume_admin_metadata_get_query(context, volume_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +@require_volume_exists +def volume_admin_metadata_update(context, volume_id, metadata, delete): + return _volume_admin_metadata_update(context, volume_id, metadata, delete) + + +################### + + +@require_context +def snapshot_create(context, values): + values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), + models.SnapshotMetadata) + snapshot_ref = models.Snapshot() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + + return _snapshot_get(context, values['id'], session=session) + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + model_query(context, models.Snapshot, session=session).\ + filter_by(id=snapshot_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + model_query(context, models.SnapshotMetadata, session=session).\ + filter_by(snapshot_id=snapshot_id).\ + 
update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def _snapshot_get(context, snapshot_id, session=None): + result = model_query(context, models.Snapshot, session=session, + project_only=True).\ + options(joinedload('volume')).\ + options(joinedload('snapshot_metadata')).\ + filter_by(id=snapshot_id).\ + first() + + if not result: + raise exception.SnapshotNotFound(snapshot_id=snapshot_id) + + return result + + +@require_context +def snapshot_get(context, snapshot_id): + return _snapshot_get(context, snapshot_id) + + +@require_admin_context +def snapshot_get_all(context): + return model_query(context, models.Snapshot).\ + options(joinedload('snapshot_metadata')).\ + all() + + +@require_context +def snapshot_get_all_for_volume(context, volume_id): + return model_query(context, models.Snapshot, read_deleted='no', + project_only=True).\ + filter_by(volume_id=volume_id).\ + options(joinedload('snapshot_metadata')).\ + all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + return model_query(context, models.Snapshot).\ + filter_by(project_id=project_id).\ + options(joinedload('snapshot_metadata')).\ + all() + + +@require_context +def _snapshot_data_get_for_project(context, project_id, volume_type_id=None, + session=None): + authorize_project_context(context, project_id) + query = model_query(context, + func.count(models.Snapshot.id), + func.sum(models.Snapshot.volume_size), + read_deleted="no", + session=session).\ + filter_by(project_id=project_id) + + if volume_type_id: + query = query.join('volume').filter_by(volume_type_id=volume_type_id) + + result = query.first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_context +def snapshot_data_get_for_project(context, project_id, volume_type_id=None): + return _snapshot_data_get_for_project(context, project_id, volume_type_id) + + +@require_context +def snapshot_get_active_by_window(context, begin, end=None, project_id=None): + """Return snapshots that were active during window.""" + + query = model_query(context, models.Snapshot, read_deleted="yes") + query = query.filter(or_(models.Snapshot.deleted_at is None, + models.Snapshot.deleted_at > begin)) + query = query.options(joinedload(models.Snapshot.volume)) + if end: + query = query.filter(models.Snapshot.created_at < end) + if project_id: + query = query.filter_by(project_id=project_id) + + return query.all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = _snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + +#################### + + +def _snapshot_metadata_get_query(context, snapshot_id, session=None): + return model_query(context, models.SnapshotMetadata, + session=session, read_deleted="no").\ + filter_by(snapshot_id=snapshot_id) + + +@require_context +@require_snapshot_exists +def _snapshot_metadata_get(context, snapshot_id, session=None): + rows = _snapshot_metadata_get_query(context, snapshot_id, session).all() + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +@require_snapshot_exists +def snapshot_metadata_get(context, snapshot_id): + return _snapshot_metadata_get(context, snapshot_id) + + +@require_context +@require_snapshot_exists +def 
snapshot_metadata_delete(context, snapshot_id, key): + _snapshot_metadata_get_query(context, snapshot_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def _snapshot_metadata_get_item(context, snapshot_id, key, session=None): + result = _snapshot_metadata_get_query(context, + snapshot_id, + session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.SnapshotMetadataNotFound(metadata_key=key, + snapshot_id=snapshot_id) + return result + + +@require_context +@require_snapshot_exists +def snapshot_metadata_update(context, snapshot_id, metadata, delete): + session = get_session() + with session.begin(): + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = _snapshot_metadata_get(context, snapshot_id, + session) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = _snapshot_metadata_get_item(context, + snapshot_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + # Now update all existing items with new values, or create new meta + # objects + for meta_key, meta_value in metadata.items(): + + # update the value whether it exists or not + item = {"value": meta_value} + + try: + meta_ref = _snapshot_metadata_get_item(context, snapshot_id, + meta_key, session) + except exception.SnapshotMetadataNotFound as e: + meta_ref = models.SnapshotMetadata() + item.update({"key": meta_key, "snapshot_id": snapshot_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return snapshot_metadata_get(context, snapshot_id) + +################### + + +@require_admin_context +def volume_type_create(context, values): + """Create a new instance type. 
+ + In order to pass in extra specs, the values dict should contain a + 'extra_specs' key/value pair: + {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} + """ + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + + session = get_session() + with session.begin(): + try: + _volume_type_get_by_name(context, values['name'], session) + raise exception.VolumeTypeExists(id=values['name']) + except exception.VolumeTypeNotFoundByName: + pass + try: + _volume_type_get(context, values['id'], session) + raise exception.VolumeTypeExists(id=values['id']) + except exception.VolumeTypeNotFound: + pass + try: + values['extra_specs'] = _metadata_refs(values.get('extra_specs'), + models.VolumeTypeExtraSpecs) + volume_type_ref = models.VolumeTypes() + volume_type_ref.update(values) + volume_type_ref.save(session=session) + except Exception as e: + raise db_exc.DBError(e) + return volume_type_ref + + +@require_context +def volume_type_get_all(context, inactive=False, filters=None): + """Returns a dict describing all volume_types with name as key.""" + filters = filters or {} + + read_deleted = "yes" if inactive else "no" + rows = model_query(context, models.VolumeTypes, + read_deleted=read_deleted).\ + options(joinedload('extra_specs')).\ + order_by("name").\ + all() + + result = {} + for row in rows: + result[row['name']] = _dict_with_extra_specs(row) + + return result + + +@require_context +def _volume_type_get(context, id, session=None, inactive=False): + read_deleted = "yes" if inactive else "no" + result = model_query(context, + models.VolumeTypes, + session=session, + read_deleted=read_deleted).\ + options(joinedload('extra_specs')).\ + filter_by(id=id).\ + first() + + if not result: + raise exception.VolumeTypeNotFound(volume_type_id=id) + + return _dict_with_extra_specs(result) + + +@require_context +def volume_type_get(context, id, inactive=False): + """Return a dict describing specific volume_type.""" + + return _volume_type_get(context, id, None, inactive) + + +@require_context +def _volume_type_get_by_name(context, name, session=None): + result = model_query(context, models.VolumeTypes, session=session).\ + options(joinedload('extra_specs')).\ + filter_by(name=name).\ + first() + + if not result: + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + else: + return _dict_with_extra_specs(result) + + +@require_context +def volume_type_get_by_name(context, name): + """Return a dict describing specific volume_type.""" + + return _volume_type_get_by_name(context, name) + + +@require_admin_context +def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): + read_deleted = "yes" if inactive else "no" + return model_query(context, models.VolumeTypes, + read_deleted=read_deleted). \ + filter_by(qos_specs_id=qos_specs_id).all() + + +@require_admin_context +def volume_type_qos_associate(context, type_id, qos_specs_id): + session = get_session() + with session.begin(): + _volume_type_get(context, type_id, session) + + session.query(models.VolumeTypes). \ + filter_by(id=type_id). \ + update({'qos_specs_id': qos_specs_id, + 'updated_at': timeutils.utcnow()}) + + +@require_admin_context +def volume_type_qos_disassociate(context, qos_specs_id, type_id): + """Disassociate volume type from qos specs.""" + session = get_session() + with session.begin(): + _volume_type_get(context, type_id, session) + + session.query(models.VolumeTypes). \ + filter_by(id=type_id). \ + filter_by(qos_specs_id=qos_specs_id). 
\ + update({'qos_specs_id': None, + 'updated_at': timeutils.utcnow()}) + + +@require_admin_context +def volume_type_qos_disassociate_all(context, qos_specs_id): + """Disassociate all volume types associated with specified qos specs.""" + session = get_session() + with session.begin(): + session.query(models.VolumeTypes). \ + filter_by(qos_specs_id=qos_specs_id). \ + update({'qos_specs_id': None, + 'updated_at': timeutils.utcnow()}) + + +@require_admin_context +def volume_type_qos_specs_get(context, type_id): + """Return all qos specs for given volume type. + + result looks like: + { + 'qos_specs': + { + 'id': 'qos-specs-id', + 'name': 'qos_specs_name', + 'consumer': 'Consumer', + 'specs': { + 'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3' + } + } + } + + """ + session = get_session() + with session.begin(): + _volume_type_get(context, type_id, session) + + row = session.query(models.VolumeTypes). \ + options(joinedload('qos_specs')). \ + filter_by(id=type_id). \ + first() + + # row.qos_specs is a list of QualityOfServiceSpecs ref + specs = _dict_with_qos_specs(row.qos_specs) + + if not specs: + # turn empty list to None + specs = None + else: + specs = specs[0] + + return {'qos_specs': specs} + + +@require_admin_context +def volume_type_destroy(context, id): + session = get_session() + with session.begin(): + _volume_type_get(context, id, session) + results = model_query(context, models.Volume, session=session). \ + filter_by(volume_type_id=id).all() + if results: + msg = _('VolumeType %s deletion failed, VolumeType in use.') % id + LOG.error(msg) + raise exception.VolumeTypeInUse(volume_type_id=id) + model_query(context, models.VolumeTypes, session=session).\ + filter_by(id=id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + model_query(context, models.VolumeTypeExtraSpecs, session=session).\ + filter_by(volume_type_id=id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def volume_get_active_by_window(context, + begin, + end=None, + project_id=None): + """Return volumes that were active during window.""" + query = model_query(context, models.Volume, read_deleted="yes") + query = query.filter(or_(models.Volume.deleted_at is None, + models.Volume.deleted_at > begin)) + if end: + query = query.filter(models.Volume.created_at < end) + if project_id: + query = query.filter_by(project_id=project_id) + + return query.all() + + +#################### + + +def _volume_type_extra_specs_query(context, volume_type_id, session=None): + return model_query(context, models.VolumeTypeExtraSpecs, session=session, + read_deleted="no").\ + filter_by(volume_type_id=volume_type_id) + + +@require_context +def volume_type_extra_specs_get(context, volume_type_id): + rows = _volume_type_extra_specs_query(context, volume_type_id).\ + all() + + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +def volume_type_extra_specs_delete(context, volume_type_id, key): + session = get_session() + with session.begin(): + _volume_type_extra_specs_get_item(context, volume_type_id, key, + session) + _volume_type_extra_specs_query(context, volume_type_id, session).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def _volume_type_extra_specs_get_item(context, volume_type_id, key, + session=None): + result = 
_volume_type_extra_specs_query( + context, volume_type_id, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.VolumeTypeExtraSpecsNotFound( + extra_specs_key=key, + volume_type_id=volume_type_id) + + return result + + +@require_context +def volume_type_extra_specs_update_or_create(context, volume_type_id, + specs): + session = get_session() + with session.begin(): + spec_ref = None + for key, value in specs.iteritems(): + try: + spec_ref = _volume_type_extra_specs_get_item( + context, volume_type_id, key, session) + except exception.VolumeTypeExtraSpecsNotFound as e: + spec_ref = models.VolumeTypeExtraSpecs() + spec_ref.update({"key": key, "value": value, + "volume_type_id": volume_type_id, + "deleted": False}) + spec_ref.save(session=session) + + return specs + + +#################### + + +@require_admin_context +def qos_specs_create(context, values): + """Create a new QoS specs. + + :param values dictionary that contains specifications for QoS + e.g. {'name': 'Name', + 'qos_specs': { + 'consumer': 'front-end', + 'total_iops_sec': 1000, + 'total_bytes_sec': 1024000 + } + } + """ + specs_id = str(uuid.uuid4()) + + session = get_session() + with session.begin(): + try: + _qos_specs_get_by_name(context, values['name'], session) + raise exception.QoSSpecsExists(specs_id=values['name']) + except exception.QoSSpecsNotFound: + pass + try: + # Insert a root entry for QoS specs + specs_root = models.QualityOfServiceSpecs() + root = dict(id=specs_id) + # 'QoS_Specs_Name' is a internal reserved key to store + # the name of QoS specs + root['key'] = 'QoS_Specs_Name' + root['value'] = values['name'] + LOG.debug("DB qos_specs_create(): root %s", root) + specs_root.update(root) + specs_root.save(session=session) + + # Insert all specification entries for QoS specs + for k, v in values['qos_specs'].iteritems(): + item = dict(key=k, value=v, specs_id=specs_id) + item['id'] = str(uuid.uuid4()) + spec_entry = models.QualityOfServiceSpecs() + spec_entry.update(item) + spec_entry.save(session=session) + except Exception as e: + raise db_exc.DBError(e) + + return dict(id=specs_root.id, name=specs_root.value) + + +@require_admin_context +def _qos_specs_get_by_name(context, name, session=None, inactive=False): + read_deleted = 'yes' if inactive else 'no' + results = model_query(context, models.QualityOfServiceSpecs, + read_deleted=read_deleted, session=session). \ + filter_by(key='QoS_Specs_Name'). \ + filter_by(value=name). \ + options(joinedload('specs')).all() + + if not results: + raise exception.QoSSpecsNotFound(specs_id=name) + + return results + + +@require_admin_context +def _qos_specs_get_ref(context, qos_specs_id, session=None, inactive=False): + read_deleted = 'yes' if inactive else 'no' + result = model_query(context, models.QualityOfServiceSpecs, + read_deleted=read_deleted, session=session). \ + filter_by(id=qos_specs_id). \ + options(joinedload_all('specs')).all() + + if not result: + raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) + + return result + + +def _dict_with_children_specs(specs): + """Convert specs list to a dict.""" + result = {} + for spec in specs: + # Skip deleted keys + if not spec['deleted']: + result.update({spec['key']: spec['value']}) + + return result + + +def _dict_with_qos_specs(rows): + """Convert qos specs query results to list. 
+ + Qos specs query results are a list of quality_of_service_specs refs, + some are root entry of a qos specs (key == 'QoS_Specs_Name') and the + rest are children entry, a.k.a detailed specs for a qos specs. This + function converts query results to a dict using spec name as key. + """ + result = [] + for row in rows: + if row['key'] == 'QoS_Specs_Name': + member = {} + member['name'] = row['value'] + member.update(dict(id=row['id'])) + if row.specs: + spec_dict = _dict_with_children_specs(row.specs) + member.update(dict(consumer=spec_dict['consumer'])) + del spec_dict['consumer'] + member.update(dict(specs=spec_dict)) + result.append(member) + return result + + +@require_admin_context +def qos_specs_get(context, qos_specs_id, inactive=False): + rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive) + + return _dict_with_qos_specs(rows)[0] + + +@require_admin_context +def qos_specs_get_all(context, inactive=False, filters=None): + """Returns a list of all qos_specs. + + Results is like: + [{ + 'id': SPECS-UUID, + 'name': 'qos_spec-1', + 'consumer': 'back-end', + 'specs': { + 'key1': 'value1', + 'key2': 'value2', + ... + } + }, + { + 'id': SPECS-UUID, + 'name': 'qos_spec-2', + 'consumer': 'front-end', + 'specs': { + 'key1': 'value1', + 'key2': 'value2', + ... + } + }, + ] + """ + filters = filters or {} + # TODO(zhiteng) Add filters for 'consumer' + + read_deleted = "yes" if inactive else "no" + rows = model_query(context, models.QualityOfServiceSpecs, + read_deleted=read_deleted). \ + options(joinedload_all('specs')).all() + + return _dict_with_qos_specs(rows) + + +@require_admin_context +def qos_specs_get_by_name(context, name, inactive=False): + rows = _qos_specs_get_by_name(context, name, None, inactive) + + return _dict_with_qos_specs(rows)[0] + + +@require_admin_context +def qos_specs_associations_get(context, qos_specs_id): + """Return all entities associated with specified qos specs. + + For now, the only entity that is possible to associate with + a qos specs is volume type, so this is just a wrapper of + volume_type_qos_associations_get(). But it's possible to + extend qos specs association to other entities, such as volumes, + sometime in future. + """ + # Raise QoSSpecsNotFound if no specs found + _qos_specs_get_ref(context, qos_specs_id, None) + return volume_type_qos_associations_get(context, qos_specs_id) + + +@require_admin_context +def qos_specs_associate(context, qos_specs_id, type_id): + """Associate volume type from specified qos specs.""" + return volume_type_qos_associate(context, type_id, qos_specs_id) + + +@require_admin_context +def qos_specs_disassociate(context, qos_specs_id, type_id): + """Disassociate volume type from specified qos specs.""" + return volume_type_qos_disassociate(context, qos_specs_id, type_id) + + +@require_admin_context +def qos_specs_disassociate_all(context, qos_specs_id): + """Disassociate all entities associated with specified qos specs. + + For now, the only entity that is possible to associate with + a qos specs is volume type, so this is just a wrapper of + volume_type_qos_disassociate_all(). But it's possible to + extend qos specs association to other entities, such as volumes, + sometime in future. + """ + return volume_type_qos_disassociate_all(context, qos_specs_id) + + +@require_admin_context +def qos_specs_item_delete(context, qos_specs_id, key): + session = get_session() + with session.begin(): + _qos_specs_get_item(context, qos_specs_id, key) + session.query(models.QualityOfServiceSpecs). 
\ + filter(models.QualityOfServiceSpecs.key == key). \ + filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +def qos_specs_delete(context, qos_specs_id): + session = get_session() + with session.begin(): + _qos_specs_get_ref(context, qos_specs_id, session) + session.query(models.QualityOfServiceSpecs).\ + filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id, + models.QualityOfServiceSpecs.specs_id == + qos_specs_id)).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +def _qos_specs_get_item(context, qos_specs_id, key, session=None): + result = model_query(context, models.QualityOfServiceSpecs, + session=session). \ + filter(models.QualityOfServiceSpecs.key == key). \ + filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \ + first() + + if not result: + raise exception.QoSSpecsKeyNotFound( + specs_key=key, + specs_id=qos_specs_id) + + return result + + +@require_admin_context +def qos_specs_update(context, qos_specs_id, specs): + """Make updates to a existing qos specs. + + Perform add, update or delete key/values to a qos specs. + """ + + session = get_session() + with session.begin(): + # make sure qos specs exists + _qos_specs_get_ref(context, qos_specs_id, session) + spec_ref = None + for key in specs.keys(): + try: + spec_ref = _qos_specs_get_item( + context, qos_specs_id, key, session) + except exception.QoSSpecsKeyNotFound as e: + spec_ref = models.QualityOfServiceSpecs() + id = None + if spec_ref.get('id', None): + id = spec_ref['id'] + else: + id = str(uuid.uuid4()) + value = dict(id=id, key=key, value=specs[key], + specs_id=qos_specs_id, + deleted=False) + LOG.debug('qos_specs_update() value: %s' % value) + spec_ref.update(value) + spec_ref.save(session=session) + + return specs + + +#################### + + +@require_context +def volume_type_encryption_get(context, volume_type_id, session=None): + return model_query(context, models.Encryption, session=session, + read_deleted="no").\ + filter_by(volume_type_id=volume_type_id).first() + + +@require_admin_context +def volume_type_encryption_delete(context, volume_type_id): + session = get_session() + with session.begin(): + encryption = volume_type_encryption_get(context, volume_type_id, + session) + encryption.update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +def volume_type_encryption_create(context, volume_type_id, values): + session = get_session() + with session.begin(): + encryption = models.Encryption() + + if 'volume_type_id' not in values: + values['volume_type_id'] = volume_type_id + + encryption.update(values) + encryption.save(session=session) + + return encryption + + +@require_admin_context +def volume_type_encryption_update(context, volume_type_id, values): + session = get_session() + with session.begin(): + encryption = volume_type_encryption_get(context, volume_type_id, + session) + + if not encryption: + raise exception.VolumeTypeEncryptionNotFound( + type_id=volume_type_id) + + encryption.update(values) + encryption.save(session=session) + + return encryption + + +def volume_type_encryption_volume_get(context, volume_type_id, session=None): + volume_list = _volume_get_query(context, session=session, + project_only=False).\ + filter_by(volume_type_id=volume_type_id).\ + all() 
+ return volume_list + +#################### + + +@require_context +def volume_encryption_metadata_get(context, volume_id, session=None): + """Return the encryption key id for a given volume.""" + + volume_ref = _volume_get(context, volume_id) + encryption_ref = volume_type_encryption_get(context, + volume_ref['volume_type_id']) + + return { + 'encryption_key_id': volume_ref['encryption_key_id'], + 'control_location': encryption_ref['control_location'], + 'cipher': encryption_ref['cipher'], + 'key_size': encryption_ref['key_size'], + 'provider': encryption_ref['provider'], + } + + +#################### + + +@require_context +def _volume_glance_metadata_get_all(context, session=None): + rows = model_query(context, + models.VolumeGlanceMetadata, + project_only=True, + session=session).\ + filter_by(deleted=False).\ + all() + + return rows + + +@require_context +def volume_glance_metadata_get_all(context): + """Return the Glance metadata for all volumes.""" + + return _volume_glance_metadata_get_all(context) + + +@require_context +@require_volume_exists +def _volume_glance_metadata_get(context, volume_id, session=None): + rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ + filter_by(volume_id=volume_id).\ + filter_by(deleted=False).\ + all() + + if not rows: + raise exception.GlanceMetadataNotFound(id=volume_id) + + return rows + + +@require_context +@require_volume_exists +def volume_glance_metadata_get(context, volume_id): + """Return the Glance metadata for the specified volume.""" + + return _volume_glance_metadata_get(context, volume_id) + + +@require_context +@require_snapshot_exists +def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None): + rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ + filter_by(snapshot_id=snapshot_id).\ + filter_by(deleted=False).\ + all() + + if not rows: + raise exception.GlanceMetadataNotFound(id=snapshot_id) + + return rows + + +@require_context +@require_snapshot_exists +def volume_snapshot_glance_metadata_get(context, snapshot_id): + """Return the Glance metadata for the specified snapshot.""" + + return _volume_snapshot_glance_metadata_get(context, snapshot_id) + + +@require_context +@require_volume_exists +def volume_glance_metadata_create(context, volume_id, key, value): + """Update the Glance metadata for a volume by adding a new key:value pair. + + This API does not support changing the value of a key once it has been + created. + """ + + session = get_session() + with session.begin(): + rows = session.query(models.VolumeGlanceMetadata).\ + filter_by(volume_id=volume_id).\ + filter_by(key=key).\ + filter_by(deleted=False).all() + + if len(rows) > 0: + raise exception.GlanceMetadataExists(key=key, + volume_id=volume_id) + + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.volume_id = volume_id + vol_glance_metadata.key = key + vol_glance_metadata.value = str(value) + + vol_glance_metadata.save(session=session) + + return + + +@require_context +@require_snapshot_exists +def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): + """Update the Glance metadata for a snapshot. + + This copies all of the key:value pairs from the originating volume, to + ensure that a volume created from the snapshot will retain the + original metadata. 
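+    New rows are written with the snapshot_id set; the source volume's own
+    metadata rows are left untouched.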
+ """ + + session = get_session() + with session.begin(): + metadata = _volume_glance_metadata_get(context, volume_id, + session=session) + for meta in metadata: + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.snapshot_id = snapshot_id + vol_glance_metadata.key = meta['key'] + vol_glance_metadata.value = meta['value'] + + vol_glance_metadata.save(session=session) + + +@require_context +@require_volume_exists +def volume_glance_metadata_copy_from_volume_to_volume(context, + src_volume_id, + volume_id): + """Update the Glance metadata for a volume. + + This copies all all of the key:value pairs from the originating volume, + to ensure that a volume created from the volume (clone) will + retain the original metadata. + """ + + session = get_session() + with session.begin(): + metadata = _volume_glance_metadata_get(context, + src_volume_id, + session=session) + for meta in metadata: + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.volume_id = volume_id + vol_glance_metadata.key = meta['key'] + vol_glance_metadata.value = meta['value'] + + vol_glance_metadata.save(session=session) + + +@require_context +@require_volume_exists +def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): + """Update the Glance metadata from a volume (created from a snapshot) by + copying all of the key:value pairs from the originating snapshot. + + This is so that the Glance metadata from the original volume is retained. + """ + + session = get_session() + with session.begin(): + metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id, + session=session) + for meta in metadata: + vol_glance_metadata = models.VolumeGlanceMetadata() + vol_glance_metadata.volume_id = volume_id + vol_glance_metadata.key = meta['key'] + vol_glance_metadata.value = meta['value'] + + vol_glance_metadata.save(session=session) + + +@require_context +def volume_glance_metadata_delete_by_volume(context, volume_id): + model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): + model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ + filter_by(snapshot_id=snapshot_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +############################### + + +@require_context +def backup_get(context, backup_id): + result = model_query(context, models.Backup, project_only=True).\ + filter_by(id=backup_id).\ + first() + + if not result: + raise exception.BackupNotFound(backup_id=backup_id) + + return result + + +@require_admin_context +def backup_get_all(context): + return model_query(context, models.Backup).all() + + +@require_admin_context +def backup_get_all_by_host(context, host): + return model_query(context, models.Backup).filter_by(host=host).all() + + +@require_context +def backup_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + return model_query(context, models.Backup).\ + filter_by(project_id=project_id).all() + + +@require_context +def backup_create(context, values): + backup = models.Backup() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + backup.update(values) + backup.save() + return backup + + +@require_context +def backup_update(context, backup_id, values): + 
session = get_session() + with session.begin(): + backup = model_query(context, models.Backup, + session=session, read_deleted="yes").\ + filter_by(id=backup_id).first() + + if not backup: + raise exception.BackupNotFound( + _("No backup with id %s") % backup_id) + + backup.update(values) + backup.save(session=session) + return backup + + +@require_admin_context +def backup_destroy(context, backup_id): + model_query(context, models.Backup).\ + filter_by(id=backup_id).\ + update({'status': 'deleted', + 'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +############################### + + +@require_context +def _transfer_get(context, transfer_id, session=None): + query = model_query(context, models.Transfer, + session=session).\ + filter_by(id=transfer_id) + + if not is_admin_context(context): + volume = models.Volume + query = query.filter(models.Transfer.volume_id == volume.id, + volume.project_id == context.project_id) + + result = query.first() + + if not result: + raise exception.TransferNotFound(transfer_id=transfer_id) + + return result + + +@require_context +def transfer_get(context, transfer_id): + return _transfer_get(context, transfer_id) + + +def _translate_transfers(transfers): + results = [] + for transfer in transfers: + r = {} + r['id'] = transfer['id'] + r['volume_id'] = transfer['volume_id'] + r['display_name'] = transfer['display_name'] + r['created_at'] = transfer['created_at'] + r['deleted'] = transfer['deleted'] + results.append(r) + return results + + +@require_admin_context +def transfer_get_all(context): + results = model_query(context, models.Transfer).all() + return _translate_transfers(results) + + +@require_context +def transfer_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + query = model_query(context, models.Transfer).\ + filter(models.Volume.id == models.Transfer.volume_id, + models.Volume.project_id == project_id) + results = query.all() + return _translate_transfers(results) + + +@require_context +def transfer_create(context, values): + transfer = models.Transfer() + if not values.get('id'): + values['id'] = str(uuid.uuid4()) + session = get_session() + with session.begin(): + volume_ref = _volume_get(context, + values['volume_id'], + session=session) + if volume_ref['status'] != 'available': + msg = _('Volume must be available') + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + volume_ref['status'] = 'awaiting-transfer' + transfer.update(values) + transfer.save(session=session) + volume_ref.update(volume_ref) + volume_ref.save(session=session) + return transfer + + +@require_context +def transfer_destroy(context, transfer_id): + session = get_session() + with session.begin(): + transfer_ref = _transfer_get(context, + transfer_id, + session=session) + volume_ref = _volume_get(context, + transfer_ref['volume_id'], + session=session) + # If the volume state is not 'awaiting-transfer' don't change it, but + # we can still mark the transfer record as deleted. 
+ if volume_ref['status'] != 'awaiting-transfer': + msg = _('Volume in unexpected state %s, ' + 'expected awaiting-transfer') % volume_ref['status'] + LOG.error(msg) + else: + volume_ref['status'] = 'available' + volume_ref.update(volume_ref) + volume_ref.save(session=session) + model_query(context, models.Transfer, session=session).\ + filter_by(id=transfer_id).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def transfer_accept(context, transfer_id, user_id, project_id): + session = get_session() + with session.begin(): + transfer_ref = _transfer_get(context, transfer_id, session) + volume_id = transfer_ref['volume_id'] + volume_ref = _volume_get(context, volume_id, session=session) + if volume_ref['status'] != 'awaiting-transfer': + volume_status = volume_ref['status'] + msg = _('Transfer %(transfer_id)s: Volume id %(volume_id)s in ' + 'unexpected state %(status)s, expected ' + 'awaiting-transfer') % {'transfer_id': transfer_id, + 'volume_id': volume_ref['id'], + 'status': volume_ref['status']} + LOG.error(msg) + raise exception.InvalidVolume(reason=msg) + + volume_ref['status'] = 'available' + volume_ref['user_id'] = user_id + volume_ref['project_id'] = project_id + volume_ref['updated_at'] = literal_column('updated_at') + volume_ref.update(volume_ref) + volume_ref.save(session=session) + session.query(models.Transfer).\ + filter_by(id=transfer_ref['id']).\ + update({'deleted': True, + 'deleted_at': timeutils.utcnow(), + 'updated_at': literal_column('updated_at')}) diff --git a/icehouse-patches/cinder/timestamp-query-patch/installation/install.sh b/icehouse-patches/cinder/timestamp-query-patch/installation/install.sh new file mode 100644 index 00000000..e33309db --- /dev/null +++ b/icehouse-patches/cinder/timestamp-query-patch/installation/install.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder" +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../cinder" +_BACKUP_DIR="${_CINDER_DIR}/cinder_timestamp_query_patch-installation-backup" +_SCRIPT_LOGFILE="/var/log/cinder/cinder_timestamp_query_patch/installation/install.log" + +function log() +{ + log_path=`dirname ${_SCRIPT_LOGFILE}` + if [ ! -d $log_path ] ; then + mkdir -p $log_path + chmod 777 $_SCRIPT_LOGFILE + fi + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE +} + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + + +cd `dirname $0` + +log "checking installation directories..." +if [ ! -d "${_CINDER_DIR}" ] ; then + log "Could not find the cinder installation. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi + +log "checking previous installation..." +if [ -d "${_BACKUP_DIR}/cinder" ] ; then + log "It seems cinder timestamp query has already been installed!" 
+ log "Please check README for solution if this is not true." + exit 1 +fi + +log "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}/cinder" +mkdir -p "${_BACKUP_DIR}/etc/cinder" +cp -r "${_CINDER_DIR}/db" "${_BACKUP_DIR}/cinder" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/cinder" + echo "Error in code backup, aborted." + exit 1 +fi + +log "copying in new files..." +cp -r "${_CODE_DIR}" `dirname ${_CINDER_DIR}` +if [ $? -ne 0 ] ; then + log "Error in copying, aborted." + log "Recovering original files..." + cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder" + if [ $? -ne 0 ] ; then + log "Recovering failed! Please install manually." + fi + exit 1 +fi + +service openstack-cinder-api restart + +if [ $? -ne 0 ] ; then + log "There was an error in restarting the service, please restart cinder api manually." + exit 1 +fi + +log "Completed." +log "See README to get started." +exit 0 diff --git a/icehouse-patches/cinder/timestamp-query-patch/installation/uninstall.sh b/icehouse-patches/cinder/timestamp-query-patch/installation/uninstall.sh new file mode 100644 index 00000000..efb491ff --- /dev/null +++ b/icehouse-patches/cinder/timestamp-query-patch/installation/uninstall.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_CINDER_CONF_DIR="/etc/cinder" +_CINDER_CONF_FILE="cinder.conf" +_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder" +_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log" + + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../cinder" +_BACKUP_DIR="${_CINDER_DIR}/cinder_timestamp_query_patch-installation-backup" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/nova-solver-scheduler/installation/${_SCRIPT_NAME}.log" + +function log() +{ + if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then + mkdir -p `dirname ${_CINDER_INSTALL_LOG}` + touch $_CINDER_INSTALL_LOG + chmod 777 $_CINDER_INSTALL_LOG + fi + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG +} + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + +cd `dirname $0` + +log "checking installation directories..." +if [ ! -d "${_CINDER_DIR}" ] ; then + log "Could not find the cinder installation. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi +if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then + log "Could not find cinder config file. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi + +log "checking backup..." +if [ ! -d "${_BACKUP_DIR}/cinder" ] ; then + log "Could not find backup files. It is possible that the cinder-proxy has been uninstalled." + log "If this is not the case, then please uninstall manually." + exit 1 +fi + +log "backing up current files that might be overwritten..." 
+if [ -d "${_BACKUP_DIR}/uninstall" ] ; then + rm -r "${_BACKUP_DIR}/uninstall" +fi +mkdir -p "${_BACKUP_DIR}/uninstall/cinder" +mkdir -p "${_BACKUP_DIR}/uninstall/etc/cinder" +cp -r "${_CINDER_DIR}/volume" "${_BACKUP_DIR}/uninstall/cinder/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/uninstall/cinder" + log "Error in code backup, aborted." + exit 1 +fi +cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/uninstall/etc/cinder/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/uninstall/cinder" + rm -r "${_BACKUP_DIR}/uninstall/etc" + log "Error in config backup, aborted." + exit 1 +fi + +log "restoring code to the status before installing cinder-proxy..." +cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` +if [ $? -ne 0 ] ; then + log "Error in copying, aborted." + log "Recovering current files..." + cp -r "${_BACKUP_DIR}/uninstall/cinder" `dirname ${_CINDER_DIR}` + if [ $? -ne 0 ] ; then + log "Recovering failed! Please uninstall manually." + fi + exit 1 +fi + + +log "cleaning up backup files..." +rm -r "${_BACKUP_DIR}/cinder" && rm -r "${_BACKUP_DIR}/etc" +if [ $? -ne 0 ] ; then + log "There was an error when cleaning up the backup files." +fi + +log "restarting cinder api..." +service openstack-cinder-api restart +if [ $? -ne 0 ] ; then + log "There was an error in restarting the service, please restart cinder volume manually." + exit 1 +fi + +log "Completed." + +exit 0 diff --git a/icehouse-patches/cinder/uuid-mapping-patch/README.md b/icehouse-patches/cinder/uuid-mapping-patch/README.md new file mode 100644 index 00000000..eec8e0d9 --- /dev/null +++ b/icehouse-patches/cinder/uuid-mapping-patch/README.md @@ -0,0 +1,65 @@ +Cinder uuid-mapping-patch +=============================== + +it will be patched in cascading level's control node + +Cascading level node can manage volume/snapshot/backup/ in cascaded level node, +because of the mapping_uuid stored in cascading level represent the relationship of +volume/snapshot/bakcup + +Key modules +----------- + +* adding mapping_uuid column in cinder volume /cinder snapshot /cinder backup table, + when cinder synchronizes db: + + cinder\db\sqlalchemy\migrate_repo\versions\023_add_mapping_uuid.py + cinder\db\sqlalchemy\migrate_repo\versions\024_snapshots_add_mapping_uuid.py + cinder\db\sqlalchemy\migrate_repo\versions\025_backup_add_mapping_uuid.py + cinder\db\sqlalchemy\models.py + + +Requirements +------------ +* openstack icehouse has been installed + +Installation +------------ + +We provide two ways to install the mapping-uuid-patch code. In this section, we will guide you through installing the instance_mapping_uuid patch. + +* **Note:** + + - Make sure you have an existing installation of **Openstack Icehouse**. + - We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified: + +* **Manual Installation** + + - Make sure you have performed backups properly. + + - Navigate to the local repository and copy the contents in 'cinder' sub-directory to the corresponding places in existing nova, e.g. + ```cp -r $LOCAL_REPOSITORY_DIR/cinder $NOVA_PARENT_DIR``` + (replace the $... with actual directory name.) + + - synchronize the cinder db. + ``` + mysql -u root -p$MYSQL_PASS -e "DROP DATABASE if exists cinder; + CREATE DATABASE cinder; + GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY $MYSQL_PASS; + GRANT ALL PRIVILEGES ON *.* TO 'cinder'@'%'IDENTIFIED BY $MYSQL_PASS; + cinder-manage db sync + ``` + + - Done. 
The cinder proxy should be working with a demo configuration. + +* **Automatic Installation** + + - Make sure you have performed backups properly. + + - Navigate to the installation directory and run installation script. + ``` + cd $LOCAL_REPOSITORY_DIR/installation + sudo bash ./install.sh + ``` + (replace the $... with actual directory name.) + diff --git a/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/023_add_mapping_uuid.py b/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/023_add_mapping_uuid.py new file mode 100644 index 00000000..1361cc7c --- /dev/null +++ b/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/023_add_mapping_uuid.py @@ -0,0 +1,36 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from sqlalchemy import String, Column, MetaData, Table + + +def upgrade(migrate_engine): + """Add mapping_uuid column to volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + mapping_uuid = Column('mapping_uuid', String(36)) + volumes.create_column(mapping_uuid) + + +def downgrade(migrate_engine): + """Remove mapping_uuid column from volumes.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + mapping_uuid = volumes.columns.mapping_uuid + volumes.drop_column(mapping_uuid) diff --git a/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/024_snapshots_add_mapping_uuid.py b/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/024_snapshots_add_mapping_uuid.py new file mode 100644 index 00000000..26b241d5 --- /dev/null +++ b/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/024_snapshots_add_mapping_uuid.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
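+
+# This migration adds the mapping_uuid column to the snapshots table so a
+# cascading-level snapshot can reference its cascaded counterpart.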
+ + +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + mapping_uuid = Column('mapping_uuid', String(36)) + snapshots.create_column(mapping_uuid) + snapshots.update().values(mapping_uuid=None).execute() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + snapshots = Table('snapshots', meta, autoload=True) + mapping_uuid = snapshots.columns.mapping_uuid + snapshots.drop_column(mapping_uuid) diff --git a/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/025_backup_add_mapping_uuid.py b/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/025_backup_add_mapping_uuid.py new file mode 100644 index 00000000..0e537ed0 --- /dev/null +++ b/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/migrate_repo/versions/025_backup_add_mapping_uuid.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from sqlalchemy import Column +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + backups = Table('backups', meta, autoload=True) + mapping_uuid = Column('mapping_uuid', String(36)) + backups.create_column(mapping_uuid) + backups.update().values(mapping_uuid=None).execute() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + backups = Table('backups', meta, autoload=True) + mapping_uuid = backups.columns.mapping_uuid + backups.drop_column(mapping_uuid) diff --git a/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/models.py b/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/models.py new file mode 100644 index 00000000..dc09292b --- /dev/null +++ b/icehouse-patches/cinder/uuid-mapping-patch/cinder/db/sqlalchemy/models.py @@ -0,0 +1,515 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models for cinder data. 
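+
+This copy is part of the uuid-mapping patch: it adds a mapping_uuid column to
+the Volume, Snapshot and Backup models so that cascading-level resources can
+reference their cascaded counterparts.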
+""" + + +from sqlalchemy import Column, Integer, String, Text, schema +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey, DateTime, Boolean +from sqlalchemy.orm import relationship, backref + +from oslo.config import cfg + +from cinder.openstack.common.db.sqlalchemy import models +from cinder.openstack.common import timeutils + + +CONF = cfg.CONF +BASE = declarative_base() + + +class CinderBase(models.TimestampMixin, + models.ModelBase): + + """Base class for Cinder Models.""" + + __table_args__ = {'mysql_engine': 'InnoDB'} + + # TODO(rpodolyaka): reuse models.SoftDeleteMixin in the next stage + # of implementing of BP db-cleanup + deleted_at = Column(DateTime) + deleted = Column(Boolean, default=False) + metadata = None + + def delete(self, session=None): + """Delete this object.""" + self.deleted = True + self.deleted_at = timeutils.utcnow() + self.save(session=session) + + +class Service(BASE, CinderBase): + + """Represents a running service on a host.""" + + __tablename__ = 'services' + id = Column(Integer, primary_key=True) + host = Column(String(255)) # , ForeignKey('hosts.id')) + binary = Column(String(255)) + topic = Column(String(255)) + report_count = Column(Integer, nullable=False, default=0) + disabled = Column(Boolean, default=False) + availability_zone = Column(String(255), default='cinder') + disabled_reason = Column(String(255)) + + +class Volume(BASE, CinderBase): + + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'volumes' + id = Column(String(36), primary_key=True) + _name_id = Column(String(36)) # Don't access/modify this directly! + + @property + def name_id(self): + return self.id if not self._name_id else self._name_id + + @name_id.setter + def name_id(self, value): + self._name_id = value + + @property + def name(self): + return CONF.volume_name_template % self.name_id + + ec2_id = Column(Integer) + user_id = Column(String(255)) + project_id = Column(String(255)) + + snapshot_id = Column(String(36)) + + host = Column(String(255)) # , ForeignKey('hosts.id')) + size = Column(Integer) + availability_zone = Column(String(255)) # TODO(vish): foreign key? + instance_uuid = Column(String(36)) + attached_host = Column(String(255)) + mountpoint = Column(String(255)) + attach_time = Column(String(255)) # TODO(vish): datetime + status = Column(String(255)) # TODO(vish): enum? 
+ attach_status = Column(String(255)) # TODO(vish): enum + migration_status = Column(String(255)) + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + provider_location = Column(String(255)) + provider_auth = Column(String(255)) + provider_geometry = Column(String(255)) + + volume_type_id = Column(String(36)) + source_volid = Column(String(36)) + encryption_key_id = Column(String(36)) + + deleted = Column(Boolean, default=False) + bootable = Column(Boolean, default=False) + + mapping_uuid = Column(String(36)) + + +class VolumeMetadata(BASE, CinderBase): + + """Represents a metadata key/value pair for a volume.""" + __tablename__ = 'volume_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) + volume = relationship(Volume, backref="volume_metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeMetadata.volume_id == Volume.id,' + 'VolumeMetadata.deleted == False)') + + +class VolumeAdminMetadata(BASE, CinderBase): + + """Represents a administrator metadata key/value pair for a volume.""" + __tablename__ = 'volume_admin_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) + volume = relationship(Volume, backref="volume_admin_metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeAdminMetadata.volume_id == Volume.id,' + 'VolumeAdminMetadata.deleted == False)') + + +class VolumeTypes(BASE, CinderBase): + + """Represent possible volume_types of volumes offered.""" + __tablename__ = "volume_types" + id = Column(String(36), primary_key=True) + name = Column(String(255)) + # A reference to qos_specs entity + qos_specs_id = Column(String(36), + ForeignKey('quality_of_service_specs.id')) + volumes = relationship(Volume, + backref=backref('volume_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(' + 'Volume.volume_type_id == VolumeTypes.id, ' + 'VolumeTypes.deleted == False)') + + +class VolumeTypeExtraSpecs(BASE, CinderBase): + + """Represents additional specs as key/value pairs for a volume_type.""" + __tablename__ = 'volume_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_type_id = Column(String(36), + ForeignKey('volume_types.id'), + nullable=False) + volume_type = relationship( + VolumeTypes, + backref="extra_specs", + foreign_keys=volume_type_id, + primaryjoin='and_(' + 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,' + 'VolumeTypeExtraSpecs.deleted == False)' + ) + + +class QualityOfServiceSpecs(BASE, CinderBase): + + """Represents QoS specs as key/value pairs. + + QoS specs is standalone entity that can be associated/disassociated + with volume types (one to many relation). 
Adjacency list relationship + pattern is used in this model in order to represent following hierarchical + data with in flat table, e.g, following structure + + qos-specs-1 'Rate-Limit' + | + +------> consumer = 'front-end' + +------> total_bytes_sec = 1048576 + +------> total_iops_sec = 500 + + qos-specs-2 'QoS_Level1' + | + +------> consumer = 'back-end' + +------> max-iops = 1000 + +------> min-iops = 200 + + is represented by: + + id specs_id key value + ------ -------- ------------- ----- + UUID-1 NULL QoSSpec_Name Rate-Limit + UUID-2 UUID-1 consumer front-end + UUID-3 UUID-1 total_bytes_sec 1048576 + UUID-4 UUID-1 total_iops_sec 500 + UUID-5 NULL QoSSpec_Name QoS_Level1 + UUID-6 UUID-5 consumer back-end + UUID-7 UUID-5 max-iops 1000 + UUID-8 UUID-5 min-iops 200 + """ + __tablename__ = 'quality_of_service_specs' + id = Column(String(36), primary_key=True) + specs_id = Column(String(36), ForeignKey(id)) + key = Column(String(255)) + value = Column(String(255)) + + specs = relationship( + "QualityOfServiceSpecs", + cascade="all, delete-orphan", + backref=backref("qos_spec", remote_side=id), + ) + + vol_types = relationship( + VolumeTypes, + backref=backref('qos_specs'), + foreign_keys=id, + primaryjoin='and_(' + 'or_(VolumeTypes.qos_specs_id == ' + 'QualityOfServiceSpecs.id,' + 'VolumeTypes.qos_specs_id == ' + 'QualityOfServiceSpecs.specs_id),' + 'QualityOfServiceSpecs.deleted == False)') + + +class VolumeGlanceMetadata(BASE, CinderBase): + + """Glance metadata for a bootable volume.""" + __tablename__ = 'volume_glance_metadata' + id = Column(Integer, primary_key=True, nullable=False) + volume_id = Column(String(36), ForeignKey('volumes.id')) + snapshot_id = Column(String(36), ForeignKey('snapshots.id')) + key = Column(String(255)) + value = Column(Text) + volume = relationship(Volume, backref="volume_glance_metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeGlanceMetadata.volume_id == Volume.id,' + 'VolumeGlanceMetadata.deleted == False)') + + +class Quota(BASE, CinderBase): + + """Represents a single quota override for a project. + + If there is no row for a given project id and resource, then the + default for the quota class is used. If there is no row for a + given quota class and resource, then the default for the + deployment is used. If the row is present but the hard limit is + Null, then the resource is unlimited. + """ + + __tablename__ = 'quotas' + id = Column(Integer, primary_key=True) + + project_id = Column(String(255), index=True) + + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) + + +class QuotaClass(BASE, CinderBase): + + """Represents a single quota override for a quota class. + + If there is no row for a given quota class and resource, then the + default for the deployment is used. If the row is present but the + hard limit is Null, then the resource is unlimited. 
+ """ + + __tablename__ = 'quota_classes' + id = Column(Integer, primary_key=True) + + class_name = Column(String(255), index=True) + + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) + + +class QuotaUsage(BASE, CinderBase): + + """Represents the current usage for a given resource.""" + + __tablename__ = 'quota_usages' + id = Column(Integer, primary_key=True) + + project_id = Column(String(255), index=True) + resource = Column(String(255)) + + in_use = Column(Integer) + reserved = Column(Integer) + + @property + def total(self): + return self.in_use + self.reserved + + until_refresh = Column(Integer, nullable=True) + + +class Reservation(BASE, CinderBase): + + """Represents a resource reservation for quotas.""" + + __tablename__ = 'reservations' + id = Column(Integer, primary_key=True) + uuid = Column(String(36), nullable=False) + + usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) + + project_id = Column(String(255), index=True) + resource = Column(String(255)) + + delta = Column(Integer) + expire = Column(DateTime, nullable=False) + + usage = relationship( + "QuotaUsage", + foreign_keys=usage_id, + primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' + 'QuotaUsage.deleted == 0)') + + +class Snapshot(BASE, CinderBase): + + """Represents a snapshot of volume.""" + __tablename__ = 'snapshots' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return CONF.snapshot_name_template % self.id + + @property + def volume_name(self): + return self.volume.name # pylint: disable=E1101 + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(String(36)) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + encryption_key_id = Column(String(36)) + volume_type_id = Column(String(36)) + + provider_location = Column(String(255)) + + volume = relationship(Volume, backref="snapshots", + foreign_keys=volume_id, + primaryjoin='Snapshot.volume_id == Volume.id') + + mapping_uuid = Column(String(36)) + + +class SnapshotMetadata(BASE, CinderBase): + + """Represents a metadata key/value pair for a snapshot.""" + __tablename__ = 'snapshot_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + snapshot_id = Column(String(36), + ForeignKey('snapshots.id'), + nullable=False) + snapshot = relationship(Snapshot, backref="snapshot_metadata", + foreign_keys=snapshot_id, + primaryjoin='and_(' + 'SnapshotMetadata.snapshot_id == Snapshot.id,' + 'SnapshotMetadata.deleted == False)') + + +class IscsiTarget(BASE, CinderBase): + + """Represents an iscsi target for a given host.""" + __tablename__ = 'iscsi_targets' + __table_args__ = (schema.UniqueConstraint("target_num", "host"), + {'mysql_engine': 'InnoDB'}) + id = Column(Integer, primary_key=True) + target_num = Column(Integer) + host = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, + backref=backref('iscsi_target', uselist=False), + foreign_keys=volume_id, + primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' + 'IscsiTarget.deleted==False)') + + +class Backup(BASE, CinderBase): + + """Represents a backup of a volume to Swift.""" + __tablename__ = 'backups' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return CONF.backup_name_template % self.id + + user_id = 
Column(String(255), nullable=False) + project_id = Column(String(255), nullable=False) + + volume_id = Column(String(36), nullable=False) + host = Column(String(255)) + availability_zone = Column(String(255)) + display_name = Column(String(255)) + display_description = Column(String(255)) + container = Column(String(255)) + status = Column(String(255)) + fail_reason = Column(String(255)) + service_metadata = Column(String(255)) + service = Column(String(255)) + size = Column(Integer) + object_count = Column(Integer) + mapping_uuid = Column(String(36)) + + +class Encryption(BASE, CinderBase): + + """Represents encryption requirement for a volume type. + + Encryption here is a set of performance characteristics describing + cipher, provider, and key_size for a certain volume type. + """ + + __tablename__ = 'encryption' + cipher = Column(String(255)) + key_size = Column(Integer) + provider = Column(String(255)) + control_location = Column(String(255)) + volume_type_id = Column(String(36), + ForeignKey('volume_types.id'), + primary_key=True) + volume_type = relationship( + VolumeTypes, + backref="encryption", + foreign_keys=volume_type_id, + primaryjoin='and_(' + 'Encryption.volume_type_id == VolumeTypes.id,' + 'Encryption.deleted == False)' + ) + + +class Transfer(BASE, CinderBase): + + """Represents a volume transfer request.""" + __tablename__ = 'transfers' + id = Column(String(36), primary_key=True) + volume_id = Column(String(36), ForeignKey('volumes.id')) + display_name = Column(String(255)) + salt = Column(String(255)) + crypt_hash = Column(String(255)) + expires_at = Column(DateTime) + volume = relationship(Volume, backref="transfer", + foreign_keys=volume_id, + primaryjoin='and_(' + 'Transfer.volume_id == Volume.id,' + 'Transfer.deleted == False)') + + +def register_models(): + """Register Models and create metadata. + + Called from cinder.db.sqlalchemy.__init__ as part of loading the driver, + it will never need to be called explicitly elsewhere unless the + connection is lost and needs to be reestablished. + """ + from sqlalchemy import create_engine + models = (Backup, + Service, + Volume, + VolumeMetadata, + VolumeAdminMetadata, + SnapshotMetadata, + Transfer, + VolumeTypeExtraSpecs, + VolumeTypes, + VolumeGlanceMetadata, + ) + engine = create_engine(CONF.database.connection, echo=False) + for model in models: + model.metadata.create_all(engine) diff --git a/icehouse-patches/cinder/uuid-mapping-patch/installation/install.sh b/icehouse-patches/cinder/uuid-mapping-patch/installation/install.sh new file mode 100644 index 00000000..738c4b42 --- /dev/null +++ b/icehouse-patches/cinder/uuid-mapping-patch/installation/install.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_MYSQL_PASS="1234" +_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder" +# if you did not make changes to the installation files, +# please do not edit the following directories. 
+_CODE_DIR="../cinder" +_BACKUP_DIR="${_CINDER_DIR}/cinder_mapping_uuid_patch-installation-backup" + +_SCRIPT_LOGFILE="/var/log/cinder/cinder_mapping_uuid_patch/installation/install.log" + +function log() +{ + log_path=`dirname ${_SCRIPT_LOGFILE}` + if [ ! -d $log_path ] ; then + mkdir -p $log_path + chmod 777 $log_path + fi + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE +} + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + + +cd `dirname $0` + +log "checking installation directories..." +if [ ! -d "${_CINDER_DIR}" ] ; then + log "Could not find the cinder installation. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi + +log "checking previous installation..." +if [ -d "${_BACKUP_DIR}/cinder" ] ; then + log "It seems cinder mapping-uuid-patch has already been installed!" + log "Please check README for solution if this is not true." + exit 1 +fi + +log "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}/cinder" +mkdir -p "${_BACKUP_DIR}/etc/cinder" +cp -r "${_CINDER_DIR}/db" "${_BACKUP_DIR}/cinder" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/cinder" + echo "Error in code backup, aborted." + exit 1 +fi + +log "copying in new files..." +cp -r "${_CODE_DIR}" `dirname ${_CINDER_DIR}` +if [ $? -ne 0 ] ; then + log "Error in copying, aborted." + log "Recovering original files..." + cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder" + if [ $? -ne 0 ] ; then + log "Recovering failed! Please install manually." + fi + exit 1 +fi + +log "syc cinder db..." +mysql -u root -p$_MYSQL_PASS -e "DROP DATABASE if exists cinder;CREATE DATABASE cinder;" + +cinder-manage db sync + +if [ $? -ne 0 ] ; then + log "There was an error in restarting the service, please restart cinder api manually." + exit 1 +fi + +log "Completed." +log "See README to get started." +exit 0 diff --git a/icehouse-patches/cinder/uuid-mapping-patch/installation/uninstall.sh b/icehouse-patches/cinder/uuid-mapping-patch/installation/uninstall.sh new file mode 100644 index 00000000..04cdda21 --- /dev/null +++ b/icehouse-patches/cinder/uuid-mapping-patch/installation/uninstall.sh @@ -0,0 +1,118 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_MYSQL_PASS="1234" +_CINDER_CONF_DIR="/etc/cinder" +_CINDER_CONF_FILE="cinder.conf" +_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder" +_CINDER_INSTALL_LOG="/var/log/cinder/cinder_mapping_uuid_patch/installation/install.log" + + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../cinder" +_BACKUP_DIR="${_CINDER_DIR}/cinder_mapping_uuid_patch-installation-backup" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/nova-solver-scheduler/installation/${_SCRIPT_NAME}.log" + +function log() +{ + if [ ! 
-f "${_CINDER_INSTALL_LOG}" ] ; then + mkdir -p `dirname ${_CINDER_INSTALL_LOG}` + touch $_CINDER_INSTALL_LOG + chmod 777 $_CINDER_INSTALL_LOG + fi + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG +} + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + +cd `dirname $0` + +log "checking installation directories..." +if [ ! -d "${_CINDER_DIR}" ] ; then + log "Could not find the cinder installation. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi +if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then + log "Could not find cinder config file. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi + +log "checking backup..." +if [ ! -d "${_BACKUP_DIR}/cinder" ] ; then + log "Could not find backup files. It is possible that the cinder-proxy has been uninstalled." + log "If this is not the case, then please uninstall manually." + exit 1 +fi + +log "backing up current files that might be overwritten..." +if [ -d "${_BACKUP_DIR}/uninstall" ] ; then + rm -r "${_BACKUP_DIR}/uninstall" +fi +mkdir -p "${_BACKUP_DIR}/uninstall/cinder" +mkdir -p "${_BACKUP_DIR}/uninstall/etc/cinder" +cp -r "${_CINDER_DIR}/db" "${_BACKUP_DIR}/uninstall/cinder/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/uninstall/cinder" + log "Error in code backup, aborted." + exit 1 +fi +cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/uninstall/etc/cinder/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/uninstall/cinder" + rm -r "${_BACKUP_DIR}/uninstall/etc" + log "Error in config backup, aborted." + exit 1 +fi + +log "restoring code to the status before installing cinder-proxy..." +cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` +if [ $? -ne 0 ] ; then + log "Error in copying, aborted." + log "Recovering current files..." + cp -r "${_BACKUP_DIR}/uninstall/cinder" `dirname ${_CINDER_DIR}` + if [ $? -ne 0 ] ; then + log "Recovering failed! Please uninstall manually." + fi + exit 1 +fi + + +log "cleaning up backup files..." +rm -r "${_BACKUP_DIR}/cinder" && rm -r "${_BACKUP_DIR}/etc" +if [ $? -ne 0 ] ; then + log "There was an error when cleaning up the backup files." +fi + +log "restarting cinder api..." +mysql -u root -p$_MYSQL_PASS -e "DROP DATABASE if exists cinder;CREATE DATABASE cinder;" +cinder-manage db sync +service openstack-cinder-api restart +if [ $? -ne 0 ] ; then + log "There was an error in restarting the service, please restart cinder volume manually." + exit 1 +fi + +log "Completed." + +exit 0 diff --git a/icehouse-patches/glance/glance_location_patch/README.md b/icehouse-patches/glance/glance_location_patch/README.md new file mode 100644 index 00000000..32fac7de --- /dev/null +++ b/icehouse-patches/glance/glance_location_patch/README.md @@ -0,0 +1,23 @@ +Glance-Cascading Patch +================ + + +Introduction +----------------------------- + +*For glance cascading, we have to create the relationship bewteen one cascading-glance and some cascaded-glances. In order to achieve this goal, we using glance's multi-location feature, the relationshiop can be as a location with the special format. Besides, we modify the image status changing-rule: The image's active toggle into 'active' only if the cascaded have been synced. 
+Because of these two reasons, a few existing source files were modified to adapt them for cascading:
+
+    glance/store/http.py
+    glance/store/__init__.py
+    glance/api/v2/images.py
+    glance/gateway.py
+    glance/common/utils.py
+    glance/common/config.py
+    glance/common/exception.py
+
+
+Install
+------------------------------
+
+
+*To apply this patch, simply replace the original files with these files, or run the install.sh in the glancesync/installation/ directory.
diff --git a/icehouse-patches/glance/glance_location_patch/glance.egg-info/entry_points.txt b/icehouse-patches/glance/glance_location_patch/glance.egg-info/entry_points.txt
new file mode 100644
index 00000000..476cf23a
--- /dev/null
+++ b/icehouse-patches/glance/glance_location_patch/glance.egg-info/entry_points.txt
@@ -0,0 +1,21 @@
+[console_scripts]
+glance-api = glance.cmd.api:main
+glance-cache-cleaner = glance.cmd.cache_cleaner:main
+glance-cache-manage = glance.cmd.cache_manage:main
+glance-cache-prefetcher = glance.cmd.cache_prefetcher:main
+glance-cache-pruner = glance.cmd.cache_pruner:main
+glance-control = glance.cmd.control:main
+glance-manage = glance.cmd.manage:main
+glance-registry = glance.cmd.registry:main
+glance-replicator = glance.cmd.replicator:main
+glance-scrubber = glance.cmd.scrubber:main
+
+[glance.common.image_location_strategy.modules]
+location_order_strategy = glance.common.location_strategy.location_order
+store_type_strategy = glance.common.location_strategy.store_type
+
+[glance.sync.store.location]
+filesystem = glance.sync.store._drivers.filesystem:LocationCreator
+
+[glance.sync.store.driver]
+filesystem = glance.sync.store._drivers.filesystem:Store
\ No newline at end of file
diff --git a/icehouse-patches/glance/glance_location_patch/glance/api/v2/images.py b/icehouse-patches/glance/glance_location_patch/glance/api/v2/images.py
new file mode 100644
index 00000000..67fb1a3e
--- /dev/null
+++ b/icehouse-patches/glance/glance_location_patch/glance/api/v2/images.py
@@ -0,0 +1,822 @@
+# Copyright 2012 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
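+
+# NOTE: patched for glance cascading. The v2 images controller below is wired
+# to the glance.sync client, and (when sync_enabled is set) 'show' checks
+# whether the cascaded glances have synced the image before returning it.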
+ +import re + +from oslo.config import cfg +import six.moves.urllib.parse as urlparse +import webob.exc + +from glance.api import policy +from glance.common import exception +from glance.common import location_strategy +from glance.common import utils +from glance.common import wsgi +import glance.db +import glance.gateway +import glance.notifier +from glance.openstack.common import jsonutils as json +import glance.openstack.common.log as logging +from glance.openstack.common import timeutils +import glance.schema +import glance.store +import glance.sync.client.v1.api as sync_api + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'glance.common.config', group='image_format') +CONF.import_opt('container_formats', 'glance.common.config', + group='image_format') +CONF.import_opt('sync_enabled', 'glance.common.config') + + +class ImagesController(object): + + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, + store_api=None): + self.db_api = db_api or glance.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or glance.notifier.Notifier() + self.store_api = store_api or glance.store + self.sync_api = sync_api + self.sync_api.configure_sync_client() + self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, + self.notifier, self.policy, + self.sync_api) + + @utils.mutating + def create(self, req, image, extra_properties, tags): + image_factory = self.gateway.get_image_factory(req.context) + image_repo = self.gateway.get_repo(req.context) + try: + image = image_factory.new_image(extra_properties=extra_properties, + tags=tags, **image) + image_repo.add(image) + except exception.DuplicateLocation as dup: + raise webob.exc.HTTPBadRequest(explanation=dup.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.InvalidParameterValue as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.LimitExceeded as e: + LOG.info(unicode(e)) + raise webob.exc.HTTPRequestEntityTooLarge( + explanation=e.msg, request=req, content_type='text/plain') + + return image + + def index(self, req, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters=None, member_status='accepted'): + result = {} + if filters is None: + filters = {} + filters['deleted'] = False + + if limit is None: + limit = CONF.limit_param_default + limit = min(CONF.api_limit_max, limit) + + image_repo = self.gateway.get_repo(req.context) + try: + images = image_repo.list(marker=marker, limit=limit, + sort_key=sort_key, sort_dir=sort_dir, + filters=filters, + member_status=member_status) + if len(images) != 0 and len(images) == limit: + result['next_marker'] = images[-1].image_id + except (exception.NotFound, exception.InvalidSortKey, + exception.InvalidFilterRangeValue) as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + result['images'] = images + return result + + def show(self, req, image_id): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + if CONF.sync_enabled: + sync_client = sync_api.get_sync_client(req.context) + eps = sync_client.get_cascaded_endpoints() + utils.check_synced(image, eps) + return image + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + + @utils.mutating + def update(self, 
req, image_id, changes): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + + for change in changes: + change_method_name = '_do_%s' % change['op'] + assert hasattr(self, change_method_name) + change_method = getattr(self, change_method_name) + change_method(req, image, change) + + if changes: + image_repo.save(image) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.InvalidParameterValue as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.StorageQuotaFull as e: + msg = (_("Denying attempt to upload image because it exceeds the ." + "quota: %s") % e) + LOG.info(msg) + raise webob.exc.HTTPRequestEntityTooLarge( + explanation=msg, request=req, content_type='text/plain') + except exception.LimitExceeded as e: + LOG.info(unicode(e)) + raise webob.exc.HTTPRequestEntityTooLarge( + explanation=e.msg, request=req, content_type='text/plain') + + return image + + def _do_replace(self, req, image, change): + path = change['path'] + path_root = path[0] + value = change['value'] + if path_root == 'locations': + self._do_replace_locations(image, value) + else: + if hasattr(image, path_root): + setattr(image, path_root, value) + elif path_root in image.extra_properties: + image.extra_properties[path_root] = value + else: + msg = _("Property %s does not exist.") + raise webob.exc.HTTPConflict(msg % path_root) + + def _do_add(self, req, image, change): + path = change['path'] + path_root = path[0] + value = change['value'] + if path_root == 'locations': + self._do_add_locations(image, path[1], value) + else: + if (hasattr(image, path_root) or + path_root in image.extra_properties): + msg = _("Property %s already present.") + raise webob.exc.HTTPConflict(msg % path_root) + image.extra_properties[path_root] = value + + def _do_remove(self, req, image, change): + path = change['path'] + path_root = path[0] + if path_root == 'locations': + self._do_remove_locations(image, path[1]) + else: + if hasattr(image, path_root): + msg = _("Property %s may not be removed.") + raise webob.exc.HTTPForbidden(msg % path_root) + elif path_root in image.extra_properties: + del image.extra_properties[path_root] + else: + msg = _("Property %s does not exist.") + raise webob.exc.HTTPConflict(msg % path_root) + + @utils.mutating + def delete(self, req, image_id): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + image.delete() + image_repo.remove(image) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + msg = (_("Failed to find image %(image_id)s to delete") % + {'image_id': image_id}) + LOG.info(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + + def _get_locations_op_pos(self, path_pos, max_pos, allow_max): + if path_pos is None or max_pos is None: + return None + pos = max_pos if allow_max else max_pos - 1 + if path_pos.isdigit(): + pos = int(path_pos) + elif path_pos != '-': + return None + if (not allow_max) and (pos not in range(max_pos)): + return None + return pos + + def _do_replace_locations(self, image, value): + if len(image.locations) > 0 and len(value) > 0: + msg = _("Cannot replace locations from a non-empty " + "list to a non-empty list.") + raise webob.exc.HTTPBadRequest(explanation=msg) + if len(value) == 0: + # NOTE(zhiyan): this actually deletes the location + # from the backend store. 
+ del image.locations[:] + if image.status == 'active': + image.status = 'queued' + else: # NOTE(zhiyan): len(image.locations) == 0 + try: + image.locations = value + if image.status == 'queued': + image.status = 'active' + except (exception.BadStoreUri, exception.DuplicateLocation) as bse: + raise webob.exc.HTTPBadRequest(explanation=bse.msg) + except ValueError as ve: # update image status failed. + raise webob.exc.HTTPBadRequest(explanation=unicode(ve)) + + def _do_add_locations(self, image, path_pos, value): + pos = self._get_locations_op_pos(path_pos, + len(image.locations), True) + if pos is None: + msg = _("Invalid position for adding a location.") + raise webob.exc.HTTPBadRequest(explanation=msg) + try: + image.locations.insert(pos, value) + if image.status == 'queued': + image.status = 'active' + except (exception.BadStoreUri, exception.DuplicateLocation) as bse: + raise webob.exc.HTTPBadRequest(explanation=bse.msg) + except ValueError as ve: # update image status failed. + raise webob.exc.HTTPBadRequest(explanation=unicode(ve)) + + def _do_remove_locations(self, image, path_pos): + pos = self._get_locations_op_pos(path_pos, + len(image.locations), False) + if pos is None: + msg = _("Invalid position for removing a location.") + raise webob.exc.HTTPBadRequest(explanation=msg) + try: + # NOTE(zhiyan): this actually deletes the location + # from the backend store. + image.locations.pop(pos) + except Exception as e: + raise webob.exc.HTTPInternalServerError(explanation=unicode(e)) + if (len(image.locations) == 0) and (image.status == 'active'): + image.status = 'queued' + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + + _disallowed_properties = ['direct_url', 'self', 'file', 'schema'] + _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', + 'size', 'virtual_size', 'direct_url', 'self', + 'file', 'schema'] + _reserved_properties = ['owner', 'is_public', 'location', 'deleted', + 'deleted_at'] + _base_properties = ['checksum', 'created_at', 'container_format', + 'disk_format', 'id', 'min_disk', 'min_ram', 'name', + 'size', 'virtual_size', 'status', 'tags', + 'updated_at', 'visibility', 'protected'] + _path_depth_limits = {'locations': {'add': 2, 'remove': 2, 'replace': 1}} + + def __init__(self, schema=None): + super(RequestDeserializer, self).__init__() + self.schema = schema or get_schema() + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + @classmethod + def _check_allowed(cls, image): + for key in cls._disallowed_properties: + if key in image: + msg = _("Attribute '%s' is read-only.") % key + raise webob.exc.HTTPForbidden(explanation=unicode(msg)) + + def create(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + image = {} + properties = body + tags = properties.pop('tags', None) + for key in self._base_properties: + try: + image[key] = properties.pop(key) + except KeyError: + pass + return dict(image=image, extra_properties=properties, tags=tags) + + def _get_change_operation_d10(self, raw_change): + try: + return raw_change['op'] + except KeyError: + msg = _("Unable to find '%s' in JSON Schema change") % 'op' + raise webob.exc.HTTPBadRequest(explanation=msg) + + def 
_get_change_operation_d4(self, raw_change): + op = None + for key in ['replace', 'add', 'remove']: + if key in raw_change: + if op is not None: + msg = _('Operation objects must contain only one member' + ' named "add", "remove", or "replace".') + raise webob.exc.HTTPBadRequest(explanation=msg) + op = key + if op is None: + msg = _('Operation objects must contain exactly one member' + ' named "add", "remove", or "replace".') + raise webob.exc.HTTPBadRequest(explanation=msg) + return op + + def _get_change_path_d10(self, raw_change): + try: + return raw_change['path'] + except KeyError: + msg = _("Unable to find '%s' in JSON Schema change") % 'path' + raise webob.exc.HTTPBadRequest(explanation=msg) + + def _get_change_path_d4(self, raw_change, op): + return raw_change[op] + + def _decode_json_pointer(self, pointer): + """Parse a json pointer. + + Json Pointers are defined in + http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . + The pointers use '/' for separation between object attributes, such + that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character + in an attribute name is encoded as "~1" and a '~' character is encoded + as "~0". + """ + self._validate_json_pointer(pointer) + ret = [] + for part in pointer.lstrip('/').split('/'): + ret.append(part.replace('~1', '/').replace('~0', '~').strip()) + return ret + + def _validate_json_pointer(self, pointer): + """Validate a json pointer. + + We only accept a limited form of json pointers. + """ + if not pointer.startswith('/'): + msg = _('Pointer `%s` does not start with "/".') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + if re.search(r'/\s*?/', pointer[1:]): + msg = _('Pointer `%s` contains adjacent "/".') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + if len(pointer) > 1 and pointer.endswith('/'): + msg = _('Pointer `%s` ends with "/".') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + if pointer[1:].strip() == '/': + msg = _('Pointer `%s` does not contain a valid token.') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + if re.search('~[^01]', pointer) or pointer.endswith('~'): + msg = _('Pointer `%s` contains "~" not part of' + ' a recognized escape sequence.') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + + def _get_change_value(self, raw_change, op): + if 'value' not in raw_change: + msg = _('Operation "%s" requires a member named "value".') + raise webob.exc.HTTPBadRequest(explanation=msg % op) + return raw_change['value'] + + def _validate_change(self, change): + path_root = change['path'][0] + if path_root in self._readonly_properties: + msg = _("Attribute '%s' is read-only.") % path_root + raise webob.exc.HTTPForbidden(explanation=unicode(msg)) + if path_root in self._reserved_properties: + msg = _("Attribute '%s' is reserved.") % path_root + raise webob.exc.HTTPForbidden(explanation=unicode(msg)) + + if change['op'] == 'delete': + return + + partial_image = None + if len(change['path']) == 1: + partial_image = {path_root: change['value']} + elif ((path_root in _get_base_properties().keys()) and + (_get_base_properties()[path_root].get('type', '') == 'array')): + # NOTE(zhiyan): client can use the PATCH API to add an element to + # the image's existing set property directly. + # Such as: 1. using '/locations/N' path to add a location + # to the image's 'locations' list at position N. + # (implemented) + # 2. using '/tags/-' path to append a tag to the + # end of the image's 'tags' list.
(Not implemented) + partial_image = {path_root: [change['value']]} + + if partial_image: + try: + self.schema.validate(partial_image) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + + def _validate_path(self, op, path): + path_root = path[0] + limits = self._path_depth_limits.get(path_root, {}) + if len(path) != limits.get(op, 1): + msg = _("Invalid JSON pointer for this resource: " + "'/%s'") % '/'.join(path) + raise webob.exc.HTTPBadRequest(explanation=unicode(msg)) + + def _parse_json_schema_change(self, raw_change, draft_version): + if draft_version == 10: + op = self._get_change_operation_d10(raw_change) + path = self._get_change_path_d10(raw_change) + elif draft_version == 4: + op = self._get_change_operation_d4(raw_change) + path = self._get_change_path_d4(raw_change, op) + else: + msg = _('Unrecognized JSON Schema draft version') + raise webob.exc.HTTPBadRequest(explanation=msg) + + path_list = self._decode_json_pointer(path) + return op, path_list + + def update(self, request): + changes = [] + content_types = { + 'application/openstack-images-v2.0-json-patch': 4, + 'application/openstack-images-v2.1-json-patch': 10, + } + if request.content_type not in content_types: + headers = {'Accept-Patch': ', '.join(content_types.keys())} + raise webob.exc.HTTPUnsupportedMediaType(headers=headers) + + json_schema_version = content_types[request.content_type] + + body = self._get_request_body(request) + + if not isinstance(body, list): + msg = _('Request body must be a JSON array of operation objects.') + raise webob.exc.HTTPBadRequest(explanation=msg) + + for raw_change in body: + if not isinstance(raw_change, dict): + msg = _('Operations must be JSON objects.') + raise webob.exc.HTTPBadRequest(explanation=msg) + + (op, path) = self._parse_json_schema_change(raw_change, + json_schema_version) + + # NOTE(zhiyan): the 'path' is a list. 
+ self._validate_path(op, path) + change = {'op': op, 'path': path} + + if not op == 'remove': + change['value'] = self._get_change_value(raw_change, op) + self._validate_change(change) + + changes.append(change) + + return {'changes': changes} + + def _validate_limit(self, limit): + try: + limit = int(limit) + except ValueError: + msg = _("limit param must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _("limit param must be positive") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return limit + + def _validate_sort_dir(self, sort_dir): + if sort_dir not in ['asc', 'desc']: + msg = _('Invalid sort direction: %s') % sort_dir + raise webob.exc.HTTPBadRequest(explanation=msg) + + return sort_dir + + def _validate_member_status(self, member_status): + if member_status not in ['pending', 'accepted', 'rejected', 'all']: + msg = _('Invalid status: %s') % member_status + raise webob.exc.HTTPBadRequest(explanation=msg) + + return member_status + + def _get_filters(self, filters): + visibility = filters.get('visibility') + if visibility: + if visibility not in ['public', 'private', 'shared']: + msg = _('Invalid visibility value: %s') % visibility + raise webob.exc.HTTPBadRequest(explanation=msg) + + return filters + + def index(self, request): + params = request.params.copy() + limit = params.pop('limit', None) + marker = params.pop('marker', None) + sort_dir = params.pop('sort_dir', 'desc') + member_status = params.pop('member_status', 'accepted') + + # NOTE (flwang) To avoid using comma or any predefined chars to split + # multiple tags, now we allow user specify multiple 'tag' parameters + # in URL, such as v2/images?tag=x86&tag=64bit. + tags = [] + while 'tag' in params: + tags.append(params.pop('tag').strip()) + + query_params = { + 'sort_key': params.pop('sort_key', 'created_at'), + 'sort_dir': self._validate_sort_dir(sort_dir), + 'filters': self._get_filters(params), + 'member_status': self._validate_member_status(member_status), + } + + if marker is not None: + query_params['marker'] = marker + + if limit is not None: + query_params['limit'] = self._validate_limit(limit) + + if tags: + query_params['filters']['tags'] = tags + + return query_params + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema or get_schema() + + def _get_image_href(self, image, subcollection=''): + base_href = '/v2/images/%s' % image.image_id + if subcollection: + base_href = '%s/%s' % (base_href, subcollection) + return base_href + + def _format_image(self, image): + image_view = dict() + try: + image_view = dict(image.extra_properties) + attributes = ['name', 'disk_format', 'container_format', + 'visibility', 'size', 'virtual_size', 'status', + 'checksum', 'protected', 'min_ram', 'min_disk', + 'owner'] + for key in attributes: + image_view[key] = getattr(image, key) + image_view['id'] = image.image_id + image_view['created_at'] = timeutils.isotime(image.created_at) + image_view['updated_at'] = timeutils.isotime(image.updated_at) + + if CONF.show_multiple_locations: + if image.locations: + image_view['locations'] = list(image.locations) + else: + # NOTE (flwang): We will still show "locations": [] if + # image.locations is None to indicate it's allowed to show + # locations but it's just non-existent. 
+ image_view['locations'] = [] + + if CONF.show_image_direct_url and image.locations: + # Choose best location configured strategy + best_location = ( + location_strategy.choose_best_location(image.locations)) + image_view['direct_url'] = best_location['url'] + + image_view['tags'] = list(image.tags) + image_view['self'] = self._get_image_href(image) + image_view['file'] = self._get_image_href(image, 'file') + image_view['schema'] = '/v2/schemas/image' + image_view = self.schema.filter(image_view) # domain + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + return image_view + + def create(self, response, image): + response.status_int = 201 + self.show(response, image) + response.location = self._get_image_href(image) + + def show(self, response, image): + image_view = self._format_image(image) + body = json.dumps(image_view, ensure_ascii=False) + response.unicode_body = unicode(body) + response.content_type = 'application/json' + + def update(self, response, image): + image_view = self._format_image(image) + body = json.dumps(image_view, ensure_ascii=False) + response.unicode_body = unicode(body) + response.content_type = 'application/json' + + def index(self, response, result): + params = dict(response.request.params) + params.pop('marker', None) + query = urlparse.urlencode(params) + body = { + 'images': [self._format_image(i) for i in result['images']], + 'first': '/v2/images', + 'schema': '/v2/schemas/images', + } + if query: + body['first'] = '%s?%s' % (body['first'], query) + if 'next_marker' in result: + params['marker'] = result['next_marker'] + next_query = urlparse.urlencode(params) + body['next'] = '/v2/images?%s' % next_query + response.unicode_body = unicode(json.dumps(body, ensure_ascii=False)) + response.content_type = 'application/json' + + def delete(self, response, result): + response.status_int = 204 + + +def _get_base_properties(): + return { + 'id': { + 'type': 'string', + 'description': _('An identifier for the image'), + 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' + '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), + }, + 'name': { + 'type': 'string', + 'description': _('Descriptive name for the image'), + 'maxLength': 255, + }, + 'status': { + 'type': 'string', + 'description': _('Status of the image (READ-ONLY)'), + 'enum': ['queued', 'saving', 'active', 'killed', + 'deleted', 'pending_delete'], + }, + 'visibility': { + 'type': 'string', + 'description': _('Scope of image accessibility'), + 'enum': ['public', 'private'], + }, + 'protected': { + 'type': 'boolean', + 'description': _('If true, image will not be deletable.'), + }, + 'checksum': { + 'type': 'string', + 'description': _('md5 hash of image contents. 
(READ-ONLY)'), + 'maxLength': 32, + }, + 'owner': { + 'type': 'string', + 'description': _('Owner of the image'), + 'maxLength': 255, + }, + 'size': { + 'type': 'integer', + 'description': _('Size of image file in bytes (READ-ONLY)'), + }, + 'virtual_size': { + 'type': 'integer', + 'description': _('Virtual size of image in bytes (READ-ONLY)'), + }, + 'container_format': { + 'type': 'string', + 'description': _('Format of the container'), + 'enum': CONF.image_format.container_formats, + }, + 'disk_format': { + 'type': 'string', + 'description': _('Format of the disk'), + 'enum': CONF.image_format.disk_formats, + }, + 'created_at': { + 'type': 'string', + 'description': _('Date and time of image registration' + ' (READ-ONLY)'), + # TODO(bcwaldon): our jsonschema library doesn't seem to like the + # format attribute, figure out why! + #'format': 'date-time', + }, + 'updated_at': { + 'type': 'string', + 'description': _('Date and time of the last image modification' + ' (READ-ONLY)'), + #'format': 'date-time', + }, + 'tags': { + 'type': 'array', + 'description': _('List of strings related to the image'), + 'items': { + 'type': 'string', + 'maxLength': 255, + }, + }, + 'direct_url': { + 'type': 'string', + 'description': _('URL to access the image file kept in external ' + 'store (READ-ONLY)'), + }, + 'min_ram': { + 'type': 'integer', + 'description': _('Amount of ram (in MB) required to boot image.'), + }, + 'min_disk': { + 'type': 'integer', + 'description': _('Amount of disk space (in GB) required to boot ' + 'image.'), + }, + 'self': { + 'type': 'string', + 'description': '(READ-ONLY)' + }, + 'file': { + 'type': 'string', + 'description': '(READ-ONLY)' + }, + 'schema': { + 'type': 'string', + 'description': '(READ-ONLY)' + }, + 'locations': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'url': { + 'type': 'string', + 'maxLength': 255, + }, + 'metadata': { + 'type': 'object', + }, + }, + 'required': ['url', 'metadata'], + }, + 'description': _('A set of URLs to access the image file kept in ' + 'external store'), + }, + } + + +def _get_base_links(): + return [ + {'rel': 'self', 'href': '{self}'}, + {'rel': 'enclosure', 'href': '{file}'}, + {'rel': 'describedby', 'href': '{schema}'}, + ] + + +def get_schema(custom_properties=None): + properties = _get_base_properties() + links = _get_base_links() + if CONF.allow_additional_image_properties: + schema = glance.schema.PermissiveSchema('image', properties, links) + else: + schema = glance.schema.Schema('image', properties) + schema.merge_properties(custom_properties or {}) + return schema + + +def get_collection_schema(custom_properties=None): + image_schema = get_schema(custom_properties) + return glance.schema.CollectionSchema('images', image_schema) + + +def load_custom_properties(): + """Find the schema properties files and load them into a dict.""" + filename = 'schema-image.json' + match = CONF.find_file(filename) + if match: + schema_file = open(match) + schema_data = schema_file.read() + return json.loads(schema_data) + else: + msg = _('Could not find schema properties file %s. 
Continuing ' + 'without custom properties') + LOG.warn(msg % filename) + return {} + + +def create_resource(custom_properties=None): + """Images resource factory method""" + schema = get_schema(custom_properties) + deserializer = RequestDeserializer(schema) + serializer = ResponseSerializer(schema) + controller = ImagesController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/icehouse-patches/glance/glance_location_patch/glance/common/config.py b/icehouse-patches/glance/glance_location_patch/glance/common/config.py new file mode 100644 index 00000000..3718e8f4 --- /dev/null +++ b/icehouse-patches/glance/glance_location_patch/glance/common/config.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Routines for configuring Glance +""" + +import logging +import logging.config +import logging.handlers +import os + +from oslo.config import cfg +from paste import deploy + +from glance.version import version_info as version + +paste_deploy_opts = [ + cfg.StrOpt('flavor', + help=_('Partial name of a pipeline in your paste configuration ' + 'file with the service name removed. For example, if ' + 'your paste section name is ' + '[pipeline:glance-api-keystone] use the value ' + '"keystone"')), + cfg.StrOpt('config_file', + help=_('Name of the paste configuration file.')), +] +image_format_opts = [ + cfg.ListOpt('container_formats', + default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova'], + help=_("Supported values for the 'container_format' " + "image attribute"), + deprecated_opts=[cfg.DeprecatedOpt('container_formats', + group='DEFAULT')]), + cfg.ListOpt('disk_formats', + default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', + 'vdi', 'iso'], + help=_("Supported values for the 'disk_format' " + "image attribute"), + deprecated_opts=[cfg.DeprecatedOpt('disk_formats', + group='DEFAULT')]), +] +task_opts = [ + cfg.IntOpt('task_time_to_live', + default=48, + help=_("Time in hours for which a task lives after, either " + "succeeding or failing"), + deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live', + group='DEFAULT')]), +] +common_opts = [ + cfg.BoolOpt('allow_additional_image_properties', default=True, + help=_('Whether to allow users to specify image properties ' + 'beyond what the image schema provides')), + cfg.IntOpt('image_member_quota', default=128, + help=_('Maximum number of image members per image. ' + 'Negative values evaluate to unlimited.')), + cfg.IntOpt('image_property_quota', default=128, + help=_('Maximum number of properties allowed on an image. ' + 'Negative values evaluate to unlimited.')), + cfg.IntOpt('image_tag_quota', default=128, + help=_('Maximum number of tags allowed on an image. ' + 'Negative values evaluate to unlimited.')), + cfg.IntOpt('image_location_quota', default=10, + help=_('Maximum number of locations allowed on an image. 
' + 'Negative values evaluate to unlimited.')), + cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api', + help=_('Python module path of data access API')), + cfg.IntOpt('limit_param_default', default=25, + help=_('Default value for the number of items returned by a ' + 'request if not specified explicitly in the request')), + cfg.IntOpt('api_limit_max', default=1000, + help=_('Maximum permissible number of items that could be ' + 'returned by a request')), + cfg.BoolOpt('show_image_direct_url', default=False, + help=_('Whether to include the backend image storage location ' + 'in image properties. Revealing storage location can ' + 'be a security risk, so use this setting with ' + 'caution!')), + cfg.BoolOpt('show_multiple_locations', default=False, + help=_('Whether to include the backend image locations ' + 'in image properties. Revealing storage location can ' + 'be a security risk, so use this setting with ' + 'caution! This overrides show_image_direct_url.')), + cfg.IntOpt('image_size_cap', default=1099511627776, + help=_("Maximum size of image a user can upload in bytes. " + "Defaults to 1099511627776 bytes (1 TB).")), + cfg.IntOpt('user_storage_quota', default=0, + help=_("Set a system wide quota for every user. This value is " + "the total number of bytes that a user can use across " + "all storage systems. A value of 0 means unlimited.")), + cfg.BoolOpt('enable_v1_api', default=True, + help=_("Deploy the v1 OpenStack Images API.")), + cfg.BoolOpt('enable_v2_api', default=True, + help=_("Deploy the v2 OpenStack Images API.")), + cfg.BoolOpt('enable_v1_registry', default=True, + help=_("Deploy the v1 OpenStack Registry API.")), + cfg.BoolOpt('enable_v2_registry', default=True, + help=_("Deploy the v2 OpenStack Registry API.")), + cfg.StrOpt('pydev_worker_debug_host', default=None, + help=_('The hostname/IP of the pydev process listening for ' + 'debug connections')), + cfg.IntOpt('pydev_worker_debug_port', default=5678, + help=_('The port on which a pydev process is listening for ' + 'connections.')), + cfg.StrOpt('metadata_encryption_key', secret=True, + help=_('Key used for encrypting sensitive metadata while ' + 'talking to the registry or database.')), + cfg.BoolOpt('sync_enabled', default=False, + help=_("Whether to launch the image sync function.")), + cfg.StrOpt('sync_server_host', default='127.0.0.1', + help=_('Host IP address on which the sync web server listens.')), + cfg.IntOpt('sync_server_port', default=9595, + help=_('Port on which the sync web server listens.')), +] +sync_opts = [ + cfg.StrOpt('cascading_endpoint_url', default='http://127.0.0.1:9292/', + help=_('Endpoint URL of the cascading Glance service.'), + deprecated_opts=[cfg.DeprecatedOpt('cascading_endpoint_url', + group='DEFAULT')]), + cfg.StrOpt('sync_strategy', default='None', + help=_("Image sync strategy; valid values are All, User and None."), + deprecated_opts=[cfg.DeprecatedOpt('sync_strategy', + group='DEFAULT')]), + cfg.IntOpt('snapshot_timeout', default=300, + help=_('Maximum time in seconds to wait for a snapshot ' + 'to become active.'), + deprecated_opts=[cfg.DeprecatedOpt('snapshot_timeout', + group='DEFAULT')]), + cfg.IntOpt('snapshot_sleep_interval', default=10, + help=_('Sleep interval in seconds while waiting for a ' + 'snapshot to become active.'), + deprecated_opts=[cfg.DeprecatedOpt('snapshot_sleep_interval', + group='DEFAULT')]), + cfg.IntOpt('task_retry_times', default=0, + help=_('Number of times to retry a failed sync task.'), + deprecated_opts=[cfg.DeprecatedOpt('task_retry_times', + group='DEFAULT')]), + cfg.IntOpt('scp_copy_timeout',
default=3600, + help=_('when snapshot, max wait (second)time for snapshot ' + 'status become active.'), + deprecated_opts=[cfg.DeprecatedOpt('scp_copy_timeout', + group='DEFAULT')]), +] + +CONF = cfg.CONF +CONF.register_opts(paste_deploy_opts, group='paste_deploy') +CONF.register_opts(image_format_opts, group='image_format') +CONF.register_opts(task_opts, group='task') +CONF.register_opts(sync_opts, group='sync') +CONF.register_opts(common_opts) + + +def parse_args(args=None, usage=None, default_config_files=None): + CONF(args=args, + project='glance', + version=version.cached_version_string(), + usage=usage, + default_config_files=default_config_files) + + +def parse_cache_args(args=None): + config_files = cfg.find_config_files(project='glance', prog='glance-cache') + parse_args(args=args, default_config_files=config_files) + + +def _get_deployment_flavor(flavor=None): + """ + Retrieve the paste_deploy.flavor config item, formatted appropriately + for appending to the application name. + + :param flavor: if specified, use this setting rather than the + paste_deploy.flavor configuration setting + """ + if not flavor: + flavor = CONF.paste_deploy.flavor + return '' if not flavor else ('-' + flavor) + + +def _get_paste_config_path(): + paste_suffix = '-paste.ini' + conf_suffix = '.conf' + if CONF.config_file: + # Assume paste config is in a paste.ini file corresponding + # to the last config file + path = CONF.config_file[-1].replace(conf_suffix, paste_suffix) + else: + path = CONF.prog + paste_suffix + return CONF.find_file(os.path.basename(path)) + + +def _get_deployment_config_file(): + """ + Retrieve the deployment_config_file config item, formatted as an + absolute pathname. + """ + path = CONF.paste_deploy.config_file + if not path: + path = _get_paste_config_path() + if not path: + msg = _("Unable to locate paste config file for %s.") % CONF.prog + raise RuntimeError(msg) + return os.path.abspath(path) + + +def load_paste_app(app_name, flavor=None, conf_file=None): + """ + Builds and returns a WSGI app from a paste config file. + + We assume the last config file specified in the supplied ConfigOpts + object is the paste config file, if conf_file is None. + + :param app_name: name of the application to load + :param flavor: name of the variant of the application to load + :param conf_file: path to the paste config file + + :raises RuntimeError when config file cannot be located or application + cannot be loaded from config file + """ + # append the deployment flavor to the application name, + # in order to identify the appropriate paste pipeline + app_name += _get_deployment_flavor(flavor) + + if not conf_file: + conf_file = _get_deployment_config_file() + + try: + logger = logging.getLogger(__name__) + logger.debug(_("Loading %(app_name)s from %(conf_file)s"), + {'conf_file': conf_file, 'app_name': app_name}) + + app = deploy.loadapp("config:%s" % conf_file, name=app_name) + + # Log the options used when starting if we're in debug mode... + if CONF.debug: + CONF.log_opt_values(logger, logging.DEBUG) + + return app + except (LookupError, ImportError) as e: + msg = (_("Unable to load %(app_name)s from " + "configuration file %(conf_file)s." 
+ "\nGot: %(e)r") % {'app_name': app_name, + 'conf_file': conf_file, + 'e': e}) + logger.error(msg) + raise RuntimeError(msg) diff --git a/icehouse-patches/glance/glance_location_patch/glance/common/exception.py b/icehouse-patches/glance/glance_location_patch/glance/common/exception.py new file mode 100644 index 00000000..06257d7a --- /dev/null +++ b/icehouse-patches/glance/glance_location_patch/glance/common/exception.py @@ -0,0 +1,362 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Glance exception subclasses""" + +import six +import six.moves.urllib.parse as urlparse + +_FATAL_EXCEPTION_FORMAT_ERRORS = False + + +class RedirectException(Exception): + + def __init__(self, url): + self.url = urlparse.urlparse(url) + + +class GlanceException(Exception): + + """ + Base Glance Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. + """ + message = _("An unknown exception occurred") + + def __init__(self, message=None, *args, **kwargs): + if not message: + message = self.message + try: + if kwargs: + message = message % kwargs + except Exception: + if _FATAL_EXCEPTION_FORMAT_ERRORS: + raise + else: + # at least get the core message out if something happened + pass + self.msg = message + super(GlanceException, self).__init__(message) + + def __unicode__(self): + # NOTE(flwang): By default, self.msg is an instance of Message, which + # can't be converted by str(). Based on the definition of + # __unicode__, it should return unicode always. + return six.text_type(self.msg) + + +class MissingCredentialError(GlanceException): + message = _("Missing required credential: %(required)s") + + +class BadAuthStrategy(GlanceException): + message = _("Incorrect auth strategy, expected \"%(expected)s\" but " + "received \"%(received)s\"") + + +class NotFound(GlanceException): + message = _("An object with the specified identifier was not found.") + + +class UnknownScheme(GlanceException): + message = _("Unknown scheme '%(scheme)s' found in URI") + + +class BadStoreUri(GlanceException): + message = _("The Store URI was malformed.") + + +class Duplicate(GlanceException): + message = _("An object with the same identifier already exists.") + + +class Conflict(GlanceException): + message = _("An object with the same identifier is currently being " + "operated on.") + + +class StorageFull(GlanceException): + message = _("There is not enough disk space on the image storage media.") + + +class StorageQuotaFull(GlanceException): + message = _("The size of the data %(image_size)s will exceed the limit. 
" + "%(remaining)s bytes remaining.") + + +class StorageWriteDenied(GlanceException): + message = _("Permission to write image storage media denied.") + + +class AuthBadRequest(GlanceException): + message = _("Connect error/bad request to Auth service at URL %(url)s.") + + +class AuthUrlNotFound(GlanceException): + message = _("Auth service at URL %(url)s not found.") + + +class AuthorizationFailure(GlanceException): + message = _("Authorization failed.") + + +class NotAuthenticated(GlanceException): + message = _("You are not authenticated.") + + +class Forbidden(GlanceException): + message = _("You are not authorized to complete this action.") + + +class ForbiddenPublicImage(Forbidden): + message = _("You are not authorized to complete this action.") + + +class ProtectedImageDelete(Forbidden): + message = _("Image %(image_id)s is protected and cannot be deleted.") + + +class Invalid(GlanceException): + message = _("Data supplied was not valid.") + + +class InvalidSortKey(Invalid): + message = _("Sort key supplied was not valid.") + + +class InvalidPropertyProtectionConfiguration(Invalid): + message = _("Invalid configuration in property protection file.") + + +class InvalidFilterRangeValue(Invalid): + message = _("Unable to filter using the specified range.") + + +class ReadonlyProperty(Forbidden): + message = _("Attribute '%(property)s' is read-only.") + + +class ReservedProperty(Forbidden): + message = _("Attribute '%(property)s' is reserved.") + + +class AuthorizationRedirect(GlanceException): + message = _("Redirecting to %(uri)s for authorization.") + + +class ClientConnectionError(GlanceException): + message = _("There was an error connecting to a server") + + +class ClientConfigurationError(GlanceException): + message = _("There was an error configuring the client.") + + +class MultipleChoices(GlanceException): + message = _("The request returned a 302 Multiple Choices. This generally " + "means that you have not included a version indicator in a " + "request URI.\n\nThe body of response returned:\n%(body)s") + + +class LimitExceeded(GlanceException): + message = _("The request returned a 413 Request Entity Too Large. This " + "generally means that rate limiting or a quota threshold was " + "breached.\n\nThe response body:\n%(body)s") + + def __init__(self, *args, **kwargs): + self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') + else None) + super(LimitExceeded, self).__init__(*args, **kwargs) + + +class ServiceUnavailable(GlanceException): + message = _("The request returned 503 Service Unavilable. This " + "generally occurs on service overload or other transient " + "outage.") + + def __init__(self, *args, **kwargs): + self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') + else None) + super(ServiceUnavailable, self).__init__(*args, **kwargs) + + +class ServerError(GlanceException): + message = _("The request returned 500 Internal Server Error.") + + +class UnexpectedStatus(GlanceException): + message = _("The request returned an unexpected status: %(status)s." + "\n\nThe response body:\n%(body)s") + + +class InvalidContentType(GlanceException): + message = _("Invalid content type %(content_type)s") + + +class BadRegistryConnectionConfiguration(GlanceException): + message = _("Registry was not configured correctly on API server. " + "Reason: %(reason)s") + + +class BadStoreConfiguration(GlanceException): + message = _("Store %(store_name)s could not be configured correctly. 
" + "Reason: %(reason)s") + + +class BadDriverConfiguration(GlanceException): + message = _("Driver %(driver_name)s could not be configured correctly. " + "Reason: %(reason)s") + + +class StoreDeleteNotSupported(GlanceException): + message = _("Deleting images from this store is not supported.") + + +class StoreGetNotSupported(GlanceException): + message = _("Getting images from this store is not supported.") + + +class StoreAddNotSupported(GlanceException): + message = _("Adding images to this store is not supported.") + + +class StoreAddDisabled(GlanceException): + message = _("Configuration for store failed. Adding images to this " + "store is disabled.") + + +class MaxRedirectsExceeded(GlanceException): + message = _("Maximum redirects (%(redirects)s) was exceeded.") + + +class InvalidRedirect(GlanceException): + message = _("Received invalid HTTP redirect.") + + +class NoServiceEndpoint(GlanceException): + message = _("Response from Keystone does not contain a Glance endpoint.") + + +class RegionAmbiguity(GlanceException): + message = _("Multiple 'image' service matches for region %(region)s. This " + "generally means that a region is required and you have not " + "supplied one.") + + +class WorkerCreationFailure(GlanceException): + message = _("Server worker creation failed: %(reason)s.") + + +class SchemaLoadError(GlanceException): + message = _("Unable to load schema: %(reason)s") + + +class InvalidObject(GlanceException): + message = _("Provided object does not match schema " + "'%(schema)s': %(reason)s") + + +class UnsupportedHeaderFeature(GlanceException): + message = _("Provided header feature is unsupported: %(feature)s") + + +class InUseByStore(GlanceException): + message = _("The image cannot be deleted because it is in use through " + "the backend store outside of Glance.") + + +class ImageSizeLimitExceeded(GlanceException): + message = _("The provided image is too large.") + + +class ImageMemberLimitExceeded(LimitExceeded): + message = _("The limit has been exceeded on the number of allowed image " + "members for this image. Attempted: %(attempted)s, " + "Maximum: %(maximum)s") + + +class ImagePropertyLimitExceeded(LimitExceeded): + message = _("The limit has been exceeded on the number of allowed image " + "properties. Attempted: %(attempted)s, Maximum: %(maximum)s") + + +class ImageTagLimitExceeded(LimitExceeded): + message = _("The limit has been exceeded on the number of allowed image " + "tags. Attempted: %(attempted)s, Maximum: %(maximum)s") + + +class ImageLocationLimitExceeded(LimitExceeded): + message = _("The limit has been exceeded on the number of allowed image " + "locations. 
Attempted: %(attempted)s, Maximum: %(maximum)s") + + +class RPCError(GlanceException): + message = _("%(cls)s exception was raised in the last rpc call: %(val)s") + + +class TaskException(GlanceException): + message = _("An unknown task exception occurred") + + +class TaskNotFound(TaskException, NotFound): + message = _("Task with the given id %(task_id)s was not found") + + +class InvalidTaskStatus(TaskException, Invalid): + message = _("Provided status of task is unsupported: %(status)s") + + +class InvalidTaskType(TaskException, Invalid): + message = _("Provided type of task is unsupported: %(type)s") + + +class InvalidTaskStatusTransition(TaskException, Invalid): + message = _("Status transition from %(cur_status)s to" + " %(new_status)s is not allowed") + + +class DuplicateLocation(Duplicate): + message = _("The location %(location)s already exists") + + +class ImageDataNotFound(NotFound): + message = _("No image data could be found") + + +class InvalidParameterValue(Invalid): + message = _("Invalid value '%(value)s' for parameter '%(param)s': " + "%(extra_msg)s") + + +class InvalidImageStatusTransition(Invalid): + message = _("Image status transition from %(cur_status)s to" + " %(new_status)s is not allowed") + + +class FunctionNameNotFound(GlanceException): + message = _("Cannot find function name: %(func_name)s " + "in object/module: %(owner_name)s") + + +class SyncServiceOperationError(GlanceException): + message = _("Image sync service execution failed with reason: %(reason)s") + + +class SyncStoreCopyError(GlanceException): + message = _("Image sync store copy failed with reason: %(reason)s") diff --git a/icehouse-patches/glance/glance_location_patch/glance/common/utils.py b/icehouse-patches/glance/glance_location_patch/glance/common/utils.py new file mode 100644 index 00000000..f5d5e723 --- /dev/null +++ b/icehouse-patches/glance/glance_location_patch/glance/common/utils.py @@ -0,0 +1,602 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions.
+""" + +import errno + +try: + from eventlet import sleep +except ImportError: + from time import sleep +from eventlet.green import socket + +import functools +import os +import platform +import re +import subprocess +import sys +import urlparse +import uuid + +from OpenSSL import crypto +from oslo.config import cfg +from webob import exc + +from glance.common import exception +import glance.openstack.common.log as logging +from glance.openstack.common import strutils + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) + +FEATURE_BLACKLIST = ['content-length', 'content-type', 'x-image-meta-size'] + +# Whitelist of v1 API headers of form x-image-meta-xxx +IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size', + 'x-image-meta-is_public', 'x-image-meta-disk_format', + 'x-image-meta-container_format', 'x-image-meta-name', + 'x-image-meta-status', 'x-image-meta-copy_from', + 'x-image-meta-uri', 'x-image-meta-checksum', + 'x-image-meta-created_at', 'x-image-meta-updated_at', + 'x-image-meta-deleted_at', 'x-image-meta-min_ram', + 'x-image-meta-min_disk', 'x-image-meta-owner', + 'x-image-meta-store', 'x-image-meta-id', + 'x-image-meta-protected', 'x-image-meta-deleted'] + +GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD' + + +def chunkreadable(iter, chunk_size=65536): + """ + Wrap a readable iterator with a reader yielding chunks of + a preferred size, otherwise leave iterator unchanged. + + :param iter: an iter which may also be readable + :param chunk_size: maximum size of chunk + """ + return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter + + +def chunkiter(fp, chunk_size=65536): + """ + Return an iterator to a file-like obj which yields fixed size chunks + + :param fp: a file-like object + :param chunk_size: maximum size of chunk + """ + while True: + chunk = fp.read(chunk_size) + if chunk: + yield chunk + else: + break + + +def cooperative_iter(iter): + """ + Return an iterator which schedules after each + iteration. This can prevent eventlet thread starvation. + + :param iter: an iterator to wrap + """ + try: + for chunk in iter: + sleep(0) + yield chunk + except Exception as err: + msg = _("Error: cooperative_iter exception %s") % err + LOG.error(msg) + raise + + +def cooperative_read(fd): + """ + Wrap a file descriptor's read with a partial function which schedules + after each read. This can prevent eventlet thread starvation. + + :param fd: a file descriptor to wrap + """ + def readfn(*args): + result = fd.read(*args) + sleep(0) + return result + return readfn + + +class CooperativeReader(object): + + """ + An eventlet thread friendly class for reading in image data. + + When accessing data either through the iterator or the read method + we perform a sleep to allow a co-operative yield. When there is more than + one image being uploaded/downloaded this prevents eventlet thread + starvation, ie allows all threads to be scheduled periodically rather than + having the same thread be continuously active. + """ + + def __init__(self, fd): + """ + :param fd: Underlying image file object + """ + self.fd = fd + self.iterator = None + # NOTE(markwash): if the underlying supports read(), overwrite the + # default iterator-based implementation with cooperative_read which + # is more straightforward + if hasattr(fd, 'read'): + self.read = cooperative_read(fd) + + def read(self, length=None): + """Return the next chunk of the underlying iterator. + + This is replaced with cooperative_read in __init__ if the underlying + fd already supports read(). 
+ """ + if self.iterator is None: + self.iterator = self.__iter__() + try: + return self.iterator.next() + except StopIteration: + return '' + + def __iter__(self): + return cooperative_iter(self.fd.__iter__()) + + +class LimitingReader(object): + + """ + Reader designed to fail when reading image data past the configured + allowable amount. + """ + + def __init__(self, data, limit): + """ + :param data: Underlying image data object + :param limit: maximum number of bytes the reader should allow + """ + self.data = data + self.limit = limit + self.bytes_read = 0 + + def __iter__(self): + for chunk in self.data: + self.bytes_read += len(chunk) + if self.bytes_read > self.limit: + raise exception.ImageSizeLimitExceeded() + else: + yield chunk + + def read(self, i): + result = self.data.read(i) + self.bytes_read += len(result) + if self.bytes_read > self.limit: + raise exception.ImageSizeLimitExceeded() + return result + + +def image_meta_to_http_headers(image_meta): + """ + Returns a set of image metadata into a dict + of HTTP headers that can be fed to either a Webob + Request object or an httplib.HTTP(S)Connection object + + :param image_meta: Mapping of image metadata + """ + headers = {} + for k, v in image_meta.items(): + if v is not None: + if k == 'properties': + for pk, pv in v.items(): + if pv is not None: + headers["x-image-meta-property-%s" + % pk.lower()] = unicode(pv) + else: + headers["x-image-meta-%s" % k.lower()] = unicode(v) + return headers + + +def add_features_to_http_headers(features, headers): + """ + Adds additional headers representing glance features to be enabled. + + :param headers: Base set of headers + :param features: Map of enabled features + """ + if features: + for k, v in features.items(): + if k.lower() in FEATURE_BLACKLIST: + raise exception.UnsupportedHeaderFeature(feature=k) + if v is not None: + headers[k.lower()] = unicode(v) + + +def get_image_meta_from_headers(response): + """ + Processes HTTP headers from a supplied response that + match the x-image-meta and x-image-meta-property and + returns a mapping of image metadata and properties + + :param response: Response to process + """ + result = {} + properties = {} + + if hasattr(response, 'getheaders'): # httplib.HTTPResponse + headers = response.getheaders() + else: # webob.Response + headers = response.headers.items() + + for key, value in headers: + key = str(key.lower()) + if key.startswith('x-image-meta-property-'): + field_name = key[len('x-image-meta-property-'):].replace('-', '_') + properties[field_name] = value or None + elif key.startswith('x-image-meta-'): + field_name = key[len('x-image-meta-'):].replace('-', '_') + if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS: + msg = _("Bad header: %(header_name)s") % {'header_name': key} + raise exc.HTTPBadRequest(msg, content_type="text/plain") + result[field_name] = value or None + result['properties'] = properties + + for key in ('size', 'min_disk', 'min_ram'): + if key in result: + try: + result[key] = int(result[key]) + except ValueError: + extra = (_("Cannot convert image %(key)s '%(value)s' " + "to an integer.") + % {'key': key, 'value': result[key]}) + raise exception.InvalidParameterValue(value=result[key], + param=key, + extra_msg=extra) + if result[key] < 0: + extra = (_("Image %(key)s must be >= 0 " + "('%(value)s' specified).") + % {'key': key, 'value': result[key]}) + raise exception.InvalidParameterValue(value=result[key], + param=key, + extra_msg=extra) + + for key in ('is_public', 'deleted', 'protected'): + if key in 
result: + result[key] = strutils.bool_from_string(result[key]) + return result + + +def safe_mkdirs(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +def safe_remove(path): + try: + os.remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +class PrettyTable(object): + + """Creates an ASCII art table for use in bin/glance + + Example: + + ID Name Size Hits + --- ----------------- ------------ ----- + 122 image 22 0 + """ + + def __init__(self): + self.columns = [] + + def add_column(self, width, label="", just='l'): + """Add a column to the table + + :param width: number of characters wide the column should be + :param label: column heading + :param just: justification for the column, 'l' for left, + 'r' for right + """ + self.columns.append((width, label, just)) + + def make_header(self): + label_parts = [] + break_parts = [] + for width, label, _ in self.columns: + # NOTE(sirp): headers are always left justified + label_part = self._clip_and_justify(label, width, 'l') + label_parts.append(label_part) + + break_part = '-' * width + break_parts.append(break_part) + + label_line = ' '.join(label_parts) + break_line = ' '.join(break_parts) + return '\n'.join([label_line, break_line]) + + def make_row(self, *args): + row = args + row_parts = [] + for data, (width, _, just) in zip(row, self.columns): + row_part = self._clip_and_justify(data, width, just) + row_parts.append(row_part) + + row_line = ' '.join(row_parts) + return row_line + + @staticmethod + def _clip_and_justify(data, width, just): + # clip field to column width + clipped_data = str(data)[:width] + + if just == 'r': + # right justify + justified = clipped_data.rjust(width) + else: + # left justify + justified = clipped_data.ljust(width) + + return justified + + +def get_terminal_size(): + + def _get_terminal_size_posix(): + import fcntl + import struct + import termios + + height_width = None + + try: + height_width = struct.unpack( + 'hh', + fcntl.ioctl( + sys.stderr.fileno(), + termios.TIOCGWINSZ, + struct.pack( + 'HH', + 0, + 0))) + except Exception: + pass + + if not height_width: + try: + p = subprocess.Popen(['stty', 'size'], + shell=False, + stdout=subprocess.PIPE, + stderr=open(os.devnull, 'w')) + result = p.communicate() + if p.returncode == 0: + return tuple(int(x) for x in result[0].split()) + except Exception: + pass + + return height_width + + def _get_terminal_size_win32(): + try: + from ctypes import create_string_buffer + from ctypes import windll + handle = windll.kernel32.GetStdHandle(-12) + csbi = create_string_buffer(22) + res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi) + except Exception: + return None + if res: + import struct + unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw) + (bufx, bufy, curx, cury, wattr, + left, top, right, bottom, maxx, maxy) = unpack_tmp + height = bottom - top + 1 + width = right - left + 1 + return (height, width) + else: + return None + + def _get_terminal_size_unknownOS(): + raise NotImplementedError + + func = {'posix': _get_terminal_size_posix, + 'win32': _get_terminal_size_win32} + + height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)() + + if height_width is None: + raise exception.Invalid() + + for i in height_width: + if not isinstance(i, int) or i <= 0: + raise exception.Invalid() + + return height_width[0], height_width[1] + + +def mutating(func): + """Decorator to enforce read-only logic""" + @functools.wraps(func) + def wrapped(self, req, *args, **kwargs): + if 
req.context.read_only: + msg = _("Read-only access") + LOG.debug(msg) + raise exc.HTTPForbidden(msg, request=req, + content_type="text/plain") + return func(self, req, *args, **kwargs) + return wrapped + + +def setup_remote_pydev_debug(host, port): + error_msg = ('Error setting up the debug environment. Verify that the' + ' option pydev_worker_debug_port is pointing to a valid ' + 'hostname or IP on which a pydev server is listening on' + ' the port indicated by pydev_worker_debug_port.') + + try: + try: + from pydev import pydevd + except ImportError: + import pydevd + + pydevd.settrace(host, + port=port, + stdoutToServer=True, + stderrToServer=True) + return True + except Exception: + LOG.exception(error_msg) + raise + + +class LazyPluggable(object): + + """A pluggable backend loaded lazily based on some value.""" + + def __init__(self, pivot, config_group=None, **backends): + self.__backends = backends + self.__pivot = pivot + self.__backend = None + self.__config_group = config_group + + def __get_backend(self): + if not self.__backend: + if self.__config_group is None: + backend_name = CONF[self.__pivot] + else: + backend_name = CONF[self.__config_group][self.__pivot] + if backend_name not in self.__backends: + msg = _('Invalid backend: %s') % backend_name + raise exception.GlanceException(msg) + + backend = self.__backends[backend_name] + if isinstance(backend, tuple): + name = backend[0] + fromlist = backend[1] + else: + name = backend + fromlist = backend + + self.__backend = __import__(name, None, None, fromlist) + return self.__backend + + def __getattr__(self, key): + backend = self.__get_backend() + return getattr(backend, key) + + +def validate_key_cert(key_file, cert_file): + try: + error_key_name = "private key" + error_filename = key_file + key_str = open(key_file, "r").read() + key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str) + + error_key_name = "certficate" + error_filename = cert_file + cert_str = open(cert_file, "r").read() + cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str) + except IOError as ioe: + raise RuntimeError(_("There is a problem with your %(error_key_name)s " + "%(error_filename)s. Please verify it." + " Error: %(ioe)s") % + {'error_key_name': error_key_name, + 'error_filename': error_filename, + 'ioe': ioe}) + except crypto.Error as ce: + raise RuntimeError(_("There is a problem with your %(error_key_name)s " + "%(error_filename)s. Please verify it. OpenSSL" + " error: %(ce)s") % + {'error_key_name': error_key_name, + 'error_filename': error_filename, + 'ce': ce}) + + try: + data = str(uuid.uuid4()) + digest = "sha1" + + out = crypto.sign(key, data, digest) + crypto.verify(cert, out, data, digest) + except crypto.Error as ce: + raise RuntimeError(_("There is a problem with your key pair. " + "Please verify that cert %(cert_file)s and " + "key %(key_file)s belong together. OpenSSL " + "error %(ce)s") % {'cert_file': cert_file, + 'key_file': key_file, + 'ce': ce}) + + +def get_test_suite_socket(): + global GLANCE_TEST_SOCKET_FD_STR + if GLANCE_TEST_SOCKET_FD_STR in os.environ: + fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR]) + sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) + sock = socket.SocketType(_sock=sock) + sock.listen(CONF.backlog) + del os.environ[GLANCE_TEST_SOCKET_FD_STR] + os.close(fd) + return sock + return None + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. 
+ + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False + + +pattern = re.compile(r'^https?://\S+/v2/images/\S+$') + + +def is_glance_location(loc_url): + return pattern.match(loc_url) + + +def check_synced(image, ep_url_list): + if image.status != 'active': + return + + is_synced = True + if not ep_url_list: + is_synced = False + else: + all_host_list = [urlparse.urlparse(url).netloc for url in ep_url_list] + synced_host_list = [urlparse.urlparse(loc['url']).netloc + for loc in image.locations + if is_glance_location(loc['url'])] + is_synced = set(all_host_list) == set(synced_host_list) + + if not is_synced: + image.status = 'queued' + image.size = None + image.virtual_size = None diff --git a/icehouse-patches/glance/glance_location_patch/glance/gateway.py b/icehouse-patches/glance/glance_location_patch/glance/gateway.py new file mode 100644 index 00000000..9c7e8cdb --- /dev/null +++ b/icehouse-patches/glance/glance_location_patch/glance/gateway.py @@ -0,0 +1,125 @@ +# Copyright 2012 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from glance.api import authorization +from glance.api import policy +from glance.api import property_protections +from glance.common import property_utils +import glance.db +import glance.domain +import glance.notifier +import glance.quota +import glance.store +from glance.sync.client.v1 import api as syncapi + + +CONF = cfg.CONF +CONF.import_opt('sync_enabled', 'glance.common.config') + + +class Gateway(object): + + def __init__(self, db_api=None, store_api=None, notifier=None, + policy_enforcer=None, sync_api=None): + self.db_api = db_api or glance.db.get_api() + self.store_api = store_api or glance.store + self.notifier = notifier or glance.notifier.Notifier() + self.policy = policy_enforcer or policy.Enforcer() + self.sync_api = sync_api or syncapi + + def get_image_factory(self, context): + image_factory = glance.domain.ImageFactory() + store_image_factory = glance.store.ImageFactoryProxy( + image_factory, context, self.store_api) + quota_image_factory = glance.quota.ImageFactoryProxy( + store_image_factory, context, self.db_api) + policy_image_factory = policy.ImageFactoryProxy( + quota_image_factory, context, self.policy) + notifier_image_factory = glance.notifier.ImageFactoryProxy( + policy_image_factory, context, self.notifier) + if property_utils.is_property_protection_enabled(): + property_rules = property_utils.PropertyRules(self.policy) + protected_image_factory = property_protections.\ + ProtectedImageFactoryProxy(notifier_image_factory, context, + property_rules) + authorized_image_factory = authorization.ImageFactoryProxy( + protected_image_factory, context) + else: + authorized_image_factory = authorization.ImageFactoryProxy( + notifier_image_factory, context) + if CONF.sync_enabled: + sync_image_factory = glance.sync.ImageFactoryProxy( + authorized_image_factory, context, self.sync_api) + return sync_image_factory + return authorized_image_factory + + def get_image_member_factory(self, context): + image_factory = glance.domain.ImageMemberFactory() + quota_image_factory = glance.quota.ImageMemberFactoryProxy( + image_factory, context, self.db_api) + policy_member_factory = policy.ImageMemberFactoryProxy( + quota_image_factory, context, self.policy) + authorized_image_factory = authorization.ImageMemberFactoryProxy( + policy_member_factory, context) + return authorized_image_factory + + def get_repo(self, context): + image_repo = glance.db.ImageRepo(context, self.db_api) + store_image_repo = glance.store.ImageRepoProxy( + image_repo, context, self.store_api) + quota_image_repo = glance.quota.ImageRepoProxy( + store_image_repo, context, self.db_api) + policy_image_repo = policy.ImageRepoProxy( + quota_image_repo, context, self.policy) + notifier_image_repo = glance.notifier.ImageRepoProxy( + policy_image_repo, context, self.notifier) + if property_utils.is_property_protection_enabled(): + property_rules = property_utils.PropertyRules(self.policy) + protected_image_repo = property_protections.\ + ProtectedImageRepoProxy(notifier_image_repo, context, + property_rules) + authorized_image_repo = authorization.ImageRepoProxy( + protected_image_repo, context) + else: + authorized_image_repo = authorization.ImageRepoProxy( + notifier_image_repo, context) + if CONF.sync_enabled: + sync_image_repo = glance.sync.ImageRepoProxy( + authorized_image_repo, context, self.sync_api) + return sync_image_repo + return authorized_image_repo + + def get_task_factory(self, context): + task_factory = glance.domain.TaskFactory() + policy_task_factory = 
policy.TaskFactoryProxy( + task_factory, context, self.policy) + notifier_task_factory = glance.notifier.TaskFactoryProxy( + policy_task_factory, context, self.notifier) + authorized_task_factory = authorization.TaskFactoryProxy( + notifier_task_factory, context) + return authorized_task_factory + + def get_task_repo(self, context): + task_repo = glance.db.TaskRepo(context, self.db_api) + policy_task_repo = policy.TaskRepoProxy( + task_repo, context, self.policy) + notifier_task_repo = glance.notifier.TaskRepoProxy( + policy_task_repo, context, self.notifier) + authorized_task_repo = authorization.TaskRepoProxy( + notifier_task_repo, context) + return authorized_task_repo diff --git a/icehouse-patches/glance/glance_location_patch/glance/store/__init__.py b/icehouse-patches/glance/glance_location_patch/glance/store/__init__.py new file mode 100644 index 00000000..90452c20 --- /dev/null +++ b/icehouse-patches/glance/glance_location_patch/glance/store/__init__.py @@ -0,0 +1,814 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import copy +import re +import sys + +from oslo.config import cfg +import six + +from glance.common import exception +from glance.common import utils +import glance.context +import glance.domain.proxy +from glance.openstack.common import importutils +import glance.openstack.common.log as logging +from glance import scrubber +from glance.store import location + +LOG = logging.getLogger(__name__) + +store_opts = [ + cfg.ListOpt('known_stores', + default=[ + 'glance.store.filesystem.Store', + 'glance.store.http.Store' + ], + help=_('List of which store classes and store class locations ' + 'are currently known to glance at startup.')), + cfg.StrOpt('default_store', default='file', + help=_("Default scheme to use to store image data. The " + "scheme must be registered by one of the stores " + "defined by the 'known_stores' config option.")), + cfg.StrOpt('scrubber_datadir', + default='/var/lib/glance/scrubber', + help=_('Directory that the scrubber will use to track ' + 'information about what to delete. 
' + 'Make sure this is set in glance-api.conf and ' + 'glance-scrubber.conf.')), + cfg.BoolOpt('delayed_delete', default=False, + help=_('Turn on/off delayed delete.')), + cfg.BoolOpt('use_user_token', default=True, + help=_('Whether to pass through the user token when ' + 'making requests to the registry.')), + cfg.IntOpt('scrub_time', default=0, + help=_('The amount of time in seconds to delay before ' + 'performing a delete.')), +] + +REGISTERED_STORES = set() +CONF = cfg.CONF +CONF.register_opts(store_opts) + +_ALL_STORES = [ + 'glance.store.filesystem.Store', + 'glance.store.http.Store', + 'glance.store.rbd.Store', + 'glance.store.s3.Store', + 'glance.store.swift.Store', + 'glance.store.sheepdog.Store', + 'glance.store.cinder.Store', + 'glance.store.gridfs.Store', + 'glance.store.vmware_datastore.Store' +] + + +class BackendException(Exception): + pass + + +class UnsupportedBackend(BackendException): + pass + + +class Indexable(object): + + """ + Wrapper that allows an iterator or filelike be treated as an indexable + data structure. This is required in the case where the return value from + Store.get() is passed to Store.add() when adding a Copy-From image to a + Store where the client library relies on eventlet GreenSockets, in which + case the data to be written is indexed over. + """ + + def __init__(self, wrapped, size): + """ + Initialize the object + + :param wrappped: the wrapped iterator or filelike. + :param size: the size of data available + """ + self.wrapped = wrapped + self.size = int(size) if size else (wrapped.len + if hasattr(wrapped, 'len') else 0) + self.cursor = 0 + self.chunk = None + + def __iter__(self): + """ + Delegate iteration to the wrapped instance. + """ + for self.chunk in self.wrapped: + yield self.chunk + + def __getitem__(self, i): + """ + Index into the next chunk (or previous chunk in the case where + the last data returned was not fully consumed). + + :param i: a slice-to-the-end + """ + start = i.start if isinstance(i, slice) else i + if start < self.cursor: + return self.chunk[(start - self.cursor):] + + self.chunk = self.another() + if self.chunk: + self.cursor += len(self.chunk) + + return self.chunk + + def another(self): + """Implemented by subclasses to return the next element""" + raise NotImplementedError + + def getvalue(self): + """ + Return entire string value... used in testing + """ + return self.wrapped.getvalue() + + def __len__(self): + """ + Length accessor. + """ + return self.size + + +def _register_stores(store_classes): + """ + Given a set of store names, add them to a globally available set + of store names. + """ + for store_cls in store_classes: + REGISTERED_STORES.add(store_cls.__module__.split('.')[2]) + # NOTE (spredzy): The actual class name is filesystem but in order + # to maintain backward compatibility we need to keep the 'file' store + # as a known store + if 'filesystem' in REGISTERED_STORES: + REGISTERED_STORES.add('file') + + +def _get_store_class(store_entry): + store_cls = None + try: + LOG.debug("Attempting to import store %s", store_entry) + store_cls = importutils.import_class(store_entry) + except exception.NotFound: + raise BackendException('Unable to load store. ' + 'Could not find a class named %s.' + % store_entry) + return store_cls + + +def create_stores(): + """ + Registers all store modules and all schemes + from the given config. Duplicates are not re-registered. 
+ """ + store_count = 0 + store_classes = set() + for store_entry in set(CONF.known_stores + _ALL_STORES): + store_entry = store_entry.strip() + if not store_entry: + continue + store_cls = _get_store_class(store_entry) + try: + store_instance = store_cls() + except exception.BadStoreConfiguration as e: + if store_entry in CONF.known_stores: + LOG.warn(_("%s Skipping store driver.") % unicode(e)) + continue + finally: + # NOTE(flaper87): To be removed in Juno + if store_entry not in CONF.known_stores: + LOG.deprecated(_("%s not found in `known_store`. " + "Stores need to be explicitly enabled in " + "the configuration file.") % store_entry) + + schemes = store_instance.get_schemes() + if not schemes: + raise BackendException('Unable to register store %s. ' + 'No schemes associated with it.' + % store_cls) + else: + if store_cls not in store_classes: + LOG.debug("Registering store %s with schemes %s", + store_cls, schemes) + store_classes.add(store_cls) + scheme_map = {} + for scheme in schemes: + loc_cls = store_instance.get_store_location_class() + scheme_map[scheme] = { + 'store_class': store_cls, + 'location_class': loc_cls, + } + location.register_scheme_map(scheme_map) + store_count += 1 + else: + LOG.debug("Store %s already registered", store_cls) + _register_stores(store_classes) + return store_count + + +def verify_default_store(): + scheme = cfg.CONF.default_store + context = glance.context.RequestContext() + try: + get_store_from_scheme(context, scheme) + except exception.UnknownScheme: + msg = _("Store for scheme %s not found") % scheme + raise RuntimeError(msg) + + +def get_known_schemes(): + """Returns list of known schemes""" + return location.SCHEME_TO_CLS_MAP.keys() + + +def get_known_stores(): + """Returns list of known stores""" + return list(REGISTERED_STORES) + + +def get_store_from_scheme(context, scheme, loc=None): + """ + Given a scheme, return the appropriate store object + for handling that scheme. + """ + if scheme not in location.SCHEME_TO_CLS_MAP: + raise exception.UnknownScheme(scheme=scheme) + scheme_info = location.SCHEME_TO_CLS_MAP[scheme] + store = scheme_info['store_class'](context, loc) + return store + + +def get_store_from_uri(context, uri, loc=None): + """ + Given a URI, return the store object that would handle + operations on the URI. 
+ + :param uri: URI to analyze + """ + scheme = uri[0:uri.find('/') - 1] + store = get_store_from_scheme(context, scheme, loc) + return store + + +def get_from_backend(context, uri, **kwargs): + """Yields chunks of data from backend specified by uri""" + + loc = location.get_location_from_uri(uri) + store = get_store_from_uri(context, uri, loc) + + try: + return store.get(loc) + except NotImplementedError: + raise exception.StoreGetNotSupported + + +def get_size_from_backend(context, uri): + """Retrieves image size from backend specified by uri""" + if utils.is_glance_location(uri): + uri += ('?auth_token=' + context.auth_tok) + loc = location.get_location_from_uri(uri) + store = get_store_from_uri(context, uri, loc) + return store.get_size(loc) + + +def _check_glance_loc(context, location): + uri = location['url'] + if not utils.is_glance_location(uri): + return False + if 'auth_token=' in uri: + return True + location['url'] = uri + ('?auth_token=' + context.auth_tok) + return True + + +def delete_from_backend(context, uri, **kwargs): + """Removes chunks of data from backend specified by uri""" + loc = location.get_location_from_uri(uri) + store = get_store_from_uri(context, uri, loc) + + try: + return store.delete(loc) + except NotImplementedError: + raise exception.StoreDeleteNotSupported + + +def get_store_from_location(uri): + """ + Given a location (assumed to be a URL), attempt to determine + the store from the location. We use here a simple guess that + the scheme of the parsed URL is the store... + + :param uri: Location to check for the store + """ + loc = location.get_location_from_uri(uri) + return loc.store_name + + +def safe_delete_from_backend(context, uri, image_id, **kwargs): + """Given a uri, delete an image from the store.""" + try: + return delete_from_backend(context, uri, **kwargs) + except exception.NotFound: + msg = _('Failed to delete image %s in store from URI') + LOG.warn(msg % image_id) + except exception.StoreDeleteNotSupported as e: + LOG.warn(six.text_type(e)) + except UnsupportedBackend: + exc_type = sys.exc_info()[0].__name__ + msg = (_('Failed to delete image %(image_id)s from store ' + '(%(error)s)') % {'image_id': image_id, + 'error': exc_type}) + LOG.error(msg) + + +def schedule_delayed_delete_from_backend(context, uri, image_id, **kwargs): + """Given a uri, schedule the deletion of an image location.""" + (file_queue, _db_queue) = scrubber.get_scrub_queues() + # NOTE(zhiyan): Defautly ask glance-api store using file based queue. + # In future we can change it using DB based queued instead, + # such as using image location's status to saving pending delete flag + # when that property be added. + if CONF.use_user_token is False: + context = None + file_queue.add_location(image_id, uri, user_context=context) + + +def delete_image_from_backend(context, store_api, image_id, uri): + if CONF.delayed_delete: + store_api.schedule_delayed_delete_from_backend(context, uri, image_id) + else: + store_api.safe_delete_from_backend(context, uri, image_id) + + +def check_location_metadata(val, key=''): + if isinstance(val, dict): + for key in val: + check_location_metadata(val[key], key=key) + elif isinstance(val, list): + ndx = 0 + for v in val: + check_location_metadata(v, key='%s[%d]' % (key, ndx)) + ndx = ndx + 1 + elif not isinstance(val, unicode): + raise BackendException(_("The image metadata key %(key)s has an " + "invalid type of %(val)s. 
Only dict, list, " + "and unicode are supported.") % + {'key': key, + 'val': type(val)}) + + +def store_add_to_backend(image_id, data, size, store): + """ + A wrapper around a call to each stores add() method. This gives glance + a common place to check the output + + :param image_id: The image add to which data is added + :param data: The data to be stored + :param size: The length of the data in bytes + :param store: The store to which the data is being added + :return: The url location of the file, + the size amount of data, + the checksum of the data + the storage systems metadata dictionary for the location + """ + (location, size, checksum, metadata) = store.add(image_id, data, size) + if metadata is not None: + if not isinstance(metadata, dict): + msg = (_("The storage driver %(store)s returned invalid metadata " + "%(metadata)s. This must be a dictionary type") % + {'store': six.text_type(store), + 'metadata': six.text_type(metadata)}) + LOG.error(msg) + raise BackendException(msg) + try: + check_location_metadata(metadata) + except BackendException as e: + e_msg = (_("A bad metadata structure was returned from the " + "%(store)s storage driver: %(metadata)s. %(error)s.") % + {'store': six.text_type(store), + 'metadata': six.text_type(metadata), + 'error': six.text_type(e)}) + LOG.error(e_msg) + raise BackendException(e_msg) + return (location, size, checksum, metadata) + + +def add_to_backend(context, scheme, image_id, data, size): + store = get_store_from_scheme(context, scheme) + try: + return store_add_to_backend(image_id, data, size, store) + except NotImplementedError: + raise exception.StoreAddNotSupported + + +def set_acls(context, location_uri, public=False, read_tenants=[], + write_tenants=[]): + loc = location.get_location_from_uri(location_uri) + scheme = get_store_from_location(location_uri) + store = get_store_from_scheme(context, scheme, loc) + try: + store.set_acls(loc, public=public, read_tenants=read_tenants, + write_tenants=write_tenants) + except NotImplementedError: + LOG.debug(_("Skipping store.set_acls... not implemented.")) + + +class ImageRepoProxy(glance.domain.proxy.Repo): + + def __init__(self, image_repo, context, store_api): + self.context = context + self.store_api = store_api + proxy_kwargs = {'context': context, 'store_api': store_api} + super(ImageRepoProxy, self).__init__(image_repo, + item_proxy_class=ImageProxy, + item_proxy_kwargs=proxy_kwargs) + + def _set_acls(self, image): + public = image.visibility == 'public' + member_ids = [] + if image.locations and not public: + member_repo = image.get_member_repo() + member_ids = [m.member_id for m in member_repo.list()] + for location in image.locations: + self.store_api.set_acls(self.context, location['url'], public, + read_tenants=member_ids) + + def add(self, image): + result = super(ImageRepoProxy, self).add(image) + self._set_acls(image) + return result + + def save(self, image): + result = super(ImageRepoProxy, self).save(image) + self._set_acls(image) + return result + + +def _check_location_uri(context, store_api, uri): + """ + Check if an image location uri is valid. 
+ + :param context: Glance request context + :param store_api: store API module + :param uri: location's uri string + """ + is_ok = True + try: + size = store_api.get_size_from_backend(context, uri) + # NOTE(zhiyan): Some stores return zero when it catch exception + is_ok = size > 0 + except (exception.UnknownScheme, exception.NotFound): + is_ok = False + if not is_ok: + raise exception.BadStoreUri(_('Invalid location: %s') % uri) + + +def _check_image_location(context, store_api, location): + if not _check_glance_loc(context, location): + _check_location_uri(context, store_api, location['url']) + store_api.check_location_metadata(location['metadata']) + + +def _remove_extra_info(location): + url = location['url'] + if url.startswith('http'): + start = url.find('auth_token') + if start == -1: + return + end = url.find('&', start) + if end == -1: + if url[start - 1] == '?': + url = re.sub(r'\?auth_token=\S+', r'', url) + elif url[start - 1] == '&': + url = re.sub(r'&auth_token=\S+', r'', url) + else: + url = re.sub(r'auth_token=\S+&', r'', url) + + location['url'] = url + + +def _set_image_size(context, image, locations): + if not image.size: + for location in locations: + size_from_backend = glance.store.get_size_from_backend( + context, location['url']) + if size_from_backend: + # NOTE(flwang): This assumes all locations have the same size + image.size = size_from_backend + break + + +class ImageFactoryProxy(glance.domain.proxy.ImageFactory): + + def __init__(self, factory, context, store_api): + self.context = context + self.store_api = store_api + proxy_kwargs = {'context': context, 'store_api': store_api} + super(ImageFactoryProxy, self).__init__(factory, + proxy_class=ImageProxy, + proxy_kwargs=proxy_kwargs) + + def new_image(self, **kwargs): + locations = kwargs.get('locations', []) + for l in locations: + _check_image_location(self.context, self.store_api, l) + + if locations.count(l) > 1: + raise exception.DuplicateLocation(location=l['url']) + + return super(ImageFactoryProxy, self).new_image(**kwargs) + + +class StoreLocations(collections.MutableSequence): + + """ + The proxy for store location property. It takes responsibility for: + 1. Location uri correctness checking when adding a new location. + 2. Remove the image data from the store when a location is removed + from an image. + """ + + def __init__(self, image_proxy, value): + self.image_proxy = image_proxy + if isinstance(value, list): + self.value = value + else: + self.value = list(value) + + def append(self, location): + # NOTE(flaper87): Insert this + # location at the very end of + # the value list. 
+ self.insert(len(self.value), location) + + def extend(self, other): + if isinstance(other, StoreLocations): + locations = other.value + else: + locations = list(other) + + for location in locations: + self.append(location) + + def insert(self, i, location): + _check_image_location(self.image_proxy.context, + self.image_proxy.store_api, location) + + _remove_extra_info(location) + if location in self.value: + raise exception.DuplicateLocation(location=location['url']) + + self.value.insert(i, location) + _set_image_size(self.image_proxy.context, + self.image_proxy, + [location]) + + def pop(self, i=-1): + location = self.value.pop(i) + try: + delete_image_from_backend(self.image_proxy.context, + self.image_proxy.store_api, + self.image_proxy.image.image_id, + location['url']) + except Exception: + self.value.insert(i, location) + raise + return location + + def count(self, location): + return self.value.count(location) + + def index(self, location, *args): + return self.value.index(location, *args) + + def remove(self, location): + if self.count(location): + self.pop(self.index(location)) + else: + self.value.remove(location) + + def reverse(self): + self.value.reverse() + + # Mutable sequence, so not hashable + __hash__ = None + + def __getitem__(self, i): + return self.value.__getitem__(i) + + def __setitem__(self, i, location): + _check_image_location(self.image_proxy.context, + self.image_proxy.store_api, location) + self.value.__setitem__(i, location) + _set_image_size(self.image_proxy.context, + self.image_proxy, + [location]) + + def __delitem__(self, i): + location = None + try: + location = self.value.__getitem__(i) + except Exception: + return self.value.__delitem__(i) + delete_image_from_backend(self.image_proxy.context, + self.image_proxy.store_api, + self.image_proxy.image.image_id, + location['url']) + self.value.__delitem__(i) + + def __delslice__(self, i, j): + i = max(i, 0) + j = max(j, 0) + locations = [] + try: + locations = self.value.__getslice__(i, j) + except Exception: + return self.value.__delslice__(i, j) + for location in locations: + delete_image_from_backend(self.image_proxy.context, + self.image_proxy.store_api, + self.image_proxy.image.image_id, + location['url']) + self.value.__delitem__(i) + + def __iadd__(self, other): + self.extend(other) + return self + + def __contains__(self, location): + return location in self.value + + def __len__(self): + return len(self.value) + + def __cast(self, other): + if isinstance(other, StoreLocations): + return other.value + else: + return other + + def __cmp__(self, other): + return cmp(self.value, self.__cast(other)) + + def __iter__(self): + return iter(self.value) + + def __copy__(self): + return type(self)(self.image_proxy, self.value) + + def __deepcopy__(self, memo): + # NOTE(zhiyan): Only copy location entries, others can be reused. + value = copy.deepcopy(self.value, memo) + self.image_proxy.image.locations = value + return type(self)(self.image_proxy, value) + + +def _locations_proxy(target, attr): + """ + Make a location property proxy on the image object. 
+ + :param target: the image object on which to add the proxy + :param attr: the property proxy we want to hook + """ + + def get_attr(self): + value = getattr(getattr(self, target), attr) + return StoreLocations(self, value) + + def set_attr(self, value): + if not isinstance(value, (list, StoreLocations)): + raise exception.BadStoreUri(_('Invalid locations: %s') % value) + ori_value = getattr(getattr(self, target), attr) + if ori_value != value: + # NOTE(zhiyan): Enforced locations list was previously empty list. + if len(ori_value) > 0: + raise exception.Invalid(_('Original locations is not empty: ' + '%s') % ori_value) + # NOTE(zhiyan): Check locations are all valid. + for location in value: + _check_image_location(self.context, self.store_api, + location) + if value.count(location) > 1: + raise exception.DuplicateLocation(location=location['url']) + _set_image_size(self.context, getattr(self, target), value) + return setattr(getattr(self, target), attr, list(value)) + + def del_attr(self): + value = getattr(getattr(self, target), attr) + while len(value): + delete_image_from_backend(self.context, self.store_api, + self.image.image_id, value[0]['url']) + del value[0] + setattr(getattr(self, target), attr, value) + return delattr(getattr(self, target), attr) + + return property(get_attr, set_attr, del_attr) + +pattern = re.compile(r'^https?://\S+/v2/images/\S+$') + + +class ImageProxy(glance.domain.proxy.Image): + + locations = _locations_proxy('image', 'locations') + + def __init__(self, image, context, store_api): + self.image = image + self.context = context + self.store_api = store_api + proxy_kwargs = { + 'context': context, + 'image': self, + 'store_api': store_api, + } + super(ImageProxy, self).__init__( + image, member_repo_proxy_class=ImageMemberRepoProxy, + member_repo_proxy_kwargs=proxy_kwargs) + + def delete(self): + self.image.delete() + if self.image.locations: + for location in self.image.locations: + self.store_api.delete_image_from_backend(self.context, + self.store_api, + self.image.image_id, + location['url']) + + def set_data(self, data, size=None): + if size is None: + size = 0 # NOTE(markwash): zero -> unknown size + location, size, checksum, loc_meta = self.store_api.add_to_backend( + self.context, CONF.default_store, + self.image.image_id, utils.CooperativeReader(data), size) + loc_meta = loc_meta or {} + loc_meta['is_default'] = 'true' + self.image.locations = [{'url': location, 'metadata': loc_meta}] + self.image.size = size + self.image.checksum = checksum + self.image.status = 'active' + + def get_data(self): + if not self.image.locations: + raise exception.NotFound(_("No image data could be found")) + err = None + for loc in self.image.locations: + if pattern.match(loc['url']): + continue + try: + data, size = self.store_api.get_from_backend(self.context, + loc['url']) + + return data + except Exception as e: + LOG.warn(_('Get image %(id)s data failed: ' + '%(err)s.') % {'id': self.image.image_id, + 'err': six.text_type(e)}) + err = e + # tried all locations + LOG.error(_('Glance tried all locations to get data for image %s ' + 'but all have failed.') % self.image.image_id) + raise err + + +class ImageMemberRepoProxy(glance.domain.proxy.Repo): + + def __init__(self, repo, image, context, store_api): + self.repo = repo + self.image = image + self.context = context + self.store_api = store_api + super(ImageMemberRepoProxy, self).__init__(repo) + + def _set_acls(self): + public = self.image.visibility == 'public' + if self.image.locations and not public: + 
member_ids = [m.member_id for m in self.repo.list()] + for location in self.image.locations: + self.store_api.set_acls(self.context, location['url'], public, + read_tenants=member_ids) + + def add(self, member): + super(ImageMemberRepoProxy, self).add(member) + self._set_acls() + + def remove(self, member): + super(ImageMemberRepoProxy, self).remove(member) + self._set_acls() diff --git a/icehouse-patches/glance/glance_location_patch/glance/store/http.py b/icehouse-patches/glance/glance_location_patch/glance/store/http.py new file mode 100644 index 00000000..35047388 --- /dev/null +++ b/icehouse-patches/glance/glance_location_patch/glance/store/http.py @@ -0,0 +1,216 @@ +# Copyright 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import httplib + +import six.moves.urllib.parse as urlparse + +from glance.common import exception +from glance.openstack.common import jsonutils +import glance.openstack.common.log as logging +import glance.store.base +import glance.store.location + +LOG = logging.getLogger(__name__) + + +MAX_REDIRECTS = 5 + + +class StoreLocation(glance.store.location.StoreLocation): + + """Class describing an HTTP(S) URI""" + + def process_specs(self): + self.scheme = self.specs.get('scheme', 'http') + self.netloc = self.specs['netloc'] + self.user = self.specs.get('user') + self.password = self.specs.get('password') + self.path = self.specs.get('path') + + def _get_credstring(self): + if self.user: + return '%s:%s@' % (self.user, self.password) + return '' + + def get_uri(self): + return "%s://%s%s%s" % ( + self.scheme, + self._get_credstring(), + self.netloc, + self.path) + + def parse_uri(self, uri): + """ + Parse URLs. This method fixes an issue where credentials specified + in the URL are interpreted differently in Python 2.6.1+ than prior + versions of Python. + """ + pieces = urlparse.urlparse(uri) + assert pieces.scheme in ('https', 'http') + self.scheme = pieces.scheme + netloc = pieces.netloc + path = pieces.path + try: + if '@' in netloc: + creds, netloc = netloc.split('@') + else: + creds = None + except ValueError: + # Python 2.6.1 compat + # see lp659445 and Python issue7904 + if '@' in path: + creds, path = path.split('@') + else: + creds = None + if creds: + try: + self.user, self.password = creds.split(':') + except ValueError: + reason = (_("Credentials '%s' not well-formatted.") + % "".join(creds)) + LOG.debug(reason) + raise exception.BadStoreUri() + else: + self.user = None + if netloc == '': + reason = _("No address specified in HTTP URL") + LOG.debug(reason) + raise exception.BadStoreUri(message=reason) + self.netloc = netloc + self.path = path + + self.token = None + if pieces.query: + params = pieces.query.split('&') + for param in params: + if 'auth_token' == param.split("=")[0].strip(): + self.token = param.split("=")[1] + break + + +def http_response_iterator(conn, response, size): + """ + Return an iterator for a file-like object. 
+ + :param conn: HTTP(S) Connection + :param response: httplib.HTTPResponse object + :param size: Chunk size to iterate with + """ + chunk = response.read(size) + while chunk: + yield chunk + chunk = response.read(size) + conn.close() + + +class Store(glance.store.base.Store): + + """An implementation of the HTTP(S) Backend Adapter""" + + def get(self, location): + """ + Takes a `glance.store.location.Location` object that indicates + where to find the image file, and returns a tuple of generator + (for reading the image file) and image_size + + :param location `glance.store.location.Location` object, supplied + from glance.store.location.get_location_from_uri() + """ + conn, resp, content_length = self._query(location, 'GET') + + iterator = http_response_iterator(conn, resp, self.CHUNKSIZE) + + class ResponseIndexable(glance.store.Indexable): + + def another(self): + try: + return self.wrapped.next() + except StopIteration: + return '' + + return (ResponseIndexable(iterator, content_length), content_length) + + def get_schemes(self): + return ('http', 'https') + + def get_size(self, location): + """ + Takes a `glance.store.location.Location` object that indicates + where to find the image file, and returns the size + + :param location `glance.store.location.Location` object, supplied + from glance.store.location.get_location_from_uri() + """ + try: + return self._query(location, 'HEAD')[2] + except Exception: + return 0 + + def _query(self, location, verb, depth=0): + if depth > MAX_REDIRECTS: + reason = (_("The HTTP URL exceeded %s maximum " + "redirects.") % MAX_REDIRECTS) + LOG.debug(reason) + raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS) + loc = location.store_location + conn_class = self._get_conn_class(loc) + conn = conn_class(loc.netloc) + hearders = {} + if loc.token: + hearders.setdefault('x-auth-token', loc.token) + verb = 'GET' + conn.request(verb, loc.path, "", hearders) + resp = conn.getresponse() + try: + size = jsonutils.loads(resp.read())['size'] + except Exception: + size = 0 + raise exception.BadStoreUri(loc.path, reason) + return (conn, resp, size) + + conn.request(verb, loc.path, "", hearders) + resp = conn.getresponse() + + # Check for bad status codes + if resp.status >= 400: + reason = _("HTTP URL returned a %s status code.") % resp.status + LOG.debug(reason) + raise exception.BadStoreUri(loc.path, reason) + + location_header = resp.getheader("location") + if location_header: + if resp.status not in (301, 302): + reason = (_("The HTTP URL attempted to redirect with an " + "invalid %s status code.") % resp.status) + LOG.debug(reason) + raise exception.BadStoreUri(loc.path, reason) + location_class = glance.store.location.Location + new_loc = location_class(location.store_name, + location.store_location.__class__, + uri=location_header, + image_id=location.image_id, + store_specs=location.store_specs) + return self._query(new_loc, verb, depth + 1) + content_length = int(resp.getheader('content-length', 0)) + return (conn, resp, content_length) + + def _get_conn_class(self, loc): + """ + Returns connection class for accessing the resource. Useful + for dependency injection and stubouts in testing... 
+ """ + return {'http': httplib.HTTPConnection, + 'https': httplib.HTTPSConnection}[loc.scheme] diff --git a/icehouse-patches/glance/glance_location_patch/installation/install.sh b/icehouse-patches/glance/glance_location_patch/installation/install.sh new file mode 100644 index 00000000..f7c5b019 --- /dev/null +++ b/icehouse-patches/glance/glance_location_patch/installation/install.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +CURPATH=$(cd "$(dirname "$0")"; pwd) +_GLANCE_CONF_DIR="/etc/glance" +_GLANCE_API_CONF_FILE="glance-api.conf" +_PYTHON_INSTALL_DIR="/usr/lib64/python2.6/site-packages" +_GLANCE_DIR="${_PYTHON_INSTALL_DIR}/glance" + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="${CURPATH}/../glance" +_CONF_DIR="${CURPATH}/../etc" +_PATCH_DIR="${CURPATH}/.." +_BACKUP_DIR="${_GLANCE_DIR}/glance-installation-backup" + +_SCRIPT_LOGFILE="/var/log/glance/installation/install.log" + +api_config_option_list="sync_enabled=True sync_server_port=9595 sync_server_host=127.0.0.1" + +export PS4='+{$LINENO:${FUNCNAME[0]}}' + +ERRTRAP() +{ + echo "[LINE:$1] Error: Command or function exited with status $?" +} + +function log() +{ + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE +} + +trap 'ERRTRAP $LINENO' ERR + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + +if [ ! -d "/var/log/glance/installation" ]; then + mkdir /var/log/glance/installation + touch _SCRIPT_LOGFILE +fi + +cd `dirname $0` + +log "checking installation directories..." +if [ ! -d "${_GLANCE_DIR}" ] ; then + log "Could not find the glance installation. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi +if [ ! -f "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}" ] ; then + log "Could not find glance-api config file. Please check the variables in the beginning of the script." + log "aborted." + exit 1 +fi + + +log "checking previous installation..." +if [ -d "${_BACKUP_DIR}/glance" ] ; then + log "It seems glance cascading has already been installed!" + log "Please check README for solution if this is not true." + exit 1 +fi + +log "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}/glance" +mkdir -p "${_BACKUP_DIR}/etc" +mkdir -p "${_BACKUP_DIR}/etc/glance" +cp -rf "${_GLANCE_CONF_DIR}/${_GLANCE_API_CONF_FILE}" "${_BACKUP_DIR}/etc/glance/" + +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/glance" + rm -r "${_BACKUP_DIR}/etc" + log "Error in config backup, aborted." + exit 1 +fi + +log "copying in new files..." + +cp -r "${_PATCH_DIR}/glance" `dirname ${_GLANCE_DIR}` + +glanceEggDir=`ls ${_PYTHON_INSTALL_DIR} |grep -e glance- |grep -e egg-info ` +if [ ! -d ${_PYTHON_INSTALL_DIR}/${glanceEggDir} ]; then + log "glance install dir not exist. Pleas check manually." + exit 1 +fi +cp "${_PATCH_DIR}/glance.egg-info/entry_points.txt" "${_PYTHON_INSTALL_DIR}/${glanceEggDir}/" +if [ $? 
-ne 0 ] ; then + log "Error in copying, aborted. Please install manually." + exit 1 +fi + +log "Completed." +log "See README to get started." + +exit 0 diff --git a/icehouse-patches/neutron/dvr-patch/README.md b/icehouse-patches/neutron/dvr-patch/README.md new file mode 100644 index 00000000..4c5e10fe --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/README.md @@ -0,0 +1,165 @@ +Openstack Neutron DVR patch +=============================== + + To solve the scalability problem in the OpenStack Neutron Deployment and to distribute the Network Node load to other Compute Nodes, some people proposed a solusion which is named DVR(Distributed Virtual Router).Distributed Virtual Router solves both the problems by providing a solution that would fit into the existing model. + + DVR feature code has been merged into the neutron master branch, Neutron Juno release version would have expected the DVR characteristic. This patch was download from DVR branch on 1st June. + + +Key modules +----------- + + * L2 Agent Doc + + https://docs.google.com/document/d/1depasJSnGZPOnRLxEC_PYsVLcGVFXZLqP52RFTe21BE/edit#heading=h.5w7clq272tji + + * L3 Agent Doc + + https://docs.google.com/document/d/1jCmraZGirmXq5V1MtRqhjdZCbUfiwBhRkUjDXGt5QUQ/edit + + Addressed by: https://review.openstack.org/84223 + * Add L3 Extension for Distributed Routers + + Addressed by: https://review.openstack.org/87730 + * L2 Agent/ML2 Plugin changes for L3 DVR + + Addressed by: https://review.openstack.org/88442 + * Add 'ip neigh' to ip_lib + + Addressed by: https://review.openstack.org/89413 + * Modify L3 Agent for Distributed Routers + + Addressed by: https://review.openstack.org/89694 + * Add L3 Scheduler Changes for Distributed Routers + + Addressed by: https://review.openstack.org/93233 + * Add 'ip rule add from' to ip_lib + + Addressed by: https://review.openstack.org/96389 + * Addressed merge conflict + + Addressed by: https://review.openstack.org/97028 + * Refactor some router-related methods + + Addressed by: https://review.openstack.org/97275 + * Allow L3 base to handle extensions on router creation + + Addressed by: https://review.openstack.org/102101 + * L2 Model additions to support DVR + + Addressed by: https://review.openstack.org/102332 + * RPC additions to support DVR + + Addressed by: https://review.openstack.org/102398 + * ML2 additions to support DVR + +Requirements +------------ +* openstack-neutron-server-2014.1-1.1 has been installed +* oslo.db-0.2.0 has been installed +* sqlalchemy-migrate-0.9.1 has been installed + +Installation +------------ + +We provide two ways to install the DVR patch code. In this section, we will guide you through installing the neutron DVR code with the minimum configuration. + +* **Note:** + + - Make sure you have an existing installation of **Openstack Icehouse**. + - We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified: + $NEUTRON_CONFIG_PARENT_DIR/neutron.conf + (replace the $... with actual directory names.) + +* **Manual Installation** + + - Navigate to the local repository and copy the contents in 'neutron' sub-directory to the corresponding places in existing neutron, e.g. + ```cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR``` + (replace the $... with actual directory name.) + + - Navigate to the local repository and copy the contents in 'etc' sub-directory to the corresponding places in existing neutron, e.g. + ```cp -r $LOCAL_REPOSITORY_DIR/etc $NEUTRON_CONFIG_DIR``` + (replace the $... 
with actual directory name.) + + - Update the neutron configuration file (e.g. /etc/neutron/l3_agent.ini, /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini) with the minimum option below. If the option already exists, modify its value, otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide. + 1)update l3 agent configurations(/etc/neutron/l3_agent.ini) + ``` + [DEFAULT] + ... + distributed_agent=True + ``` + 2)update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini) + ``` + [AGENT] + ... + enable_distributed_routing = True + ``` + + - Remove the neutron DB + + - Create the neutron DB + ```neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head``` + + - Restart the neutron-server/openvswitch-agent/l3-agent. + ```service openstack-neutron restart``` + ```service openstack-neutron-openvswitch-agent restart``` + ```service openstack-neutron-l3-agent restart``` + + - Done. + +* **Automatic Installation** + + - Navigate to the installation directory and run installation script. + ``` + cd $LOCAL_REPOSITORY_DIR/installation + sudo bash ./install.sh + ``` + (replace the $... with actual directory name.) + + - Done. The installation code should setup the DVR code without the minimum configuration modifying. Check the "Configurations" section for a full configuration guide. + 1)update l3 agent configurations(/etc/neutron/l3_agent.ini) + ``` + [DEFAULT] + ... + distributed_agent=True + ``` + 2)update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini) + ``` + [AGENT] + ... + enable_distributed_routing = True + ``` + +* **Troubleshooting** + + In case the automatic installation process is not complete, please check the followings: + + - Make sure your OpenStack version is Icehouse. + + - Check the variables in the beginning of the install.sh scripts. Your installation directories may be different from the default values we provide. + + - The installation code will automatically add the related codes to $NEUTRON_PARENT_DIR/nova but not modify the related configuration, you should update the related configurations manually. + - In case the automatic installation does not work, try to install manually. + +Configurations +-------------- + +* This is a (default) configuration sample for the l2 proxy. Please add/modify these options in (/etc/neutron/l3_agent.ini, /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini). +* Note: + - Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name. + - Please refer to the 'Configuration Details' section below for proper configuration and usage of costs and constraints. + + 1)add or update l3 agent configurations(/etc/neutron/l3_agent.ini) + ``` + [DEFAULT] + ... + #Enables distributed router agent function + distributed_agent=True + ``` + 2)add or update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini) + ``` + [AGENT] + ... 
+ #Make the l2 agent run in dvr mode + enable_distributed_routing = True + ``` diff --git a/icehouse-patches/neutron/dvr-patch/installation/install.sh b/icehouse-patches/neutron/dvr-patch/installation/install.sh new file mode 100644 index 00000000..c2690142 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/installation/install.sh @@ -0,0 +1,148 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_MYSQL_PASS="Galax8800" +_NEUTRON_CONF_DIR="/etc/neutron" +_NEUTRON_CONF_FILE='neutron.conf' +_NEUTRON_INSTALL="/usr/lib64/python2.6/site-packages" +_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" + +_NEUTRON_L2_CONFIG_FILE='/plugins/openvswitch/ovs_neutron_plugin.ini' +_NEUTRON_L3_CONFIG_FILE='l3_agent.ini' +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../neutron/" +_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-dvr-code-installation-backup" + +l2_config_option_list="\[AGENT\]:firewall_driver=neutron.agent.firewall.NoopFirewallDriver \[SECURITYGROUP\]:enable_distributed_routing=True" +l3_config_option_list="\[DEFAULT\]:distributed_agent=True" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/neutron-dvr-code/installation/${_SCRIPT_NAME}.log" + +if [[ ${EUID} -ne 0 ]]; then + echo "Please run as root." + exit 1 +fi + +##Redirecting output to logfile as well as stdout +#exec > >(tee -a ${_SCRIPT_LOGFILE}) +#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) + +cd `dirname $0` + +echo "checking installation directories..." +if [ ! -d "${_NEUTRON_DIR}" ] ; then + echo "Could not find the neutron installation. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi +if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then + echo "Could not find neutron config file. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi + +echo "checking previous installation..." +if [ -d "${_BACKUP_DIR}/neutron" ] ; then + echo "It seems neutron-dvr-code-cascaded has already been installed!" + echo "Please check README for solution if this is not true." + exit 1 +fi + +echo "backing up current code files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}" +cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/neutron" + echo "Error in code backup code files, aborted." + exit 1 +fi + +echo "backing up current config code files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}/etc" +cp -r "${_NEUTRON_CONF_DIR}/" "${_BACKUP_DIR}/etc" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/etc" + echo "Error in code backup config files, aborted." + exit 1 +fi + +echo "copying in new files..." +cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` +if [ $? -ne 0 ] ; then + echo "Error in copying, aborted." + echo "Recovering original files..." + cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" + if [ $? -ne 0 ] ; then + echo "Recovering failed! 
Please install manually." + fi + exit 1 + +fi + +if [ -d "${_NEUTRON_DIR}/openstack/common/db/rpc" ] ; then + rm -r "${_NEUTRON_DIR}/openstack/common/db/rpc" +fi + +echo "updating l2 config file..." +for option in $l2_config_option_list +do + option_branch=`echo $option|awk -F ":" '{print $1}'` + option_config=`echo $option|awk -F ":" '{print $2}'` + option_key=`echo $option_config|awk -F "=" '{print $1}'` + option_value=`echo $option_config|awk -F "=" '{print $2}'` + sed -i.backup -e "/$option_key *=/d" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_CONFIG_FILE}" + echo "$option_key,***************$option_value" + sed -i "/$option_branch/a\\$option_key=$option_value" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_CONFIG_FILE}" + +done +echo "updating l3 config file..." +for option in $l3_config_option_list +do + option_branch=`echo $option|awk -F ":" '{print $1}'` + option_config=`echo $option|awk -F ":" '{print $2}'` + option_key=`echo $option_config|awk -F "=" '{print $1}'` + option_value=`echo $option_config|awk -F "=" '{print $2}'` + sed -i.backup -e "/$option_key *=/d" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONFIG_FILE}" + echo "$option_key,***************$option_value" + sed -i "/$option_branch/a\\$option_key=$option_value" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONFIG_FILE}" + +done + + +echo "create neutron db..." +exec_sql_str="DROP DATABASE if exists neutron;CREATE DATABASE neutron;GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY \"$_MYSQL_PASS\";GRANT ALL PRIVILEGES ON *.* TO 'neutron'@'%'IDENTIFIED BY \"$_MYSQL_PASS\";" +mysql -u root -p$_MYSQL_PASS -e "$exec_sql_str" +echo "syc neutron db..." +neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head + +if [ $? -ne 0 ] ; then + log "There was an error in sync neutron db, please sync neutron db manually." + exit 1 +fi + +#echo "restarting neutron server..." +#service openstack-neutron stop + +#if [ $? -ne 0 ] ; then +# echo "There was an error in restarting the service, please restart neutron server manually." +# exit 1 +#fi + +echo "Completed." +echo "See README to get started." + +exit 0 diff --git a/icehouse-patches/neutron/dvr-patch/neutron/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/__init__.py new file mode 100644 index 00000000..710b18c4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import gettext + + +gettext.install('neutron', unicode=1) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/__init__.py new file mode 100644 index 00000000..bf3075dd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/common/__init__.py new file mode 100644 index 00000000..bf3075dd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/common/config.py new file mode 100644 index 00000000..d9395609 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/common/config.py @@ -0,0 +1,121 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
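Relating back to the DVR README and installation/install.sh above: they require `distributed_agent = True` in the `[DEFAULT]` section of l3_agent.ini and `enable_distributed_routing = True` in the `[AGENT]` section of ovs_neutron_plugin.ini, which the installer applies with sed ("add the option if missing, otherwise modify its value"). A hedged sketch of the same idempotent edit done in Python — the `set_option` helper is an illustrative assumption, not part of the patch; the file paths are the ones named in the README:

```python
try:                                    # Python 2, as used by this patch
    import ConfigParser as configparser
except ImportError:                     # Python 3
    import configparser


def set_option(path, section, key, value):
    """Add the option if missing, otherwise overwrite its value,
    mirroring what installation/install.sh does with sed."""
    cp = configparser.ConfigParser()
    cp.read(path)
    if section != 'DEFAULT' and not cp.has_section(section):
        cp.add_section(section)
    cp.set(section, key, value)
    with open(path, 'w') as f:
        cp.write(f)


set_option('/etc/neutron/l3_agent.ini',
           'DEFAULT', 'distributed_agent', 'True')
set_option('/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini',
           'AGENT', 'enable_distributed_routing', 'True')
```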
+ +import os + +from oslo.config import cfg + +from neutron.common import config +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +ROOT_HELPER_OPTS = [ + cfg.StrOpt('root_helper', default='sudo', + help=_('Root helper application.')), +] + +AGENT_STATE_OPTS = [ + cfg.FloatOpt('report_interval', default=30, + help=_('Seconds between nodes reporting state to server; ' + 'should be less than agent_down_time, best if it ' + 'is half or less than agent_down_time.')), +] + +INTERFACE_DRIVER_OPTS = [ + cfg.StrOpt('interface_driver', + help=_("The driver used to manage the virtual interface.")), +] + +USE_NAMESPACES_OPTS = [ + cfg.BoolOpt('use_namespaces', default=True, + help=_("Allow overlapping IP.")), +] + + +def get_log_args(conf, log_file_name): + cmd_args = [] + if conf.debug: + cmd_args.append('--debug') + if conf.verbose: + cmd_args.append('--verbose') + if (conf.log_dir or conf.log_file): + cmd_args.append('--log-file=%s' % log_file_name) + log_dir = None + if conf.log_dir and conf.log_file: + log_dir = os.path.dirname( + os.path.join(conf.log_dir, conf.log_file)) + elif conf.log_dir: + log_dir = conf.log_dir + elif conf.log_file: + log_dir = os.path.dirname(conf.log_file) + if log_dir: + cmd_args.append('--log-dir=%s' % log_dir) + else: + if conf.use_syslog: + cmd_args.append('--use-syslog') + if conf.syslog_log_facility: + cmd_args.append( + '--syslog-log-facility=%s' % conf.syslog_log_facility) + return cmd_args + + +def register_root_helper(conf): + # The first call is to ensure backward compatibility + conf.register_opts(ROOT_HELPER_OPTS) + conf.register_opts(ROOT_HELPER_OPTS, 'AGENT') + + +def register_agent_state_opts_helper(conf): + conf.register_opts(AGENT_STATE_OPTS, 'AGENT') + + +def register_interface_driver_opts_helper(conf): + conf.register_opts(INTERFACE_DRIVER_OPTS) + + +def register_use_namespaces_opts_helper(conf): + conf.register_opts(USE_NAMESPACES_OPTS) + + +def get_root_helper(conf): + root_helper = conf.AGENT.root_helper + if root_helper != 'sudo': + return root_helper + + root_helper = conf.root_helper + if root_helper != 'sudo': + LOG.deprecated(_('DEFAULT.root_helper is deprecated! Please move ' + 'root_helper configuration to [AGENT] section.')) + return root_helper + + return 'sudo' + + +def setup_conf(): + bind_opts = [ + cfg.StrOpt('state_path', + default='/var/lib/neutron', + help=_('Top-level directory for maintaining dhcp state')), + ] + + conf = cfg.ConfigOpts() + conf.register_opts(bind_opts) + return conf + +# add a logging setup method here for convenience +setup_logging = config.setup_logging diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/dhcp_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/dhcp_agent.py new file mode 100644 index 00000000..29119799 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/dhcp_agent.py @@ -0,0 +1,620 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import sys + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import dhcp +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ovs_lib # noqa +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants +from neutron.common import exceptions +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron import context +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import service +from neutron import service as neutron_service + +LOG = logging.getLogger(__name__) + + +class DhcpAgent(manager.Manager): + OPTS = [ + cfg.IntOpt('resync_interval', default=5, + help=_("Interval to resync.")), + cfg.StrOpt('dhcp_driver', + default='neutron.agent.linux.dhcp.Dnsmasq', + help=_("The driver used to manage the DHCP server.")), + cfg.BoolOpt('enable_isolated_metadata', default=False, + help=_("Support Metadata requests on isolated networks.")), + cfg.BoolOpt('enable_metadata_network', default=False, + help=_("Allows for serving metadata requests from a " + "dedicated network. Requires " + "enable_isolated_metadata = True")), + cfg.IntOpt('num_sync_threads', default=4, + help=_('Number of threads to use during sync process.')), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + ] + + def __init__(self, host=None): + super(DhcpAgent, self).__init__(host=host) + self.needs_resync_reasons = [] + self.conf = cfg.CONF + self.cache = NetworkCache() + self.root_helper = config.get_root_helper(self.conf) + self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver) + ctx = context.get_admin_context_without_session() + self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, + ctx, self.conf.use_namespaces) + # create dhcp dir to store dhcp info + dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path) + if not os.path.isdir(dhcp_dir): + os.makedirs(dhcp_dir, 0o755) + self.dhcp_version = self.dhcp_driver_cls.check_version() + self._populate_networks_cache() + + def _populate_networks_cache(self): + """Populate the networks cache when the DHCP-agent starts.""" + try: + existing_networks = self.dhcp_driver_cls.existing_dhcp_networks( + self.conf, + self.root_helper + ) + for net_id in existing_networks: + net = dhcp.NetModel(self.conf.use_namespaces, + {"id": net_id, + "subnets": [], + "ports": []}) + self.cache.put(net) + except NotImplementedError: + # just go ahead with an empty networks cache + LOG.debug( + _("The '%s' DHCP-driver does not support retrieving of a " + "list of existing networks"), + self.conf.dhcp_driver + ) + + def after_start(self): + self.run() + LOG.info(_("DHCP agent started")) + + def run(self): + """Activate the DHCP agent.""" + self.sync_state() + self.periodic_resync() + + def call_driver(self, action, network, **action_kwargs): + """Invoke an action on a DHCP driver instance.""" + LOG.debug(_('Calling driver for network: %(net)s action: %(action)s'), + {'net': network.id, 'action': action}) + try: + # the Driver expects something that is duck typed similar to + # the base models. 
+ driver = self.dhcp_driver_cls(self.conf, + network, + self.root_helper, + self.dhcp_version, + self.plugin_rpc) + + getattr(driver, action)(**action_kwargs) + return True + except exceptions.Conflict: + # No need to resync here, the agent will receive the event related + # to a status update for the network + LOG.warning(_('Unable to %(action)s dhcp for %(net_id)s: there is ' + 'a conflict with its current state; please check ' + 'that the network and/or its subnet(s) still exist.') + % {'net_id': network.id, 'action': action}) + except Exception as e: + self.schedule_resync(e) + if (isinstance(e, n_rpc.RemoteError) + and e.exc_type == 'NetworkNotFound' + or isinstance(e, exceptions.NetworkNotFound)): + LOG.warning(_("Network %s has been deleted."), network.id) + else: + LOG.exception(_('Unable to %(action)s dhcp for %(net_id)s.') + % {'net_id': network.id, 'action': action}) + + def schedule_resync(self, reason): + """Schedule a resync for a given reason.""" + self.needs_resync_reasons.append(reason) + + @utils.synchronized('dhcp-agent') + def sync_state(self): + """Sync the local DHCP state with Neutron.""" + LOG.info(_('Synchronizing state')) + pool = eventlet.GreenPool(cfg.CONF.num_sync_threads) + known_network_ids = set(self.cache.get_network_ids()) + + try: + active_networks = self.plugin_rpc.get_active_networks_info() + active_network_ids = set(network.id for network in active_networks) + for deleted_id in known_network_ids - active_network_ids: + try: + self.disable_dhcp_helper(deleted_id) + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Unable to sync network state on deleted ' + 'network %s'), deleted_id) + + for network in active_networks: + pool.spawn(self.safe_configure_dhcp_for_network, network) + pool.waitall() + LOG.info(_('Synchronizing state complete')) + + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Unable to sync network state.')) + + def _periodic_resync_helper(self): + """Resync the dhcp state at the configured interval.""" + while True: + eventlet.sleep(self.conf.resync_interval) + if self.needs_resync_reasons: + # be careful to avoid a race with additions to list + # from other threads + reasons = self.needs_resync_reasons + self.needs_resync_reasons = [] + for r in reasons: + LOG.debug(_("resync: %(reason)s"), + {"reason": r}) + self.sync_state() + + def periodic_resync(self): + """Spawn a thread to periodically resync the dhcp state.""" + eventlet.spawn(self._periodic_resync_helper) + + def safe_get_network_info(self, network_id): + try: + network = self.plugin_rpc.get_network_info(network_id) + if not network: + LOG.warn(_('Network %s has been deleted.'), network_id) + return network + except Exception as e: + self.schedule_resync(e) + LOG.exception(_('Network %s info call failed.'), network_id) + + def enable_dhcp_helper(self, network_id): + """Enable DHCP for a network that meets enabling criteria.""" + network = self.safe_get_network_info(network_id) + if network: + self.configure_dhcp_for_network(network) + + def safe_configure_dhcp_for_network(self, network): + try: + self.configure_dhcp_for_network(network) + except (exceptions.NetworkNotFound, RuntimeError): + LOG.warn(_('Network %s may have been deleted and its resources ' + 'may have already been disposed.'), network.id) + + def configure_dhcp_for_network(self, network): + if not network.admin_state_up: + return + + for subnet in network.subnets: + if subnet.enable_dhcp: + if self.call_driver('enable', network): + if (self.conf.use_namespaces and + 
self.conf.enable_isolated_metadata): + self.enable_isolated_metadata_proxy(network) + self.cache.put(network) + break + + def disable_dhcp_helper(self, network_id): + """Disable DHCP for a network known to the agent.""" + network = self.cache.get_network_by_id(network_id) + if network: + if (self.conf.use_namespaces and + self.conf.enable_isolated_metadata): + self.disable_isolated_metadata_proxy(network) + if self.call_driver('disable', network): + self.cache.remove(network) + + def refresh_dhcp_helper(self, network_id): + """Refresh or disable DHCP for a network depending on the current state + of the network. + """ + old_network = self.cache.get_network_by_id(network_id) + if not old_network: + # DHCP current not running for network. + return self.enable_dhcp_helper(network_id) + + network = self.safe_get_network_info(network_id) + if not network: + return + + old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp) + new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp) + + if new_cidrs and old_cidrs == new_cidrs: + self.call_driver('reload_allocations', network) + self.cache.put(network) + elif new_cidrs: + if self.call_driver('restart', network): + self.cache.put(network) + else: + self.disable_dhcp_helper(network.id) + + @utils.synchronized('dhcp-agent') + def network_create_end(self, context, payload): + """Handle the network.create.end notification event.""" + network_id = payload['network']['id'] + self.enable_dhcp_helper(network_id) + + @utils.synchronized('dhcp-agent') + def network_update_end(self, context, payload): + """Handle the network.update.end notification event.""" + network_id = payload['network']['id'] + if payload['network']['admin_state_up']: + self.enable_dhcp_helper(network_id) + else: + self.disable_dhcp_helper(network_id) + + @utils.synchronized('dhcp-agent') + def network_delete_end(self, context, payload): + """Handle the network.delete.end notification event.""" + self.disable_dhcp_helper(payload['network_id']) + + @utils.synchronized('dhcp-agent') + def subnet_update_end(self, context, payload): + """Handle the subnet.update.end notification event.""" + network_id = payload['subnet']['network_id'] + self.refresh_dhcp_helper(network_id) + + # Use the update handler for the subnet create event. + subnet_create_end = subnet_update_end + + @utils.synchronized('dhcp-agent') + def subnet_delete_end(self, context, payload): + """Handle the subnet.delete.end notification event.""" + subnet_id = payload['subnet_id'] + network = self.cache.get_network_by_subnet_id(subnet_id) + if network: + self.refresh_dhcp_helper(network.id) + + @utils.synchronized('dhcp-agent') + def port_update_end(self, context, payload): + """Handle the port.update.end notification event.""" + updated_port = dhcp.DictModel(payload['port']) + network = self.cache.get_network_by_id(updated_port.network_id) + if network: + self.cache.put_port(updated_port) + self.call_driver('reload_allocations', network) + + # Use the update handler for the port create event. 
+ port_create_end = port_update_end + + @utils.synchronized('dhcp-agent') + def port_delete_end(self, context, payload): + """Handle the port.delete.end notification event.""" + port = self.cache.get_port_by_id(payload['port_id']) + if port: + network = self.cache.get_network_by_id(port.network_id) + self.cache.remove_port(port) + self.call_driver('reload_allocations', network) + + def enable_isolated_metadata_proxy(self, network): + + # The proxy might work for either a single network + # or all the networks connected via a router + # to the one passed as a parameter + neutron_lookup_param = '--network_id=%s' % network.id + meta_cidr = netaddr.IPNetwork(dhcp.METADATA_DEFAULT_CIDR) + has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr + for s in network.subnets) + if (self.conf.enable_metadata_network and has_metadata_subnet): + router_ports = [port for port in network.ports + if (port.device_owner == + constants.DEVICE_OWNER_ROUTER_INTF)] + if router_ports: + # Multiple router ports should not be allowed + if len(router_ports) > 1: + LOG.warning(_("%(port_num)d router ports found on the " + "metadata access network. Only the port " + "%(port_id)s, for router %(router_id)s " + "will be considered"), + {'port_num': len(router_ports), + 'port_id': router_ports[0].id, + 'router_id': router_ports[0].device_id}) + neutron_lookup_param = ('--router_id=%s' % + router_ports[0].device_id) + + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + neutron_lookup_param, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%d' % dhcp.METADATA_PORT] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % network.id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + network.id, + self.root_helper, + network.namespace) + pm.enable(callback) + + def disable_isolated_metadata_proxy(self, network): + pm = external_process.ProcessManager( + self.conf, + network.id, + self.root_helper, + network.namespace) + pm.disable() + + +class DhcpPluginApi(n_rpc.RpcProxy): + """Agent side of the dhcp rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. 
+ + """ + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic, context, use_namespaces): + super(DhcpPluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.context = context + self.host = cfg.CONF.host + self.use_namespaces = use_namespaces + + def get_active_networks_info(self): + """Make a remote process call to retrieve all network info.""" + networks = self.call(self.context, + self.make_msg('get_active_networks_info', + host=self.host), + topic=self.topic) + return [dhcp.NetModel(self.use_namespaces, n) for n in networks] + + def get_network_info(self, network_id): + """Make a remote process call to retrieve network info.""" + network = self.call(self.context, + self.make_msg('get_network_info', + network_id=network_id, + host=self.host), + topic=self.topic) + if network: + return dhcp.NetModel(self.use_namespaces, network) + + def get_dhcp_port(self, network_id, device_id): + """Make a remote process call to get the dhcp port.""" + port = self.call(self.context, + self.make_msg('get_dhcp_port', + network_id=network_id, + device_id=device_id, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def create_dhcp_port(self, port): + """Make a remote process call to create the dhcp port.""" + port = self.call(self.context, + self.make_msg('create_dhcp_port', + port=port, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def update_dhcp_port(self, port_id, port): + """Make a remote process call to update the dhcp port.""" + port = self.call(self.context, + self.make_msg('update_dhcp_port', + port_id=port_id, + port=port, + host=self.host), + topic=self.topic) + if port: + return dhcp.DictModel(port) + + def release_dhcp_port(self, network_id, device_id): + """Make a remote process call to release the dhcp port.""" + return self.call(self.context, + self.make_msg('release_dhcp_port', + network_id=network_id, + device_id=device_id, + host=self.host), + topic=self.topic) + + def release_port_fixed_ip(self, network_id, device_id, subnet_id): + """Make a remote process call to release a fixed_ip on the port.""" + return self.call(self.context, + self.make_msg('release_port_fixed_ip', + network_id=network_id, + subnet_id=subnet_id, + device_id=device_id, + host=self.host), + topic=self.topic) + + +class NetworkCache(object): + """Agent cache of the current network state.""" + def __init__(self): + self.cache = {} + self.subnet_lookup = {} + self.port_lookup = {} + + def get_network_ids(self): + return self.cache.keys() + + def get_network_by_id(self, network_id): + return self.cache.get(network_id) + + def get_network_by_subnet_id(self, subnet_id): + return self.cache.get(self.subnet_lookup.get(subnet_id)) + + def get_network_by_port_id(self, port_id): + return self.cache.get(self.port_lookup.get(port_id)) + + def put(self, network): + if network.id in self.cache: + self.remove(self.cache[network.id]) + + self.cache[network.id] = network + + for subnet in network.subnets: + self.subnet_lookup[subnet.id] = network.id + + for port in network.ports: + self.port_lookup[port.id] = network.id + + def remove(self, network): + del self.cache[network.id] + + for subnet in network.subnets: + del self.subnet_lookup[subnet.id] + + for port in network.ports: + del self.port_lookup[port.id] + + def put_port(self, port): + network = self.get_network_by_id(port.network_id) + for index in range(len(network.ports)): + if network.ports[index].id == port.id: + network.ports[index] = port + break + else: + 
network.ports.append(port) + + self.port_lookup[port.id] = network.id + + def remove_port(self, port): + network = self.get_network_by_port_id(port.id) + + for index in range(len(network.ports)): + if network.ports[index] == port: + del network.ports[index] + del self.port_lookup[port.id] + break + + def get_port_by_id(self, port_id): + network = self.get_network_by_port_id(port_id) + if network: + for port in network.ports: + if port.id == port_id: + return port + + def get_state(self): + net_ids = self.get_network_ids() + num_nets = len(net_ids) + num_subnets = 0 + num_ports = 0 + for net_id in net_ids: + network = self.get_network_by_id(net_id) + num_subnets += len(network.subnets) + num_ports += len(network.ports) + return {'networks': num_nets, + 'subnets': num_subnets, + 'ports': num_ports} + + +class DhcpAgentWithStateReport(DhcpAgent): + def __init__(self, host=None): + super(DhcpAgentWithStateReport, self).__init__(host=host) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-dhcp-agent', + 'host': host, + 'topic': topics.DHCP_AGENT, + 'configurations': { + 'dhcp_driver': cfg.CONF.dhcp_driver, + 'use_namespaces': cfg.CONF.use_namespaces, + 'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration}, + 'start_flag': True, + 'agent_type': constants.AGENT_TYPE_DHCP} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + try: + self.agent_state.get('configurations').update( + self.cache.get_state()) + ctx = context.get_admin_context_without_session() + self.state_rpc.report_state(ctx, self.agent_state, self.use_call) + self.use_call = False + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." + " State report for this agent will be disabled.")) + self.heartbeat.stop() + self.run() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + return + if self.agent_state.pop('start_flag', None): + self.run() + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.schedule_resync(_("Agent updated: %(payload)s") % + {"payload": payload}) + LOG.info(_("agent_updated by server side %s!"), payload) + + def after_start(self): + LOG.info(_("DHCP agent started")) + + +def register_options(): + cfg.CONF.register_opts(DhcpAgent.OPTS) + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_use_namespaces_opts_helper(cfg.CONF) + config.register_agent_state_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + cfg.CONF.register_opts(dhcp.OPTS) + cfg.CONF.register_opts(interface.OPTS) + + +def main(): + register_options() + common_config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + server = neutron_service.Service.create( + binary='neutron-dhcp-agent', + topic=topics.DHCP_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager='neutron.agent.dhcp_agent.DhcpAgentWithStateReport') + service.launch(server).wait() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/firewall.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/firewall.py new file mode 100644 index 00000000..7ce1f992 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/firewall.py @@ -0,0 +1,136 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. 
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import contextlib
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class FirewallDriver(object):
+    """Firewall Driver base class.
+
+    Defines methods that any driver providing security groups
+    and provider firewall functionality should implement.
+    Note that the port attribute should carry the security group ids and
+    security group rules.
+
+    the port dict should have
+      device : interface name
+      fixed_ips: ips of the device
+      mac_address: mac_address of the device
+      security_groups: [sgid, sgid]
+      security_group_rules : [ rule, rule ]
+      the rule must contain ethertype and direction
+      the rule may contain security_group_id,
+          protocol, port_min, port_max
+          source_ip_prefix, source_port_min,
+          source_port_max, dest_ip_prefix, and
+          remote_group_id
+    Note: source_group_ip in REST API should be converted by this rule
+    if direction is ingress:
+        remote_group_ip will be a source_ip_prefix
+    if direction is egress:
+        remote_group_ip will be a dest_ip_prefix
+    Note: remote_group_id in REST API should be converted by this rule
+    if direction is ingress:
+        remote_group_id will be a list of source_ip_prefix
+    if direction is egress:
+        remote_group_id will be a list of dest_ip_prefix
+    remote_group_id is also used for membership update management
+    """
+
+    def prepare_port_filter(self, port):
+        """Prepare filters for the port.
+
+        This method should be called before the port is created.
+        """
+        raise NotImplementedError()
+
+    def apply_port_filter(self, port):
+        """Apply port filter.
+
+        Once this method returns, the port should be firewalled
+        appropriately. This method should as far as possible be a
+        no-op. It's vastly preferred to get everything set up in
+        prepare_port_filter.
+        """
+        raise NotImplementedError()
+
+    def update_port_filter(self, port):
+        """Refresh security group rules from the data store.
+
+        Called when a port is added to or removed from the security
+        group it is a member of, or when the group gains or loses
+        a rule.
+        """
+        raise NotImplementedError()
+
+    def remove_port_filter(self, port):
+        """Stop filtering port."""
+        raise NotImplementedError()
+
+    def filter_defer_apply_on(self):
+        """Defer application of filtering rules."""
+        pass
+
+    def filter_defer_apply_off(self):
+        """Turn off deferral of rules and apply the rules now."""
+        pass
+
+    @property
+    def ports(self):
+        """Returns filtered ports."""
+        pass
+
+    @contextlib.contextmanager
+    def defer_apply(self):
+        """Defer apply context."""
+        self.filter_defer_apply_on()
+        try:
+            yield
+        finally:
+            self.filter_defer_apply_off()
+
+
+class NoopFirewallDriver(FirewallDriver):
+    """Noop Firewall Driver.
+
+    Firewall driver which does nothing.
+    This driver is for disabling the firewall functionality.
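+    Every hook below simply returns without doing anything, so ports are
+    left unfiltered.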
+ """ + + def prepare_port_filter(self, port): + pass + + def apply_port_filter(self, port): + pass + + def update_port_filter(self, port): + pass + + def remove_port_filter(self, port): + pass + + def filter_defer_apply_on(self): + pass + + def filter_defer_apply_off(self): + pass + + @property + def ports(self): + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/l3_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/l3_agent.py new file mode 100644 index 00000000..7c7b9def --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/l3_agent.py @@ -0,0 +1,1579 @@ +# Copyright 2012 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import sys + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import iptables_manager +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as l3_constants +from neutron.common import rpc as rpc_compat +from neutron.common import topics +from neutron.common import utils as common_utils +from neutron import context +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import periodic_task +from neutron.openstack.common import processutils +from neutron.openstack.common import service +from neutron import service as neutron_service +from neutron.services.firewall.agents.l3reference import firewall_l3_agent + +LOG = logging.getLogger(__name__) +NS_PREFIX = 'qrouter-' +INTERNAL_DEV_PREFIX = 'qr-' +EXTERNAL_DEV_PREFIX = 'qg-' +SNAT_INT_DEV_PREFIX = 'sg-' +FIP_NS_PREFIX = 'fip-' +SNAT_NS_PREFIX = 'snat-' +FIP_2_RTR_DEV_PREFIX = 'fpr-' +RTR_2_FIP_DEV_PREFIX = 'rfp-' +FIP_EXT_DEV_PREFIX = 'fg-' +FIP_LL_PREFIX = '169.254.30.' +# Route Table index for FIPs +FIP_RT_TBL = 16 +# Rule priority range for FIPs +FIP_PR_ST = 32768 +FIP_PR_END = FIP_PR_ST + 40000 +RPC_LOOP_INTERVAL = 1 +FLOATING_IP_CIDR_SUFFIX = '/32' + + +class L3PluginApi(rpc_compat.RpcProxy): + """Agent side of the l3 agent RPC API. + + API version history: + 1.0 - Initial version. 
+ 1.1 - Floating IP operational status updates + 1.2 - DVR support + + """ + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic, host): + super(L3PluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.host = host + + def get_routers(self, context, router_ids=None): + """Make a remote process call to retrieve the sync data for routers.""" + return self.call(context, + self.make_msg('sync_routers', host=self.host, + router_ids=router_ids), + topic=self.topic) + + def get_external_network_id(self, context): + """Make a remote process call to retrieve the external network id. + + @raise rpc_compat.RemoteError: with TooManyExternalNetworks + as exc_type if there are + more than one external network + """ + return self.call(context, + self.make_msg('get_external_network_id', + host=self.host), + topic=self.topic) + + def update_floatingip_statuses(self, context, router_id, fip_statuses): + """Call the plugin update floating IPs's operational status.""" + return self.call(context, + self.make_msg('update_floatingip_statuses', + router_id=router_id, + fip_statuses=fip_statuses), + topic=self.topic, + version='1.1') + + def get_ports_by_subnet(self, context, subnet_id): + """Retrieve ports by subnet id.""" + return self.call(context, + self.make_msg('get_ports_by_subnet', host=self.host, + subnet_id=subnet_id), + topic=self.topic, + version='1.2') + + def get_agent_gateway_port(self, context, fip_net): + """Get or create a agent_gateway_port.""" + return self.call(context, + self.make_msg('get_agent_gateway_port', + network_id=fip_net, host=self.host), + topic=self.topic, + version='1.2') + + +class RouterInfo(object): + + def __init__(self, router_id, root_helper, use_namespaces, router): + self.router_id = router_id + self.ex_gw_port = None + self._snat_enabled = None + self._snat_action = None + self.internal_ports = [] + self.snat_ports = [] + self.floating_ips = set() + # TODO(mrsmith): DVR merge cleanup + self.floating_ips_dict = {} + self.root_helper = root_helper + self.use_namespaces = use_namespaces + # Invoke the setter for establishing initial SNAT action + self.router = router + self.ns_name = NS_PREFIX + router_id if use_namespaces else None + self.iptables_manager = iptables_manager.IptablesManager( + root_helper=root_helper, + #FIXME(danwent): use_ipv6=True, + namespace=self.ns_name) + self.routes = [] + # DVR Data + # Linklocal router to floating IP addr + self.rtr_2_fip = None + # Linklocal floating to router IP addr + self.fip_2_rtr = None + self.dist_fip_count = 0 + + @property + def router(self): + return self._router + + @router.setter + def router(self, value): + self._router = value + if not self._router: + return + # enable_snat by default if it wasn't specified by plugin + self._snat_enabled = self._router.get('enable_snat', True) + # Set a SNAT action for the router + if self._router.get('gw_port'): + self._snat_action = ('add_rules' if self._snat_enabled + else 'remove_rules') + elif self.ex_gw_port: + # Gateway port was removed, remove rules + self._snat_action = 'remove_rules' + + def perform_snat_action(self, snat_callback, *args): + # Process SNAT rules for attached subnets + if self._snat_action: + snat_callback(self, self._router.get('gw_port'), + *args, action=self._snat_action) + self._snat_action = None + + +class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, manager.Manager): + """Manager for L3NatAgent + + API version history: + 1.0 initial Version + 1.1 changed the type of the routers parameter + to the 
routers_updated method. + It was previously a list of routers in dict format. + It is now a list of router IDs only. + Per rpc versioning rules, it is backwards compatible. + """ + RPC_API_VERSION = '1.1' + + OPTS = [ + cfg.BoolOpt('distributed_agent', default=False, + help=_("Enables distributed router agent function.")), + cfg.BoolOpt('centralized_snat', default=False, + help=_("Enables centralized SNAT in dvr mode.")), + cfg.BoolOpt('centralized_router', default=True, + help=_("Enables centralized router in dvr mode.")), + cfg.StrOpt('external_network_bridge', default='br-ex', + help=_("Name of bridge used for external network " + "traffic.")), + cfg.IntOpt('metadata_port', + default=9697, + help=_("TCP Port used by Neutron metadata namespace " + "proxy.")), + cfg.IntOpt('send_arp_for_ha', + default=3, + help=_("Send this many gratuitous ARPs for HA setup, if " + "less than or equal to 0, the feature is disabled")), + cfg.StrOpt('router_id', default='', + help=_("If namespaces is disabled, the l3 agent can only" + " configure a router that has the matching router " + "ID.")), + cfg.BoolOpt('handle_internal_only_routers', + default=True, + help=_("Agent should implement routers with no gateway")), + cfg.StrOpt('gateway_external_network_id', default='', + help=_("UUID of external network for routers implemented " + "by the agents.")), + cfg.BoolOpt('enable_metadata_proxy', default=True, + help=_("Allow running metadata proxy.")), + cfg.BoolOpt('router_delete_namespaces', default=False, + help=_("Delete namespace after removing a router.")), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + ] + + def __init__(self, host, conf=None): + if conf: + self.conf = conf + else: + self.conf = cfg.CONF + self.root_helper = config.get_root_helper(self.conf) + self.router_info = {} + + self._check_config_params() + + try: + self.driver = importutils.import_object( + self.conf.interface_driver, + self.conf + ) + except Exception: + msg = _("Error importing interface driver " + "'%s'") % self.conf.interface_driver + LOG.error(msg) + raise SystemExit(1) + + self.context = context.get_admin_context_without_session() + self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) + self.fullsync = True + self.updated_routers = set() + self.removed_routers = set() + self.sync_progress = False + + # TODO(mrsmith): remove once agent restart with + # stale namespaces is supported for DVR + root_ip = ip_lib.IPWrapper(self.root_helper) + host_namespaces = root_ip.get_namespaces(self.root_helper) + snat_namespaces = set(ns for ns in host_namespaces + if ns.startswith(SNAT_NS_PREFIX)) + self._destroy_stale_router_namespaces(snat_namespaces) + fip_namespaces = set(ns for ns in host_namespaces + if ns.startswith(FIP_NS_PREFIX)) + self._destroy_stale_router_namespaces(fip_namespaces) + + self._clean_stale_namespaces = self.conf.use_namespaces + + # dvr data + self.agent_gateway_port = None + self.agent_fip_count = 0 + self.local_ips = set(xrange(2, 251)) + self.fip_priorities = set(xrange(FIP_PR_ST, FIP_PR_END)) + + self.rpc_loop = loopingcall.FixedIntervalLoopingCall( + self._rpc_loop) + self.rpc_loop.start(interval=RPC_LOOP_INTERVAL) + super(L3NATAgent, self).__init__(conf=self.conf) + + self.target_ex_net_id = None + + def _check_config_params(self): + """Check items in configuration files. + + Check for required and invalid configuration items. + The actual values are not verified for correctness. 
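+        Currently this covers interface_driver, which must be set, and
+        router_id, which is required when use_namespaces is disabled.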
+ """ + if not self.conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + + if not self.conf.use_namespaces and not self.conf.router_id: + msg = _('Router id is required if not using namespaces.') + LOG.error(msg) + raise SystemExit(1) + + def _cleanup_namespaces(self, routers): + """Destroy stale router namespaces on host when L3 agent restarts + + This routine is called when self._clean_stale_namespaces is True. + + The argument routers is the list of routers that are recorded in + the database as being hosted on this node. + """ + try: + root_ip = ip_lib.IPWrapper(self.root_helper) + + host_namespaces = root_ip.get_namespaces(self.root_helper) + router_namespaces = set(ns for ns in host_namespaces + if ns.startswith(NS_PREFIX)) + ns_to_ignore = set(NS_PREFIX + r['id'] for r in routers) + # TODO(mrsmith): include DVR SNAT namespaces, FIP namespaces + ns_to_destroy = router_namespaces - ns_to_ignore + except RuntimeError: + LOG.exception(_('RuntimeError in obtaining router list ' + 'for namespace cleanup.')) + else: + self._destroy_stale_router_namespaces(ns_to_destroy) + + def _destroy_stale_router_namespaces(self, router_namespaces): + """Destroys the stale router namespaces + + The argumenet router_namespaces is a list of stale router namespaces + + As some stale router namespaces may not be able to be deleted, only + one attempt will be made to delete them. + """ + for ns in router_namespaces: + try: + self._destroy_namespace(ns) + except RuntimeError: + LOG.exception(_('Failed to destroy stale router namespace ' + '%s'), ns) + self._clean_stale_namespaces = False + + def _destroy_namespace(self, ns): + if ns.startswith(NS_PREFIX): + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns) + self._destroy_router_namespace(ns) + elif ns.startswith(FIP_NS_PREFIX): + self._destroy_fip_namespace(ns) + elif ns.startswith(SNAT_NS_PREFIX): + self._destroy_snat_namespace(ns) + + def _destroy_snat_namespace(self, ns_name): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + # delete internal interfaces + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(SNAT_INT_DEV_PREFIX): + LOG.debug('DVR: unplug: %s', d.name) + self.driver.unplug(d.name, namespace=ns_name, + prefix=SNAT_INT_DEV_PREFIX) + + # TODO(mrsmith): delete ext-gw-port + LOG.debug('DVR: destroy snat ns: %s', ns_name) + ns_ip.netns.delete(ns_name) + + def _destroy_fip_namespace(self, ns_name): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(FIP_2_RTR_DEV_PREFIX): + # internal link between IRs and FIP NS + # TODO(mrsmith): remove IR interfaces (IP pool?) 
+ pass + elif d.name.startswith(FIP_EXT_DEV_PREFIX): + # single port from FIP NS to br-ext + # TODO(mrsmith): remove br-ext interface + LOG.debug('DVR: unplug: %s', d.name) + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + LOG.debug('DVR: destroy fip ns: %s', ns_name) + # TODO(mrsmith): add LOG warn if fip count != 0 + ns_ip.netns.delete(ns_name) + self.agent_gateway_port = None + + def _destroy_router_namespace(self, namespace): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(INTERNAL_DEV_PREFIX): + # device is on default bridge + self.driver.unplug(d.name, namespace=namespace, + prefix=INTERNAL_DEV_PREFIX) + elif d.name.startswith(EXTERNAL_DEV_PREFIX): + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=namespace, + prefix=EXTERNAL_DEV_PREFIX) + + if self.conf.router_delete_namespaces: + try: + ns_ip.netns.delete(namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + LOG.exception(msg % namespace) + + def _create_namespace(self, name): + ip_wrapper_root = ip_lib.IPWrapper(self.root_helper) + ip_wrapper = ip_wrapper_root.ensure_namespace(name) + LOG.debug('DVR: ns-name: %s', name) + ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) + + def _create_router_namespace(self, ri): + self._create_namespace(ri.ns_name) + + def _fetch_external_net_id(self, force=False): + """Find UUID of single external network for this agent.""" + if self.conf.gateway_external_network_id: + return self.conf.gateway_external_network_id + + # L3 agent doesn't use external_network_bridge to handle external + # networks, so bridge_mappings with provider networks will be used + # and the L3 agent is able to handle any external networks. + if not self.conf.external_network_bridge: + return + + if not force and self.target_ex_net_id: + return self.target_ex_net_id + + try: + self.target_ex_net_id = self.plugin_rpc.get_external_network_id( + self.context) + return self.target_ex_net_id + except rpc_compat.RemoteError as e: + with excutils.save_and_reraise_exception() as ctx: + if e.exc_type == 'TooManyExternalNetworks': + ctx.reraise = False + msg = _( + "The 'gateway_external_network_id' option must be " + "configured for this agent as Neutron has more than " + "one external network.") + raise Exception(msg) + + def _router_added(self, router_id, router): + ri = RouterInfo(router_id, self.root_helper, + self.conf.use_namespaces, router) + self.router_info[router_id] = ri + if self.conf.use_namespaces: + self._create_router_namespace(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].add_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].add_rule(c, r) + ri.iptables_manager.apply() + super(L3NATAgent, self).process_router_add(ri) + if self.conf.enable_metadata_proxy: + self._spawn_metadata_proxy(ri.router_id, ri.ns_name) + + def _router_removed(self, router_id): + ri = self.router_info.get(router_id) + if ri is None: + LOG.warn(_("Info for router %s were not found. 
" + "Skipping router removal"), router_id) + return + ri.router['gw_port'] = None + ri.router[l3_constants.INTERFACE_KEY] = [] + ri.router[l3_constants.FLOATINGIP_KEY] = [] + self.process_router(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].remove_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].remove_rule(c, r) + ri.iptables_manager.apply() + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ri.router_id, ri.ns_name) + del self.router_info[router_id] + self._destroy_router_namespace(ri.ns_name) + + def _spawn_metadata_proxy(self, router_id, ns_name): + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + '--router_id=%s' % router_id, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%s' % self.conf.metadata_port] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % + router_id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.enable(callback) + + def _destroy_metadata_proxy(self, router_id, ns_name): + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.disable() + + def _set_subnet_arp_info(self, ri, port): + """Get ARP info from Plugin for existing ports for subnet.""" + if 'id' not in port['subnet'] or not ri.router['distributed']: + return + subnet_id = port['subnet']['id'] + subnet_ports = ( + self.plugin_rpc.get_ports_by_subnet(self.context, + subnet_id)) + + for p in subnet_ports: + if (p['device_owner'] not in ( + 'network:router_interface', + 'network:router_interface_distributed')): + for fixed_ip in p['fixed_ips']: + self._update_arp_entry(ri, fixed_ip['ip_address'], + p['mac_address'], + subnet_id, 'add') + + def _set_subnet_info(self, port): + ips = port['fixed_ips'] + if not ips: + raise Exception(_("Router port %s has no IP address") % port['id']) + if len(ips) > 1: + LOG.error(_("Ignoring multiple IPs on router port %s"), + port['id']) + prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen + port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) + + def _get_existing_devices(self, ri): + ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper, + namespace=ri.ns_name) + ip_devs = ip_wrapper.get_devices(exclude_loopback=True) + return [ip_dev.name for ip_dev in ip_devs] + + def process_router(self, ri): + # TODO(mrsmith) - we shouldn't need to check here + if 'distributed' not in ri.router: + ri.router['distributed'] = False + ri.iptables_manager.defer_apply_on() + ex_gw_port = self._get_ex_gw_port(ri) + internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + snat_ports = ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) + existing_port_ids = set([p['id'] for p in ri.internal_ports]) + current_port_ids = set([p['id'] for p in internal_ports + if p['admin_state_up']]) + new_ports = [p for p in internal_ports if + p['id'] in current_port_ids and + p['id'] not in existing_port_ids] + old_ports = [p for p in ri.internal_ports if + p['id'] not in current_port_ids] + for p in new_ports: + self._set_subnet_info(p) + self.internal_network_added(ri, p) + ri.internal_ports.append(p) + self._set_subnet_arp_info(ri, p) + + for p in old_ports: + self.internal_network_removed(ri, p) + ri.internal_ports.remove(p) + + existing_devices = 
self._get_existing_devices(ri) + current_internal_devs = set([n for n in existing_devices + if n.startswith(INTERNAL_DEV_PREFIX)]) + current_port_devs = set([self.get_internal_device_name(id) for + id in current_port_ids]) + stale_devs = current_internal_devs - current_port_devs + for stale_dev in stale_devs: + LOG.debug(_('Deleting stale internal router device: %s'), + stale_dev) + self.driver.unplug(stale_dev, + namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + # Get IPv4 only internal CIDRs + internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports + if netaddr.IPNetwork(p['ip_cidr']).version == 4] + # TODO(salv-orlando): RouterInfo would be a better place for + # this logic too + ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or + ri.ex_gw_port and ri.ex_gw_port['id']) + + interface_name = None + if ex_gw_port_id: + interface_name = self.get_external_device_name(ex_gw_port_id) + if ex_gw_port and ex_gw_port != ri.ex_gw_port: + self._set_subnet_info(ex_gw_port) + self.external_gateway_added(ri, ex_gw_port, + interface_name, internal_cidrs) + elif not ex_gw_port and ri.ex_gw_port: + self.external_gateway_removed(ri, ri.ex_gw_port, + interface_name, internal_cidrs) + + stale_devs = [dev for dev in existing_devices + if dev.startswith(EXTERNAL_DEV_PREFIX) + and dev != interface_name] + for stale_dev in stale_devs: + LOG.debug(_('Deleting stale external router device: %s'), + stale_dev) + self.driver.unplug(stale_dev, + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + # Process static routes for router + self.routes_updated(ri) + # Process SNAT rules for external gateway + if (not ri.router['distributed'] or + ex_gw_port and ri.router['gw_port_host'] == self.host): + ri.perform_snat_action(self._handle_router_snat_rules, + internal_cidrs, interface_name) + + # Process SNAT/DNAT rules for floating IPs + fip_statuses = {} + try: + if ex_gw_port: + existing_floating_ips = ri.floating_ips + self.process_router_floating_ip_nat_rules(ri) + ri.iptables_manager.defer_apply_off() + # Once NAT rules for floating IPs are safely in place + # configure their addresses on the external gateway port + fip_statuses = self.process_router_floating_ip_addresses( + ri, ex_gw_port) + except Exception: + # TODO(salv-orlando): Less broad catching + # All floating IPs must be put in error state + for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): + fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR + + if ex_gw_port: + # Identify floating IPs which were disabled + ri.floating_ips = set(fip_statuses.keys()) + for fip_id in existing_floating_ips - ri.floating_ips: + fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN + # Update floating IP status on the neutron server + self.plugin_rpc.update_floatingip_statuses( + self.context, ri.router_id, fip_statuses) + + # Update ex_gw_port and enable_snat on the router info cache + ri.ex_gw_port = ex_gw_port + ri.snat_ports = snat_ports + ri.enable_snat = ri.router.get('enable_snat') + + def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, + interface_name, action): + # Remove all the rules + # This is safe because if use_namespaces is set as False + # then the agent can only configure one router, otherwise + # each router's SNAT rules will be in their own namespace + if ri.router['distributed']: + iptables_manager = ri.snat_iptables_manager + else: + iptables_manager = ri.iptables_manager + + iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') + 
iptables_manager.ipv4['nat'].empty_chain('snat') + + if not ri.router['distributed']: + # Add back the jump to float-snat + iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') + + # And add them back if the action is add_rules + if action == 'add_rules' and ex_gw_port: + # ex_gw_port should not be None in this case + # NAT rules are added only if ex_gw_port has an IPv4 address + for ip_addr in ex_gw_port['fixed_ips']: + ex_gw_ip = ip_addr['ip_address'] + if netaddr.IPAddress(ex_gw_ip).version == 4: + rules = self.external_gateway_nat_rules(ex_gw_ip, + internal_cidrs, + interface_name) + for rule in rules: + iptables_manager.ipv4['nat'].add_rule(*rule) + break + iptables_manager.apply() + + def _handle_router_fip_nat_rules(self, ri, interface_name, action): + """Configures NAT rules for Floating IPs for DVR. + + Remove all the rules. This is safe because if + use_namespaces is set as False then the agent can + only configure one router, otherwise each router's + NAT rules will be in their own namespace. + """ + ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') + ri.iptables_manager.ipv4['nat'].empty_chain('snat') + + # Add back the jump to float-snat + ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') + + # And add them back if the action if add_rules + if action == 'add_rules' and interface_name: + rule = ('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name}) + ri.iptables_manager.ipv4['nat'].add_rule(*rule) + ri.iptables_manager.apply() + + def process_router_floating_ip_nat_rules(self, ri): + """Configure NAT rules for the router's floating IPs. + + Configures iptables rules for the floating ips of the given router + """ + # Clear out all iptables rules for floating ips + ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') + + floating_ips = self.get_floating_ips(ri) + # Loop once to ensure that floating ips are configured. + for fip in floating_ips: + # Rebuild iptables rules for the floating ip. + fixed = fip['fixed_ip_address'] + fip_ip = fip['floating_ip_address'] + for chain, rule in self.floating_forward_rules(fip_ip, fixed): + ri.iptables_manager.ipv4['nat'].add_rule(chain, rule, + tag='floating_ip') + + ri.iptables_manager.apply() + + def process_router_floating_ip_addresses(self, ri, ex_gw_port): + """Configure IP addresses on router's external gateway interface. + + Ensures addresses for existing floating IPs and cleans up + those that should not longer be configured. 
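+
+        Returns a dict mapping each floating IP id to its resulting status,
+        which process_router() reports back to the plugin via
+        update_floatingip_statuses().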
+ """ + fip_statuses = {} + + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + if ri.router['distributed']: + # filter out only FIPs for this host/agent + floating_ips = [i for i in floating_ips if i['host'] == self.host] + if floating_ips and self.agent_gateway_port is None: + self._create_agent_gateway_port(ri, floating_ips[0] + ['floating_network_id']) + + if self.agent_gateway_port: + if floating_ips and ri.dist_fip_count == 0: + self.create_rtr_2_fip_link(ri, floating_ips[0] + ['floating_network_id']) + interface_name = self.get_rtr_int_device_name(ri.router_id) + else: + # there are no fips or agent port, no work to do + return fip_statuses + else: + interface_name = self.get_external_device_name(ex_gw_port['id']) + + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + existing_cidrs = set([addr['cidr'] for addr in device.addr.list()]) + new_cidrs = set() + + # Loop once to ensure that floating ips are configured. + for fip in floating_ips: + fip_ip = fip['floating_ip_address'] + ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX + + new_cidrs.add(ip_cidr) + + if ip_cidr not in existing_cidrs: + net = netaddr.IPNetwork(ip_cidr) + try: + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError): + # any exception occurred here should cause the floating IP + # to be set in error state + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ERROR) + LOG.warn(_("Unable to configure IP address for " + "floating IP: %s"), fip['id']) + continue + if ri.router['distributed']: + # Special Handling for DVR - update FIP namespace + # and ri.namespace to handle DVR based FIP + self.floating_ip_added_dist(ri, fip) + else: + # As GARP is processed in a distinct thread the call below + # won't raise an exception to be handled. + self._send_gratuitous_arp_packet( + ri.ns_name, interface_name, fip_ip) + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ACTIVE) + + # Clean up addresses that no longer belong on the gateway interface. 
+ for ip_cidr in existing_cidrs - new_cidrs: + if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX): + net = netaddr.IPNetwork(ip_cidr) + device.addr.delete(net.version, ip_cidr) + if ri.router['distributed']: + self.floating_ip_removed_dist(ri, ip_cidr) + return fip_statuses + + def _get_ex_gw_port(self, ri): + return ri.router.get('gw_port') + + def _arping(self, ns_name, interface_name, ip_address, dist=None): + if dist: + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ns_name) + ip_cidr = str(ip_address) + FLOATING_IP_CIDR_SUFFIX + net = netaddr.IPNetwork(ip_cidr) + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + + arping_cmd = ['arping', '-A', + '-I', interface_name, + '-c', self.conf.send_arp_for_ha, + ip_address] + try: + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ns_name) + ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) + except Exception as e: + LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) + if dist: + device.addr.delete(net.version, ip_cidr) + + def _send_gratuitous_arp_packet(self, ns_name, interface_name, ip_address, + dist=None): + if self.conf.send_arp_for_ha > 0: + eventlet.spawn_n(self._arping, ns_name, interface_name, ip_address, + dist) + + def get_internal_port(self, ri, subnet_id): + """Returns internal router port based on subnet_id.""" + router_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + for port in router_ports: + fips = port['fixed_ips'] + for f in fips: + if f['subnet_id'] == subnet_id: + return port + + def get_internal_device_name(self, port_id): + return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_external_device_name(self, port_id): + return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_ext_device_name(self, port_id): + return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_rtr_int_device_name(self, router_id): + return (RTR_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_int_device_name(self, router_id): + return (FIP_2_RTR_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] + + def get_snat_int_device_name(self, port_id): + return (SNAT_INT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_ns_name(self, ext_net_id): + return (FIP_NS_PREFIX + ext_net_id) + + def get_snat_ns_name(self, ext_gw_port_id): + return (SNAT_NS_PREFIX + ext_gw_port_id) + + def get_snat_interfaces(self, ri): + return ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) + + def get_floating_ips(self, ri): + """Filters Floating IPs for DVR to be hosted on this agent.""" + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + if ri.router['distributed']: + floating_ips = [i for i in floating_ips if i['host'] == self.host] + return floating_ips + + def _map_internal_interfaces(self, ri, int_port, snat_ports): + """Returns the SNAT port for the given internal interface port.""" + fixed_ip = int_port['fixed_ips'][0] + subnet_id = fixed_ip['subnet_id'] + match_port = [p for p in snat_ports if + p['fixed_ips'][0]['subnet_id'] == subnet_id] + if match_port: + return match_port[0] + else: + LOG.debug('DVR: no map match_port found!') + + def _create_dvr_gateway(self, ri, ex_gw_port, gw_interface_name, + internal_cidrs, snat_ports): + """Create SNAT namespace.""" + snat_ns_name = self.get_snat_ns_name(ex_gw_port['id']) + self._create_namespace(snat_ns_name) + # connect snat_ports to br_int from SNAT namespace + for port in snat_ports: + # create interface_name + self._set_subnet_info(port) + 
interface_name = self.get_snat_int_device_name(port['id']) + self._internal_network_added(snat_ns_name, port['network_id'], + port['id'], port['ip_cidr'], + port['mac_address'], interface_name, + SNAT_INT_DEV_PREFIX) + self._external_gateway_added(ri, ex_gw_port, gw_interface_name, + internal_cidrs, snat_ns_name, + preserve_ips=[]) + ri.snat_iptables_manager = ( + iptables_manager.IptablesManager( + root_helper=self.root_helper, namespace=snat_ns_name + ) + ) + + def external_gateway_added(self, ri, ex_gw_port, + interface_name, internal_cidrs): + if ri.router['distributed']: + snat_ports = self.get_snat_interfaces(ri) + for p in ri.internal_ports: + gateway = self._map_internal_interfaces(ri, p, snat_ports) + id_name = self.get_internal_device_name(p['id']) + self._snat_redirect_add(ri, gateway['fixed_ips'][0] + ['ip_address'], p, id_name) + + if self.conf.centralized_snat and ( + ri.router['gw_port_host'] == self.host): + if snat_ports: + self._create_dvr_gateway(ri, ex_gw_port, + interface_name, + internal_cidrs, snat_ports) + for port in snat_ports: + for ip in port['fixed_ips']: + self._update_arp_entry(ri, ip['ip_address'], + port['mac_address'], + ip['subnet_id'], 'add') + return + + # Compute a list of addresses this router is supposed to have. + # This avoids unnecessarily removing those addresses and + # causing a momentarily network outage. + floating_ips = self.get_floating_ips(ri) + preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX + for ip in floating_ips] + + self._external_gateway_added(ri, ex_gw_port, interface_name, + internal_cidrs, ri.ns_name, + preserve_ips) + + def _external_gateway_added(self, ri, ex_gw_port, interface_name, + internal_cidrs, ns_name, preserve_ips): + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ns_name, + gateway=ex_gw_port['subnet'].get('gateway_ip'), + extra_subnets=ex_gw_port.get('extra_subnets', []), + preserve_ips=preserve_ips) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ns_name, + interface_name, ip_address) + + def agent_gateway_added(self, ns_name, ex_gw_port, + interface_name): + """Adds Floating IP gateway port to FIP namespace.""" + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ns_name) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) + + gw_ip = ex_gw_port['subnet']['gateway_ip'] + if gw_ip: + ipd = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ns_name) + ipd.route.add_gateway(gw_ip) + + cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def internal_ns_interface_added(self, ip_cidr, + interface_name, ns_name): + ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + 
ip_wrapper.netns.execute(['ip', 'addr', 'add', + ip_cidr, 'dev', interface_name]) + + def external_gateway_removed(self, ri, ex_gw_port, + interface_name, internal_cidrs): + if ri.router['distributed']: + for p in ri.internal_ports: + internal_interface = self.get_internal_device_name(p['id']) + self._snat_redirect_remove(ri, p, internal_interface) + + if self.conf.centralized_snat and ( + ex_gw_port['binding:host_id'] == self.host): + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + else: + # not hosting agent - no work to do + LOG.debug('DVR: CSNAT not hosted: %s', ex_gw_port) + return + else: + ns_name = ri.ns_name + + self.driver.unplug(interface_name, + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=EXTERNAL_DEV_PREFIX) + if ri.router['distributed']: + self._destroy_snat_namespace(ns_name) + + def metadata_filter_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' + '-p tcp -m tcp --dport %s ' + '-j ACCEPT' % self.conf.metadata_port)) + return rules + + def metadata_nat_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j REDIRECT ' + '--to-port %s' % self.conf.metadata_port)) + return rules + + def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs, + interface_name): + rules = [('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name})] + for cidr in internal_cidrs: + rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr)) + return rules + + def _gen_snat_idx(self, cidr): + """Generate index based on cidr for SNAT entries.""" + ip = cidr.split('/')[0] + ip_str = ip.split('.') + ip_num = (((int(ip_str[0])) << 24) + ((int(ip_str[1])) << 16) + + ((int(ip_str[2])) << 8) + (int(ip_str[3]))) + return ip_num + + def _snat_redirect_add(self, ri, gateway, sn_port, sn_int): + """Adds rules and routes for SNAT redirection.""" + try: + snat_idx = self._gen_snat_idx(sn_port['ip_cidr']) + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, + namespace=ri.ns_name) + ns_ipd.route.add_gateway(gateway, table=snat_idx) + ns_ipr.add_rule_from(sn_port['ip_cidr'], snat_idx, snat_idx) + ns_ipr.netns.execute(['sysctl', '-w', + 'net.ipv4.conf.all.send_redirects=0']) + ns_ipr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 
+ 'send_redirects=0' % sn_int]) + except Exception: + LOG.exception(_('DVR: error adding redirection logic')) + + def _snat_redirect_remove(self, ri, sn_port, sn_int): + """Removes rules and routes for SNAT redirection.""" + try: + snat_idx = self._gen_snat_idx(sn_port['ip_cidr']) + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, + namespace=ri.ns_name) + ns_ipd.route.delete_gateway(table=snat_idx) + ns_ipr.delete_rule_priority(snat_idx) + except Exception: + LOG.exception(_('DVR: removed snat failed')) + + def _internal_network_added(self, ns_name, network_id, port_id, + internal_cidr, mac_address, + interface_name, prefix): + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(network_id, port_id, interface_name, mac_address, + namespace=ns_name, + prefix=prefix) + + self.driver.init_l3(interface_name, [internal_cidr], + namespace=ns_name) + ip_address = internal_cidr.split('/')[0] + self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) + + def internal_network_added(self, ri, port): + network_id = port['network_id'] + port_id = port['id'] + internal_cidr = port['ip_cidr'] + mac_address = port['mac_address'] + + interface_name = self.get_internal_device_name(port_id) + + self._internal_network_added(ri.ns_name, network_id, port_id, + internal_cidr, mac_address, + interface_name, INTERNAL_DEV_PREFIX) + + if ri.router['distributed'] and ri.ex_gw_port: + ex_gw_port = ri.ex_gw_port + snat_ports = self.get_snat_interfaces(ri) + snat_ip = self._map_internal_interfaces(ri, port, snat_ports) + self._snat_redirect_add(ri, snat_ip['fixed_ips'][0] + ['ip_address'], port, interface_name) + if self.conf.centralized_snat and ( + ri.router['gw_port_host'] == self.host): + for port in snat_ports: + self._set_subnet_info(port) + interface_name = self.get_snat_int_device_name(port['id']) + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + self._internal_network_added(ns_name, port['network_id'], + port['id'], internal_cidr, + port['mac_address'], + interface_name, + SNAT_INT_DEV_PREFIX) + + def internal_network_removed(self, ri, port): + port_id = port['id'] + interface_name = self.get_internal_device_name(port_id) + if ri.router['distributed'] and ri.ex_gw_port: + # DVR handling code for SNAT + ex_gw_port = ri.ex_gw_port + self._snat_redirect_remove(ri, port, interface_name) + if self.conf.centralized_snat and ( + ri.ex_gw_port['binding:host_id'] == self.host): + snat_port = self._map_internal_interfaces(ri, port, + ri.snat_ports) + snat_interface = ( + self.get_snat_int_device_name(snat_port['id']) + ) + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + prefix = SNAT_INT_DEV_PREFIX + if ip_lib.device_exists(snat_interface, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.unplug(snat_interface, namespace=ns_name, + prefix=prefix) + + if ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.unplug(interface_name, namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + def internal_network_nat_rules(self, ex_gw_ip, internal_cidr): + rules = [('snat', '-s %s -j SNAT --to-source %s' % + (internal_cidr, ex_gw_ip))] + return rules + + def _create_agent_gateway_port(self, ri, network_id): + """Creates Floating IP gateway port. + + Request port creation from Plugin then creates + Floating IP namespace and adds gateway port. 
+ """ + # Port does not exist, request port from plugin + self.agent_gateway_port = ( + self.plugin_rpc.get_agent_gateway_port( + self.context, network_id)) + if 'subnet' not in self.agent_gateway_port: + LOG.error(_('Missing subnet/agent_gateway_port')) + return + self._set_subnet_info(self.agent_gateway_port) + + # add fip-namespace and agent_gateway_port + fip_ns_name = ( + self.get_fip_ns_name(str(network_id))) + self._create_namespace(fip_ns_name) + interface_name = ( + self.get_fip_ext_device_name(self.agent_gateway_port['id'])) + self.agent_gateway_added(fip_ns_name, self.agent_gateway_port, + interface_name) + + def create_rtr_2_fip_link(self, ri, network_id): + """Creates interface between router and Floating IP namespace.""" + rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + fip_ns_name = self.get_fip_ns_name(str(network_id)) + + # add link local IP to interface + if ri.rtr_2_fip is None: + ri.rtr_2_fip = FIP_LL_PREFIX + str(self.local_ips.pop()) + if ri.fip_2_rtr is None: + ri.fip_2_rtr = FIP_LL_PREFIX + str(self.local_ips.pop()) + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + int_dev = ip_wrapper.add_veth(rtr_2_fip_name, + fip_2_rtr_name, fip_ns_name) + self.internal_ns_interface_added(ri.rtr_2_fip + '/31', + rtr_2_fip_name, ri.ns_name) + self.internal_ns_interface_added(ri.fip_2_rtr + '/31', + fip_2_rtr_name, fip_ns_name) + int_dev[0].link.set_up() + int_dev[1].link.set_up() + # add default route for the link local interface + device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, + namespace=ri.ns_name) + device.route.add_gateway(ri.fip_2_rtr, table=FIP_RT_TBL) + #setup the NAT rules and chains + self._handle_router_fip_nat_rules(ri, rtr_2_fip_name, 'add_rules') + + def floating_ip_added_dist(self, ri, fip): + """Adds floating IP to FIP namespace.""" + floating_ip = fip['floating_ip_address'] + fixed_ip = fip['fixed_ip_address'] + rule_pr = self.fip_priorities.pop() + ri.floating_ips_dict[floating_ip] = rule_pr + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + ipRule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ipRule.add_rule_from(fixed_ip, FIP_RT_TBL, rule_pr) + + #Add routing rule in fip namespace + fip_cidr = str(floating_ip) + FLOATING_IP_CIDR_SUFFIX + fip_ns_name = self.get_fip_ns_name(str(fip['floating_network_id'])) + device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, + namespace=fip_ns_name) + device.route.add_route(fip_cidr, ri.rtr_2_fip) + interface_name = ( + self.get_fip_ext_device_name(self.agent_gateway_port['id'])) + self._send_gratuitous_arp_packet(fip_ns_name, + interface_name, floating_ip, + dist=True) + # update internal structures + self.agent_fip_count = self.agent_fip_count + 1 + ri.dist_fip_count = ri.dist_fip_count + 1 + + def floating_ip_removed_dist(self, ri, fip_cidr): + """Removes floating IP from FIP namespace.""" + floating_ip = fip_cidr.split('/')[0] + rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + fip_ns_name = self.get_fip_ns_name(str(self._fetch_external_net_id())) + ip_rule_rtr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + if floating_ip in ri.floating_ips_dict: + rule_pr = ri.floating_ips_dict[floating_ip] + #TODO(rajeev): Handle else case - exception/log? 
+ else: + rule_pr = None + + ip_rule_rtr.delete_rule_priority(rule_pr) + self.fip_priorities.add(rule_pr) + device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, + namespace=fip_ns_name) + + device.route.delete_route(fip_cidr, ri.rtr_2_fip) + # check if this is the last FIP for this router + ri.dist_fip_count = ri.dist_fip_count - 1 + if ri.dist_fip_count == 0: + #remove default route entry + device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, + namespace=ri.ns_name) + device.route.delete_gateway(ri.fip_2_rtr, table=FIP_RT_TBL) + self.local_ips.add(ri.rtr_2_fip.rsplit('.', 1)[1]) + ri.rtr_2_fip = None + self.local_ips.add(ri.fip_2_rtr.rsplit('.', 1)[1]) + ri.fip_2_rtr = None + # TODO(mrsmith): remove interface + # clean up fip-namespace if this is the last FIP + self.agent_fip_count = self.agent_fip_count - 1 + if self.agent_fip_count == 0: + self._destroy_fip_namespace(fip_ns_name) + + def floating_forward_rules(self, floating_ip, fixed_ip): + return [('PREROUTING', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('float-snat', '-s %s -j SNAT --to %s' % + (fixed_ip, floating_ip))] + + def router_deleted(self, context, router_id): + """Deal with router deletion RPC message.""" + LOG.debug(_('Got router deleted notification for %s'), router_id) + self.removed_routers.add(router_id) + + def _update_arp_entry(self, ri, ip, mac, subnet_id, operation): + """Add or delete arp entry into router namespace.""" + port = self.get_internal_port(ri, subnet_id) + if 'id' in port: + ip_cidr = str(ip) + '/32' + try: + # TODO(mrsmith): optimize the calls below for bulk calls + net = netaddr.IPNetwork(ip_cidr) + interface_name = self.get_internal_device_name(port['id']) + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + if operation == 'add': + device.neigh.add(net.version, ip, mac) + elif operation == 'delete': + device.neigh.delete(net.version, ip, mac) + except Exception: + LOG.exception(_("DVR: Failed updating arp entry")) + self.fullsync = True + + def add_arp_entry(self, context, payload): + """Adds arp entry into router namespace from RPC.""" + arp_table = payload['arp_table'] + router_id = payload['router_id'] + ip = arp_table['ip_address'] + mac = arp_table['mac_address'] + subnet_id = arp_table['subnet_id'] + ri = self.router_info.get(router_id) + self._update_arp_entry(ri, ip, mac, subnet_id, 'add') + + def delete_arp_entry(self, context, payload): + """Deletes arp entry into router namespace from RPC.""" + arp_table = payload['arp_table'] + router_id = payload['router_id'] + ip = arp_table['ip_address'] + mac = arp_table['mac_address'] + subnet_id = arp_table['subnet_id'] + ri = self.router_info.get(router_id) + self._update_arp_entry(ri, ip, mac, subnet_id, 'delete') + + def routers_updated(self, context, routers): + """Deal with routers modification and creation RPC message.""" + LOG.debug(_('Got routers updated notification :%s'), routers) + if routers: + # This is needed for backward compatibility + if isinstance(routers[0], dict): + routers = [router['id'] for router in routers] + self.updated_routers.update(routers) + + def router_removed_from_agent(self, context, payload): + LOG.debug(_('Got router removed from agent :%r'), payload) + self.removed_routers.add(payload['router_id']) + + def router_added_to_agent(self, context, payload): + LOG.debug(_('Got router added to agent :%r'), payload) + self.routers_updated(context, payload) + + def _process_routers(self, 
routers, all_routers=False): + pool = eventlet.GreenPool() + if (self.conf.external_network_bridge and + not ip_lib.device_exists(self.conf.external_network_bridge)): + LOG.error(_("The external network bridge '%s' does not exist"), + self.conf.external_network_bridge) + return + + target_ex_net_id = self._fetch_external_net_id() + # if routers are all the routers we have (They are from router sync on + # starting or when error occurs during running), we seek the + # routers which should be removed. + # If routers are from server side notification, we seek them + # from subset of incoming routers and ones we have now. + if all_routers: + prev_router_ids = set(self.router_info) + else: + prev_router_ids = set(self.router_info) & set( + [router['id'] for router in routers]) + cur_router_ids = set() + for r in routers: + # If namespaces are disabled, only process the router associated + # with the configured agent id. + if (not self.conf.use_namespaces and + r['id'] != self.conf.router_id): + continue + ex_net_id = (r['external_gateway_info'] or {}).get('network_id') + if not ex_net_id and not self.conf.handle_internal_only_routers: + continue + if (target_ex_net_id and ex_net_id and + ex_net_id != target_ex_net_id): + # Double check that our single external_net_id has not changed + # by forcing a check by RPC. + if (ex_net_id != self._fetch_external_net_id(force=True)): + continue + cur_router_ids.add(r['id']) + if r['id'] not in self.router_info: + self._router_added(r['id'], r) + ri = self.router_info[r['id']] + ri.router = r + pool.spawn_n(self.process_router, ri) + # identify and remove routers that no longer exist + for router_id in prev_router_ids - cur_router_ids: + pool.spawn_n(self._router_removed, router_id) + pool.waitall() + + @lockutils.synchronized('l3-agent', 'neutron-') + def _rpc_loop(self): + # _rpc_loop and _sync_routers_task will not be + # executed in the same time because of lock. + # so we can clear the value of updated_routers + # and removed_routers, but they can be updated by + # updated_routers and removed_routers rpc call + try: + LOG.debug(_("Starting RPC loop for %d updated routers"), + len(self.updated_routers)) + if self.updated_routers: + # We're capturing and clearing the list, and will + # process the "captured" updates in this loop, + # and any updates that happen due to a context switch + # will be picked up on the next pass. 
+ updated_routers = set(self.updated_routers) + self.updated_routers.clear() + router_ids = list(updated_routers) + routers = self.plugin_rpc.get_routers( + self.context, router_ids) + # routers with admin_state_up=false will not be in the fetched + fetched = set([r['id'] for r in routers]) + self.removed_routers.update(updated_routers - fetched) + + self._process_routers(routers) + self._process_router_delete() + LOG.debug(_("RPC loop successfully completed")) + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + def _process_router_delete(self): + current_removed_routers = list(self.removed_routers) + for router_id in current_removed_routers: + self._router_removed(router_id) + self.removed_routers.remove(router_id) + + def _router_ids(self): + if not self.conf.use_namespaces: + return [self.conf.router_id] + + @periodic_task.periodic_task + @lockutils.synchronized('l3-agent', 'neutron-') + def _sync_routers_task(self, context): + if self.services_sync: + super(L3NATAgent, self).process_services_sync(context) + LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), + self.fullsync) + if not self.fullsync: + return + try: + router_ids = self._router_ids() + self.updated_routers.clear() + self.removed_routers.clear() + routers = self.plugin_rpc.get_routers( + context, router_ids) + + LOG.debug(_('Processing :%r'), routers) + self._process_routers(routers, all_routers=True) + self.fullsync = False + LOG.debug(_("_sync_routers_task successfully completed")) + except rpc_compat.RPCException: + LOG.exception(_("Failed synchronizing routers due to RPC error")) + self.fullsync = True + return + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + # Resync is not necessary for the cleanup of stale + # namespaces. 
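+ # (_clean_stale_namespaces is presumably only true for the first
+ # successful sync after agent start-up; _cleanup_namespaces() removes
+ # router namespaces that no longer correspond to any router returned
+ # by the plugin.)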
+ if self._clean_stale_namespaces: + self._cleanup_namespaces(routers) + + def after_start(self): + LOG.info(_("L3 agent started")) + + def _update_routing_table(self, ri, operation, route): + cmd = ['ip', 'route', operation, 'to', route['destination'], + 'via', route['nexthop']] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def routes_updated(self, ri): + new_routes = ri.router['routes'] + old_routes = ri.routes + adds, removes = common_utils.diff_list_of_dict(old_routes, + new_routes) + for route in adds: + LOG.debug(_("Added route entry is '%s'"), route) + # remove replaced route from deleted route + for del_route in removes: + if route['destination'] == del_route['destination']: + removes.remove(del_route) + #replace success even if there is no existing route + self._update_routing_table(ri, 'replace', route) + for route in removes: + LOG.debug(_("Removed route entry is '%s'"), route) + self._update_routing_table(ri, 'delete', route) + ri.routes = new_routes + + +class L3NATAgentWithStateReport(L3NATAgent): + + def __init__(self, host, conf=None): + super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-l3-agent', + 'host': host, + 'topic': topics.L3_AGENT, + 'configurations': { + 'distributed_agent': self.conf.distributed_agent, + 'centralized_snat': self.conf.centralized_snat, + 'centralized_router': self.conf.centralized_router, + 'use_namespaces': self.conf.use_namespaces, + 'router_id': self.conf.router_id, + 'handle_internal_only_routers': + self.conf.handle_internal_only_routers, + 'external_network_bridge': self.conf.external_network_bridge, + 'gateway_external_network_id': + self.conf.gateway_external_network_id, + 'interface_driver': self.conf.interface_driver}, + 'start_flag': True, + 'agent_type': l3_constants.AGENT_TYPE_L3} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + LOG.debug(_("Report state task started")) + num_ex_gw_ports = 0 + num_interfaces = 0 + num_floating_ips = 0 + router_infos = self.router_info.values() + num_routers = len(router_infos) + for ri in router_infos: + ex_gw_port = self._get_ex_gw_port(ri) + if ex_gw_port: + num_ex_gw_ports += 1 + num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, + [])) + num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, + [])) + configurations = self.agent_state['configurations'] + configurations['routers'] = num_routers + configurations['ex_gw_ports'] = num_ex_gw_ports + configurations['interfaces'] = num_interfaces + configurations['floating_ips'] = num_floating_ips + try: + self.state_rpc.report_state(self.context, self.agent_state, + self.use_call) + self.agent_state.pop('start_flag', None) + self.use_call = False + LOG.debug(_("Report state task successfully completed")) + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." 
+ " State report for this agent will be disabled.")) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.fullsync = True + LOG.info(_("agent_updated by server side %s!"), payload) + + +def main(manager='neutron.agent.l3_agent.L3NATAgentWithStateReport'): + conf = cfg.CONF + conf.register_opts(L3NATAgent.OPTS) + config.register_interface_driver_opts_helper(conf) + config.register_use_namespaces_opts_helper(conf) + config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + conf.register_opts(interface.OPTS) + conf.register_opts(external_process.OPTS) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-l3-agent', + topic=topics.L3_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager=manager) + service.launch(server).wait() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/__init__.py new file mode 100644 index 00000000..bf3075dd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/async_process.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/async_process.py new file mode 100644 index 00000000..2253918c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/async_process.py @@ -0,0 +1,221 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +import eventlet.event +import eventlet.queue + +from neutron.agent.linux import utils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class AsyncProcessException(Exception): + pass + + +class AsyncProcess(object): + """Manages an asynchronous process. + + This class spawns a new process via subprocess and uses + greenthreads to read stderr and stdout asynchronously into queues + that can be read via repeatedly calling iter_stdout() and + iter_stderr(). + + If respawn_interval is non-zero, any error in communicating with + the managed process will result in the process and greenthreads + being cleaned up and the process restarted after the specified + interval. 
+ + Example usage: + + >>> import time + >>> proc = AsyncProcess(['ping']) + >>> proc.start() + >>> time.sleep(5) + >>> proc.stop() + >>> for line in proc.iter_stdout(): + ... print line + """ + + def __init__(self, cmd, root_helper=None, respawn_interval=None): + """Constructor. + + :param cmd: The list of command arguments to invoke. + :param root_helper: Optional, utility to use when running shell cmds. + :param respawn_interval: Optional, the interval in seconds to wait + to respawn after unexpected process death. Respawn will + only be attempted if a value of 0 or greater is provided. + """ + self.cmd = cmd + self.root_helper = root_helper + if respawn_interval is not None and respawn_interval < 0: + raise ValueError(_('respawn_interval must be >= 0 if provided.')) + self.respawn_interval = respawn_interval + self._process = None + self._kill_event = None + self._reset_queues() + self._watchers = [] + + def _reset_queues(self): + self._stdout_lines = eventlet.queue.LightQueue() + self._stderr_lines = eventlet.queue.LightQueue() + + def start(self): + """Launch a process and monitor it asynchronously.""" + if self._kill_event: + raise AsyncProcessException(_('Process is already started')) + else: + LOG.debug(_('Launching async process [%s].'), self.cmd) + self._spawn() + + def stop(self): + """Halt the process and watcher threads.""" + if self._kill_event: + LOG.debug(_('Halting async process [%s].'), self.cmd) + self._kill() + else: + raise AsyncProcessException(_('Process is not running.')) + + def _spawn(self): + """Spawn a process and its watchers.""" + self._kill_event = eventlet.event.Event() + self._process, cmd = utils.create_process(self.cmd, + root_helper=self.root_helper) + self._watchers = [] + for reader in (self._read_stdout, self._read_stderr): + # Pass the stop event directly to the greenthread to + # ensure that assignment of a new event to the instance + # attribute does not prevent the greenthread from using + # the original event. + watcher = eventlet.spawn(self._watch_process, + reader, + self._kill_event) + self._watchers.append(watcher) + + def _kill(self, respawning=False): + """Kill the process and the associated watcher greenthreads. + + :param respawning: Optional, whether respawn will be subsequently + attempted. + """ + # Halt the greenthreads + self._kill_event.send() + + pid = self._get_pid_to_kill() + if pid: + self._kill_process(pid) + + if not respawning: + # Clear the kill event to ensure the process can be + # explicitly started again. + self._kill_event = None + + def _get_pid_to_kill(self): + pid = self._process.pid + # If root helper was used, two or more processes will be created: + # + # - a root helper process (e.g. sudo myscript) + # - possibly a rootwrap script (e.g. neutron-rootwrap) + # - a child process (e.g. myscript) + # + # Killing the root helper process will leave the child process + # running, re-parented to init, so the only way to ensure that both + # die is to target the child process directly. + if self.root_helper: + try: + pid = utils.find_child_pids(pid)[0] + except IndexError: + # Process is already dead + return None + while True: + try: + # We shouldn't have more than one child per process + # so keep getting the children of the first one + pid = utils.find_child_pids(pid)[0] + except IndexError: + # Last process in the tree, return it + break + return pid + + def _kill_process(self, pid): + try: + # A process started by a root helper will be running as + # root and need to be killed via the same helper. 
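+ # 'pid' here is the leaf child located by _get_pid_to_kill() above, so
+ # killing it directly also lets any intermediate sudo/rootwrap parents
+ # exit; SIGKILL is used since the monitored command is not expected to
+ # need a graceful shutdown.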
+ utils.execute(['kill', '-9', pid], root_helper=self.root_helper) + except Exception as ex: + stale_pid = (isinstance(ex, RuntimeError) and + 'No such process' in str(ex)) + if not stale_pid: + LOG.exception(_('An error occurred while killing [%s].'), + self.cmd) + return False + return True + + def _handle_process_error(self): + """Kill the async process and respawn if necessary.""" + LOG.debug(_('Halting async process [%s] in response to an error.'), + self.cmd) + respawning = self.respawn_interval >= 0 + self._kill(respawning=respawning) + if respawning: + eventlet.sleep(self.respawn_interval) + LOG.debug(_('Respawning async process [%s].'), self.cmd) + self._spawn() + + def _watch_process(self, callback, kill_event): + while not kill_event.ready(): + try: + if not callback(): + break + except Exception: + LOG.exception(_('An error occurred while communicating ' + 'with async process [%s].'), self.cmd) + break + # Ensure that watching a process with lots of output does + # not block execution of other greenthreads. + eventlet.sleep() + # The kill event not being ready indicates that the loop was + # broken out of due to an error in the watched process rather + # than the loop condition being satisfied. + if not kill_event.ready(): + self._handle_process_error() + + def _read(self, stream, queue): + data = stream.readline() + if data: + data = data.strip() + queue.put(data) + return data + + def _read_stdout(self): + return self._read(self._process.stdout, self._stdout_lines) + + def _read_stderr(self): + return self._read(self._process.stderr, self._stderr_lines) + + def _iter_queue(self, queue): + while True: + try: + yield queue.get_nowait() + except eventlet.queue.Empty: + break + + def iter_stdout(self): + return self._iter_queue(self._stdout_lines) + + def iter_stderr(self): + return self._iter_queue(self._stderr_lines) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/daemon.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/daemon.py new file mode 100644 index 00000000..b5fbacf1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/daemon.py @@ -0,0 +1,149 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +import atexit +import fcntl +import os +import signal +import sys + +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class Pidfile(object): + def __init__(self, pidfile, procname, uuid=None): + self.pidfile = pidfile + self.procname = procname + self.uuid = uuid + try: + self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR) + fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + LOG.exception(_("Error while handling pidfile: %s"), pidfile) + sys.exit(1) + + def __str__(self): + return self.pidfile + + def unlock(self): + if not not fcntl.flock(self.fd, fcntl.LOCK_UN): + raise IOError(_('Unable to unlock pid file')) + + def write(self, pid): + os.ftruncate(self.fd, 0) + os.write(self.fd, "%d" % pid) + os.fsync(self.fd) + + def read(self): + try: + pid = int(os.read(self.fd, 128)) + os.lseek(self.fd, 0, os.SEEK_SET) + return pid + except ValueError: + return + + def is_running(self): + pid = self.read() + if not pid: + return False + + cmdline = '/proc/%s/cmdline' % pid + try: + with open(cmdline, "r") as f: + exec_out = f.readline() + return self.procname in exec_out and (not self.uuid or + self.uuid in exec_out) + except IOError: + return False + + +class Daemon(object): + """A generic daemon class. + + Usage: subclass the Daemon class and override the run() method + """ + def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', + stderr='/dev/null', procname='python', uuid=None): + self.stdin = stdin + self.stdout = stdout + self.stderr = stderr + self.procname = procname + self.pidfile = Pidfile(pidfile, procname, uuid) + + def _fork(self): + try: + pid = os.fork() + if pid > 0: + sys.exit(0) + except OSError: + LOG.exception(_('Fork failed')) + sys.exit(1) + + def daemonize(self): + """Daemonize process by doing Stevens double fork.""" + # fork first time + self._fork() + + # decouple from parent environment + os.chdir("/") + os.setsid() + os.umask(0) + + # fork second time + self._fork() + + # redirect standard file descriptors + sys.stdout.flush() + sys.stderr.flush() + stdin = open(self.stdin, 'r') + stdout = open(self.stdout, 'a+') + stderr = open(self.stderr, 'a+', 0) + os.dup2(stdin.fileno(), sys.stdin.fileno()) + os.dup2(stdout.fileno(), sys.stdout.fileno()) + os.dup2(stderr.fileno(), sys.stderr.fileno()) + + # write pidfile + atexit.register(self.delete_pid) + signal.signal(signal.SIGTERM, self.handle_sigterm) + self.pidfile.write(os.getpid()) + + def delete_pid(self): + os.remove(str(self.pidfile)) + + def handle_sigterm(self, signum, frame): + sys.exit(0) + + def start(self): + """Start the daemon.""" + + if self.pidfile.is_running(): + self.pidfile.unlock() + message = _('Pidfile %s already exist. Daemon already running?') + LOG.error(message, self.pidfile) + sys.exit(1) + + # Start the daemon + self.daemonize() + self.run() + + def run(self): + """Override this method when subclassing Daemon. + + start() will call this method after the process has daemonized. + """ + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/dhcp.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/dhcp.py new file mode 100644 index 00000000..d05cad16 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/dhcp.py @@ -0,0 +1,921 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import collections +import os +import re +import shutil +import socket +import sys + +import netaddr +from oslo.config import cfg +import six + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import constants +from neutron.common import exceptions +from neutron.common import utils as commonutils +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('dhcp_confs', + default='$state_path/dhcp', + help=_('Location to store DHCP server config files')), + cfg.StrOpt('dhcp_domain', + default='openstacklocal', + help=_('Domain to use for building the hostnames')), + cfg.StrOpt('dnsmasq_config_file', + default='', + help=_('Override the default dnsmasq settings with this file')), + cfg.ListOpt('dnsmasq_dns_servers', + help=_('Comma-separated list of the DNS servers which will be ' + 'used as forwarders.'), + deprecated_name='dnsmasq_dns_server'), + cfg.BoolOpt('dhcp_delete_namespaces', default=False, + help=_("Delete namespace after removing a dhcp server.")), + cfg.IntOpt( + 'dnsmasq_lease_max', + default=(2 ** 24), + help=_('Limit number of leases to prevent a denial-of-service.')), +] + +IPV4 = 4 +IPV6 = 6 +UDP = 'udp' +TCP = 'tcp' +DNS_PORT = 53 +DHCPV4_PORT = 67 +DHCPV6_PORT = 547 +METADATA_DEFAULT_PREFIX = 16 +METADATA_DEFAULT_IP = '169.254.169.254' +METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP, + METADATA_DEFAULT_PREFIX) +METADATA_PORT = 80 +WIN2k3_STATIC_DNS = 249 +NS_PREFIX = 'qdhcp-' + + +class DictModel(dict): + """Convert dict into an object that provides attribute access to values.""" + + def __init__(self, *args, **kwargs): + """Convert dict values to DictModel values.""" + super(DictModel, self).__init__(*args, **kwargs) + + def needs_upgrade(item): + """Check if `item` is a dict and needs to be changed to DictModel. 
+ """ + return isinstance(item, dict) and not isinstance(item, DictModel) + + def upgrade(item): + """Upgrade item if it needs to be upgraded.""" + if needs_upgrade(item): + return DictModel(item) + else: + return item + + for key, value in self.iteritems(): + if isinstance(value, (list, tuple)): + # Keep the same type but convert dicts to DictModels + self[key] = type(value)( + (upgrade(item) for item in value) + ) + elif needs_upgrade(value): + # Change dict instance values to DictModel instance values + self[key] = DictModel(value) + + def __getattr__(self, name): + try: + return self[name] + except KeyError as e: + raise AttributeError(e) + + def __setattr__(self, name, value): + self[name] = value + + def __delattr__(self, name): + del self[name] + + +class NetModel(DictModel): + + def __init__(self, use_namespaces, d): + super(NetModel, self).__init__(d) + + self._ns_name = (use_namespaces and + "%s%s" % (NS_PREFIX, self.id) or None) + + @property + def namespace(self): + return self._ns_name + + +@six.add_metaclass(abc.ABCMeta) +class DhcpBase(object): + + def __init__(self, conf, network, root_helper='sudo', + version=None, plugin=None): + self.conf = conf + self.network = network + self.root_helper = root_helper + self.device_manager = DeviceManager(self.conf, + self.root_helper, plugin) + self.version = version + + @abc.abstractmethod + def enable(self): + """Enables DHCP for this network.""" + + @abc.abstractmethod + def disable(self, retain_port=False): + """Disable dhcp for this network.""" + + def restart(self): + """Restart the dhcp service for the network.""" + self.disable(retain_port=True) + self.enable() + + @abc.abstractproperty + def active(self): + """Boolean representing the running state of the DHCP server.""" + + @abc.abstractmethod + def reload_allocations(self): + """Force the DHCP server to reload the assignment database.""" + + @classmethod + def existing_dhcp_networks(cls, conf, root_helper): + """Return a list of existing networks ids that we have configs for.""" + + raise NotImplementedError + + @classmethod + def check_version(cls): + """Execute version checks on DHCP server.""" + + raise NotImplementedError + + +class DhcpLocalProcess(DhcpBase): + PORTS = [] + + def _enable_dhcp(self): + """check if there is a subnet within the network with dhcp enabled.""" + for subnet in self.network.subnets: + if subnet.enable_dhcp: + return True + return False + + def enable(self): + """Enables DHCP for this network by spawning a local process.""" + interface_name = self.device_manager.setup(self.network) + if self.active: + self.restart() + elif self._enable_dhcp(): + self.interface_name = interface_name + self.spawn_process() + + def disable(self, retain_port=False): + """Disable DHCP for this network by killing the local process.""" + pid = self.pid + + if pid: + if self.active: + cmd = ['kill', '-9', pid] + utils.execute(cmd, self.root_helper) + else: + LOG.debug(_('DHCP for %(net_id)s is stale, pid %(pid)d ' + 'does not exist, performing cleanup'), + {'net_id': self.network.id, 'pid': pid}) + if not retain_port: + self.device_manager.destroy(self.network, + self.interface_name) + else: + LOG.debug(_('No DHCP started for %s'), self.network.id) + + self._remove_config_files() + + if not retain_port: + if self.conf.dhcp_delete_namespaces and self.network.namespace: + ns_ip = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + try: + ns_ip.netns.delete(self.network.namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + 
LOG.exception(msg, self.network.namespace) + + def _remove_config_files(self): + confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs)) + conf_dir = os.path.join(confs_dir, self.network.id) + shutil.rmtree(conf_dir, ignore_errors=True) + + def get_conf_file_name(self, kind, ensure_conf_dir=False): + """Returns the file name for a given kind of config file.""" + confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs)) + conf_dir = os.path.join(confs_dir, self.network.id) + if ensure_conf_dir: + if not os.path.isdir(conf_dir): + os.makedirs(conf_dir, 0o755) + + return os.path.join(conf_dir, kind) + + def _get_value_from_conf_file(self, kind, converter=None): + """A helper function to read a value from one of the state files.""" + file_name = self.get_conf_file_name(kind) + msg = _('Error while reading %s') + + try: + with open(file_name, 'r') as f: + try: + return converter and converter(f.read()) or f.read() + except ValueError: + msg = _('Unable to convert value in %s') + except IOError: + msg = _('Unable to access %s') + + LOG.debug(msg % file_name) + return None + + @property + def pid(self): + """Last known pid for the DHCP process spawned for this network.""" + return self._get_value_from_conf_file('pid', int) + + @property + def active(self): + pid = self.pid + if pid is None: + return False + + cmdline = '/proc/%s/cmdline' % pid + try: + with open(cmdline, "r") as f: + return self.network.id in f.readline() + except IOError: + return False + + @property + def interface_name(self): + return self._get_value_from_conf_file('interface') + + @interface_name.setter + def interface_name(self, value): + interface_file_path = self.get_conf_file_name('interface', + ensure_conf_dir=True) + utils.replace_file(interface_file_path, value) + + @abc.abstractmethod + def spawn_process(self): + pass + + +class Dnsmasq(DhcpLocalProcess): + # The ports that need to be opened when security policies are active + # on the Neutron port used for DHCP. These are provided as a convenience + # for users of this class. + PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)], + IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)], + } + + _TAG_PREFIX = 'tag%d' + + NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID' + NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH' + MINIMUM_VERSION = 2.59 + + @classmethod + def check_version(cls): + ver = 0 + try: + cmd = ['dnsmasq', '--version'] + out = utils.execute(cmd) + ver = re.findall("\d+.\d+", out)[0] + is_valid_version = float(ver) >= cls.MINIMUM_VERSION + if not is_valid_version: + LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. ' + 'DHCP AGENT MAY NOT RUN CORRECTLY! ' + 'Please ensure that its version is %s ' + 'or above!'), cls.MINIMUM_VERSION) + except (OSError, RuntimeError, IndexError, ValueError): + LOG.warning(_('Unable to determine dnsmasq version. 
' + 'Please ensure that its version is %s ' + 'or above!'), cls.MINIMUM_VERSION) + return float(ver) + + @classmethod + def existing_dhcp_networks(cls, conf, root_helper): + """Return a list of existing networks ids that we have configs for.""" + + confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs)) + + return [ + c for c in os.listdir(confs_dir) + if uuidutils.is_uuid_like(c) + ] + + def spawn_process(self): + """Spawns a Dnsmasq process for the network.""" + env = { + self.NEUTRON_NETWORK_ID_KEY: self.network.id, + } + + cmd = [ + 'dnsmasq', + '--no-hosts', + '--no-resolv', + '--strict-order', + '--bind-interfaces', + '--interface=%s' % self.interface_name, + '--except-interface=lo', + '--pid-file=%s' % self.get_conf_file_name( + 'pid', ensure_conf_dir=True), + '--dhcp-hostsfile=%s' % self._output_hosts_file(), + '--addn-hosts=%s' % self._output_addn_hosts_file(), + '--dhcp-optsfile=%s' % self._output_opts_file(), + '--leasefile-ro', + ] + + possible_leases = 0 + for i, subnet in enumerate(self.network.subnets): + # if a subnet is specified to have dhcp disabled + if not subnet.enable_dhcp: + continue + if subnet.ip_version == 4: + mode = 'static' + else: + # Note(scollins) If the IPv6 attributes are not set, set it as + # static to preserve previous behavior + if (not getattr(subnet, 'ipv6_ra_mode', None) and + not getattr(subnet, 'ipv6_address_mode', None)): + mode = 'static' + elif getattr(subnet, 'ipv6_ra_mode', None) is None: + # RA mode is not set - do not launch dnsmasq + continue + if self.version >= self.MINIMUM_VERSION: + set_tag = 'set:' + else: + set_tag = '' + + cidr = netaddr.IPNetwork(subnet.cidr) + + if self.conf.dhcp_lease_duration == -1: + lease = 'infinite' + else: + lease = '%ss' % self.conf.dhcp_lease_duration + + cmd.append('--dhcp-range=%s%s,%s,%s,%s' % + (set_tag, self._TAG_PREFIX % i, + cidr.network, mode, lease)) + + possible_leases += cidr.size + + # Cap the limit because creating lots of subnets can inflate + # this possible lease cap. + cmd.append('--dhcp-lease-max=%d' % + min(possible_leases, self.conf.dnsmasq_lease_max)) + + cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file) + if self.conf.dnsmasq_dns_servers: + cmd.extend( + '--server=%s' % server + for server in self.conf.dnsmasq_dns_servers) + + if self.conf.dhcp_domain: + cmd.append('--domain=%s' % self.conf.dhcp_domain) + + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + ip_wrapper.netns.execute(cmd, addl_env=env) + + def _release_lease(self, mac_address, ip): + """Release a DHCP lease.""" + cmd = ['dhcp_release', self.interface_name, ip, mac_address] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + self.network.namespace) + ip_wrapper.netns.execute(cmd) + + def reload_allocations(self): + """Rebuild the dnsmasq config and signal the dnsmasq to reload.""" + + # If all subnets turn off dhcp, kill the process. + if not self._enable_dhcp(): + self.disable() + LOG.debug(_('Killing dhcpmasq for network since all subnets have ' + 'turned off DHCP: %s'), self.network.id) + return + + self._release_unused_leases() + self._output_hosts_file() + self._output_addn_hosts_file() + self._output_opts_file() + if self.active: + cmd = ['kill', '-HUP', self.pid] + utils.execute(cmd, self.root_helper) + else: + LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid) + LOG.debug(_('Reloading allocations for network: %s'), self.network.id) + self.device_manager.update(self.network, self.interface_name) + + def _iter_hosts(self): + """Iterate over hosts. 
+ + For each host on the network we yield a tuple containing: + ( + port, # a DictModel instance representing the port. + alloc, # a DictModel instance of the allocated ip and subnet. + host_name, # Host name. + name, # Host name and domain name in the format 'hostname.domain'. + ) + """ + v6_nets = dict((subnet.id, subnet) for subnet in + self.network.subnets if subnet.ip_version == 6) + for port in self.network.ports: + for alloc in port.fixed_ips: + # Note(scollins) Only create entries that are + # associated with the subnet being managed by this + # dhcp agent + if alloc.subnet_id in v6_nets: + ra_mode = v6_nets[alloc.subnet_id].ipv6_ra_mode + addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode + if (ra_mode is None and addr_mode == constants.IPV6_SLAAC): + continue + hostname = 'host-%s' % alloc.ip_address.replace( + '.', '-').replace(':', '-') + fqdn = '%s.%s' % (hostname, self.conf.dhcp_domain) + yield (port, alloc, hostname, fqdn) + + def _output_hosts_file(self): + """Writes a dnsmasq compatible dhcp hosts file. + + The generated file is sent to the --dhcp-hostsfile option of dnsmasq, + and lists the hosts on the network which should receive a dhcp lease. + Each line in this file is in the form:: + + 'mac_address,FQDN,ip_address' + + IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in + this file if it did not give a lease to a host listed in it (e.g.: + multiple dnsmasq instances on the same network if this network is on + multiple network nodes). This file is only defining hosts which + should receive a dhcp lease, the hosts resolution in itself is + defined by the `_output_addn_hosts_file` method. + """ + buf = six.StringIO() + filename = self.get_conf_file_name('host') + + LOG.debug(_('Building host file: %s'), filename) + for (port, alloc, hostname, name) in self._iter_hosts(): + set_tag = '' + # (dzyu) Check if it is legal ipv6 address, if so, need wrap + # it with '[]' to let dnsmasq to distinguish MAC address from + # IPv6 address. + ip_address = alloc.ip_address + if netaddr.valid_ipv6(ip_address): + ip_address = '[%s]' % ip_address + + LOG.debug(_('Adding %(mac)s : %(name)s : %(ip)s'), + {"mac": port.mac_address, "name": name, + "ip": ip_address}) + + if getattr(port, 'extra_dhcp_opts', False): + if self.version >= self.MINIMUM_VERSION: + set_tag = 'set:' + + buf.write('%s,%s,%s,%s%s\n' % + (port.mac_address, name, ip_address, + set_tag, port.id)) + else: + buf.write('%s,%s,%s\n' % + (port.mac_address, name, ip_address)) + + utils.replace_file(filename, buf.getvalue()) + LOG.debug(_('Done building host file %s'), filename) + return filename + + def _read_hosts_file_leases(self, filename): + leases = set() + if os.path.exists(filename): + with open(filename) as f: + for l in f.readlines(): + host = l.strip().split(',') + leases.add((host[2], host[0])) + return leases + + def _release_unused_leases(self): + filename = self.get_conf_file_name('host') + old_leases = self._read_hosts_file_leases(filename) + + new_leases = set() + for port in self.network.ports: + for alloc in port.fixed_ips: + new_leases.add((alloc.ip_address, port.mac_address)) + + for ip, mac in old_leases - new_leases: + self._release_lease(mac, ip) + + def _output_addn_hosts_file(self): + """Writes a dnsmasq compatible additional hosts file. + + The generated file is sent to the --addn-hosts option of dnsmasq, + and lists the hosts on the network which should be resolved even if + the dnsmaq instance did not give a lease to the host (see the + `_output_hosts_file` method). 
+ Each line in this file is in the same form as a standard /etc/hosts + file. + """ + buf = six.StringIO() + for (port, alloc, hostname, fqdn) in self._iter_hosts(): + # It is compulsory to write the `fqdn` before the `hostname` in + # order to obtain it in PTR responses. + buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname)) + addn_hosts = self.get_conf_file_name('addn_hosts') + utils.replace_file(addn_hosts, buf.getvalue()) + return addn_hosts + + def _output_opts_file(self): + """Write a dnsmasq compatible options file.""" + + if self.conf.enable_isolated_metadata: + subnet_to_interface_ip = self._make_subnet_interface_ip_map() + + options = [] + + dhcp_ips = collections.defaultdict(list) + subnet_idx_map = {} + for i, subnet in enumerate(self.network.subnets): + if not subnet.enable_dhcp: + continue + if subnet.dns_nameservers: + options.append( + self._format_option(i, 'dns-server', + ','.join(subnet.dns_nameservers))) + else: + # use the dnsmasq ip as nameservers only if there is no + # dns-server submitted by the server + subnet_idx_map[subnet.id] = i + + gateway = subnet.gateway_ip + host_routes = [] + for hr in subnet.host_routes: + if hr.destination == "0.0.0.0/0": + if not gateway: + gateway = hr.nexthop + else: + host_routes.append("%s,%s" % (hr.destination, hr.nexthop)) + + # Add host routes for isolated network segments + + if self._enable_metadata(subnet): + subnet_dhcp_ip = subnet_to_interface_ip[subnet.id] + host_routes.append( + '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip) + ) + + if host_routes: + if gateway and subnet.ip_version == 4: + host_routes.append("%s,%s" % ("0.0.0.0/0", gateway)) + options.append( + self._format_option(i, 'classless-static-route', + ','.join(host_routes))) + options.append( + self._format_option(i, WIN2k3_STATIC_DNS, + ','.join(host_routes))) + + if subnet.ip_version == 4: + if gateway: + options.append(self._format_option(i, 'router', gateway)) + else: + options.append(self._format_option(i, 'router')) + + for port in self.network.ports: + if getattr(port, 'extra_dhcp_opts', False): + options.extend( + self._format_option(port.id, opt.opt_name, opt.opt_value) + for opt in port.extra_dhcp_opts) + + # provides all dnsmasq ip as dns-server if there is more than + # one dnsmasq for a subnet and there is no dns-server submitted + # by the server + if port.device_owner == constants.DEVICE_OWNER_DHCP: + for ip in port.fixed_ips: + i = subnet_idx_map.get(ip.subnet_id) + if i is None: + continue + dhcp_ips[i].append(ip.ip_address) + + for i, ips in dhcp_ips.items(): + if len(ips) > 1: + options.append(self._format_option(i, + 'dns-server', + ','.join(ips))) + + name = self.get_conf_file_name('opts') + utils.replace_file(name, '\n'.join(options)) + return name + + def _make_subnet_interface_ip_map(self): + ip_dev = ip_lib.IPDevice( + self.interface_name, + self.root_helper, + self.network.namespace + ) + + subnet_lookup = dict( + (netaddr.IPNetwork(subnet.cidr), subnet.id) + for subnet in self.network.subnets + ) + + retval = {} + + for addr in ip_dev.addr.list(): + ip_net = netaddr.IPNetwork(addr['cidr']) + + if ip_net in subnet_lookup: + retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0] + + return retval + + def _format_option(self, tag, option, *args): + """Format DHCP option by option name or code.""" + if self.version >= self.MINIMUM_VERSION: + set_tag = 'tag:' + else: + set_tag = '' + + option = str(option) + + if isinstance(tag, int): + tag = self._TAG_PREFIX % tag + + if not option.isdigit(): + option = 'option:%s' % 
option + + return ','.join((set_tag + tag, '%s' % option) + args) + + def _enable_metadata(self, subnet): + '''Determine if the metadata route will be pushed to hosts on subnet. + + If subnet has a Neutron router attached, we want the hosts to get + metadata from the router's proxy via their default route instead. + ''' + if self.conf.enable_isolated_metadata and subnet.ip_version == 4: + if subnet.gateway_ip is None: + return True + else: + for port in self.network.ports: + if port.device_owner == constants.DEVICE_OWNER_ROUTER_INTF: + for alloc in port.fixed_ips: + if alloc.subnet_id == subnet.id: + return False + return True + else: + return False + + @classmethod + def lease_update(cls): + network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY) + dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY) + + action = sys.argv[1] + if action not in ('add', 'del', 'old'): + sys.exit() + + mac_address = sys.argv[2] + ip_address = sys.argv[3] + + if action == 'del': + lease_remaining = 0 + else: + lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0)) + + data = dict(network_id=network_id, mac_address=mac_address, + ip_address=ip_address, lease_remaining=lease_remaining) + + if os.path.exists(dhcp_relay_socket): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(dhcp_relay_socket) + sock.send(jsonutils.dumps(data)) + sock.close() + + +class DeviceManager(object): + + def __init__(self, conf, root_helper, plugin): + self.conf = conf + self.root_helper = root_helper + self.plugin = plugin + if not conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + try: + self.driver = importutils.import_object( + conf.interface_driver, conf) + except Exception as e: + msg = (_("Error importing interface driver '%(driver)s': " + "%(inner)s") % {'driver': conf.interface_driver, + 'inner': e}) + LOG.error(msg) + raise SystemExit(1) + + def get_interface_name(self, network, port): + """Return interface(device) name for use by the DHCP process.""" + return self.driver.get_device_name(port) + + def get_device_id(self, network): + """Return a unique DHCP device ID for this host on the network.""" + # There could be more than one dhcp server per network, so create + # a device id that combines host and network ids + return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host) + + def _set_default_route(self, network, device_name): + """Sets the default gateway for this dhcp namespace. + + This method is idempotent and will only adjust the route if adjusting + it would change it from what it already is. This makes it safe to call + and avoids unnecessary perturbation of the system. + """ + device = ip_lib.IPDevice(device_name, + self.root_helper, + network.namespace) + gateway = device.route.get_gateway() + if gateway: + gateway = gateway['gateway'] + + for subnet in network.subnets: + skip_subnet = ( + subnet.ip_version != 4 + or not subnet.enable_dhcp + or subnet.gateway_ip is None) + + if skip_subnet: + continue + + if gateway != subnet.gateway_ip: + m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s') + LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip}) + + device.route.add_gateway(subnet.gateway_ip) + + return + + # No subnets on the network have a valid gateway. Clean it up to avoid + # confusion from seeing an invalid gateway here. 
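+ # (For example: if the namespace still carries a default route via
+ # 10.0.0.1 but every remaining subnet is IPv6-only, has DHCP disabled
+ # or has no gateway_ip, the stale default route is deleted below.)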
+ if gateway is not None: + msg = _('Removing gateway for dhcp netns on net %s') + LOG.debug(msg, network.id) + + device.route.delete_gateway(gateway) + + def setup_dhcp_port(self, network): + """Create/update DHCP port for the host if needed and return port.""" + + device_id = self.get_device_id(network) + subnets = {} + dhcp_enabled_subnet_ids = [] + for subnet in network.subnets: + if subnet.enable_dhcp: + dhcp_enabled_subnet_ids.append(subnet.id) + subnets[subnet.id] = subnet + + dhcp_port = None + for port in network.ports: + port_device_id = getattr(port, 'device_id', None) + if port_device_id == device_id: + port_fixed_ips = [] + for fixed_ip in port.fixed_ips: + port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id, + 'ip_address': fixed_ip.ip_address}) + if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: + dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id) + + # If there are dhcp_enabled_subnet_ids here that means that + # we need to add those to the port and call update. + if dhcp_enabled_subnet_ids: + port_fixed_ips.extend( + [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + dhcp_port = self.plugin.update_dhcp_port( + port.id, {'port': {'network_id': network.id, + 'fixed_ips': port_fixed_ips}}) + if not dhcp_port: + raise exceptions.Conflict() + else: + dhcp_port = port + # break since we found port that matches device_id + break + + # check for a reserved DHCP port + if dhcp_port is None: + LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s' + ' does not yet exist. Checking for a reserved port.'), + {'device_id': device_id, 'network_id': network.id}) + for port in network.ports: + port_device_id = getattr(port, 'device_id', None) + if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT: + dhcp_port = self.plugin.update_dhcp_port( + port.id, {'port': {'network_id': network.id, + 'device_id': device_id}}) + if dhcp_port: + break + + # DHCP port has not yet been created. 
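+ # A brand-new port is requested from the plugin with one fixed IP per
+ # DHCP-enabled subnet; passing only subnet_id lets the plugin pick the
+ # address.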
+ if dhcp_port is None: + LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s' + ' does not yet exist.'), {'device_id': device_id, + 'network_id': network.id}) + port_dict = dict( + name='', + admin_state_up=True, + device_id=device_id, + network_id=network.id, + tenant_id=network.tenant_id, + fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + dhcp_port = self.plugin.create_dhcp_port({'port': port_dict}) + + if not dhcp_port: + raise exceptions.Conflict() + + # Convert subnet_id to subnet dict + fixed_ips = [dict(subnet_id=fixed_ip.subnet_id, + ip_address=fixed_ip.ip_address, + subnet=subnets[fixed_ip.subnet_id]) + for fixed_ip in dhcp_port.fixed_ips] + + ips = [DictModel(item) if isinstance(item, dict) else item + for item in fixed_ips] + dhcp_port.fixed_ips = ips + + return dhcp_port + + def setup(self, network): + """Create and initialize a device for network's DHCP on this host.""" + port = self.setup_dhcp_port(network) + interface_name = self.get_interface_name(network, port) + + if ip_lib.ensure_device_is_ready(interface_name, + self.root_helper, + network.namespace): + LOG.debug(_('Reusing existing device: %s.'), interface_name) + else: + self.driver.plug(network.id, + port.id, + interface_name, + port.mac_address, + namespace=network.namespace) + ip_cidrs = [] + for fixed_ip in port.fixed_ips: + subnet = fixed_ip.subnet + net = netaddr.IPNetwork(subnet.cidr) + ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) + ip_cidrs.append(ip_cidr) + + if (self.conf.enable_isolated_metadata and + self.conf.use_namespaces): + ip_cidrs.append(METADATA_DEFAULT_CIDR) + + self.driver.init_l3(interface_name, ip_cidrs, + namespace=network.namespace) + + # ensure that the dhcp interface is first in the list + if network.namespace is None: + device = ip_lib.IPDevice(interface_name, + self.root_helper) + device.route.pullup_route(interface_name) + + if self.conf.use_namespaces: + self._set_default_route(network, interface_name) + + return interface_name + + def update(self, network, device_name): + """Update device settings for the network's DHCP on this host.""" + if self.conf.use_namespaces: + self._set_default_route(network, device_name) + + def destroy(self, network, device_name): + """Destroy the device used for the network's DHCP on this host.""" + self.driver.unplug(device_name, namespace=network.namespace) + + self.plugin.release_dhcp_port(network.id, + self.get_device_id(network)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/external_process.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/external_process.py new file mode 100644 index 00000000..2cadc700 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/external_process.py @@ -0,0 +1,102 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +import os + +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('external_pids', + default='$state_path/external/pids', + help=_('Location to store child pid files')), +] + +cfg.CONF.register_opts(OPTS) + + +class ProcessManager(object): + """An external process manager for Neutron spawned processes. + + Note: The manager expects uuid to be in cmdline. + """ + def __init__(self, conf, uuid, root_helper='sudo', namespace=None): + self.conf = conf + self.uuid = uuid + self.root_helper = root_helper + self.namespace = namespace + + def enable(self, cmd_callback): + if not self.active: + cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True)) + + ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace) + ip_wrapper.netns.execute(cmd) + + def disable(self): + pid = self.pid + + if self.active: + cmd = ['kill', '-9', pid] + utils.execute(cmd, self.root_helper) + elif pid: + LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring ' + 'command'), {'uuid': self.uuid, 'pid': pid}) + else: + LOG.debug(_('No process started for %s'), self.uuid) + + def get_pid_file_name(self, ensure_pids_dir=False): + """Returns the file name for a given kind of config file.""" + pids_dir = os.path.abspath(os.path.normpath(self.conf.external_pids)) + if ensure_pids_dir and not os.path.isdir(pids_dir): + os.makedirs(pids_dir, 0o755) + + return os.path.join(pids_dir, self.uuid + '.pid') + + @property + def pid(self): + """Last known pid for this external process spawned for this uuid.""" + file_name = self.get_pid_file_name() + msg = _('Error while reading %s') + + try: + with open(file_name, 'r') as f: + return int(f.read()) + except IOError: + msg = _('Unable to access %s') + except ValueError: + msg = _('Unable to convert value in %s') + + LOG.debug(msg, file_name) + return None + + @property + def active(self): + pid = self.pid + if pid is None: + return False + + cmdline = '/proc/%s/cmdline' % pid + try: + with open(cmdline, "r") as f: + return self.uuid in f.readline() + except IOError: + return False diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/interface.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/interface.py new file mode 100644 index 00000000..c6677c11 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/interface.py @@ -0,0 +1,448 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +import netaddr +from oslo.config import cfg +import six + +from neutron.agent.common import config +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.extensions import flavor +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('ovs_integration_bridge', + default='br-int', + help=_('Name of Open vSwitch bridge to use')), + cfg.BoolOpt('ovs_use_veth', + default=False, + help=_('Uses veth for an interface or not')), + cfg.IntOpt('network_device_mtu', + help=_('MTU setting for device.')), + cfg.StrOpt('meta_flavor_driver_mappings', + help=_('Mapping between flavor and LinuxInterfaceDriver')), + cfg.StrOpt('admin_user', + help=_("Admin username")), + cfg.StrOpt('admin_password', + help=_("Admin password"), + secret=True), + cfg.StrOpt('admin_tenant_name', + help=_("Admin tenant name")), + cfg.StrOpt('auth_url', + help=_("Authentication URL")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('auth_region', + help=_("Authentication region")), +] + + +@six.add_metaclass(abc.ABCMeta) +class LinuxInterfaceDriver(object): + + # from linux IF_NAMESIZE + DEV_NAME_LEN = 14 + DEV_NAME_PREFIX = 'tap' + + def __init__(self, conf): + self.conf = conf + self.root_helper = config.get_root_helper(conf) + + def init_l3(self, device_name, ip_cidrs, namespace=None, + preserve_ips=[], gateway=None, extra_subnets=[]): + """Set the L3 settings for the interface using data from the port. + + ip_cidrs: list of 'X.X.X.X/YY' strings + preserve_ips: list of ip cidrs that should not be removed from device + """ + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace=namespace) + + previous = {} + for address in device.addr.list(scope='global', filters=['permanent']): + previous[address['cidr']] = address['ip_version'] + + # add new addresses + for ip_cidr in ip_cidrs: + + net = netaddr.IPNetwork(ip_cidr) + # Convert to compact IPv6 address because the return values of + # "ip addr list" are compact. 
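+ # e.g. netaddr renders '2001:db8:0:0::1/64' as '2001:db8::1/64', the same
+ # compact form that "ip addr list" reports into 'previous' above, so
+ # already-configured addresses are recognized and not re-added.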
+ if net.version == 6: + ip_cidr = str(net) + if ip_cidr in previous: + del previous[ip_cidr] + continue + + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + + # clean up any old addresses + for ip_cidr, ip_version in previous.items(): + if ip_cidr not in preserve_ips: + device.addr.delete(ip_version, ip_cidr) + + if gateway: + device.route.add_gateway(gateway) + + new_onlink_routes = set(s['cidr'] for s in extra_subnets) + existing_onlink_routes = set(device.route.list_onlink_routes()) + for route in new_onlink_routes - existing_onlink_routes: + device.route.add_onlink_route(route) + for route in existing_onlink_routes - new_onlink_routes: + device.route.delete_onlink_route(route) + + def check_bridge_exists(self, bridge): + if not ip_lib.device_exists(bridge): + raise exceptions.BridgeDoesNotExist(bridge=bridge) + + def get_device_name(self, port): + return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN] + + @abc.abstractmethod + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plug in the interface.""" + + @abc.abstractmethod + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + + +class NullDriver(LinuxInterfaceDriver): + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + pass + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + pass + + +class OVSInterfaceDriver(LinuxInterfaceDriver): + """Driver for creating an internal interface on an OVS bridge.""" + + DEV_NAME_PREFIX = 'tap' + + def __init__(self, conf): + super(OVSInterfaceDriver, self).__init__(conf) + if self.conf.ovs_use_veth: + self.DEV_NAME_PREFIX = 'ns-' + + def _get_tap_name(self, dev_name, prefix=None): + if self.conf.ovs_use_veth: + dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap') + return dev_name + + def _ovs_add_port(self, bridge, device_name, port_id, mac_address, + internal=True): + cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port', device_name, '--', + 'add-port', bridge, device_name] + if internal: + cmd += ['--', 'set', 'Interface', device_name, 'type=internal'] + cmd += ['--', 'set', 'Interface', device_name, + 'external-ids:iface-id=%s' % port_id, + '--', 'set', 'Interface', device_name, + 'external-ids:iface-status=active', + '--', 'set', 'Interface', device_name, + 'external-ids:attached-mac=%s' % mac_address] + utils.execute(cmd, self.root_helper) + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plug in the interface.""" + if not bridge: + bridge = self.conf.ovs_integration_bridge + + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + + self.check_bridge_exists(bridge) + + ip = ip_lib.IPWrapper(self.root_helper) + tap_name = self._get_tap_name(device_name, prefix) + + if self.conf.ovs_use_veth: + # Create ns_dev in a namespace if one is configured. + root_dev, ns_dev = ip.add_veth(tap_name, + device_name, + namespace2=namespace) + else: + ns_dev = ip.device(device_name) + + internal = not self.conf.ovs_use_veth + self._ovs_add_port(bridge, tap_name, port_id, mac_address, + internal=internal) + + ns_dev.link.set_address(mac_address) + + if self.conf.network_device_mtu: + ns_dev.link.set_mtu(self.conf.network_device_mtu) + if self.conf.ovs_use_veth: + root_dev.link.set_mtu(self.conf.network_device_mtu) + + # Add an interface created by ovs to the namespace. 
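+ # (In the veth case add_veth() above already created the namespace end of
+ # the pair inside the namespace, so only the OVS-internal device needs to
+ # be moved here.)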
+ if not self.conf.ovs_use_veth and namespace: + namespace_obj = ip.ensure_namespace(namespace) + namespace_obj.add_device_to_namespace(ns_dev) + + ns_dev.link.set_up() + if self.conf.ovs_use_veth: + root_dev.link.set_up() + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + if not bridge: + bridge = self.conf.ovs_integration_bridge + + tap_name = self._get_tap_name(device_name, prefix) + self.check_bridge_exists(bridge) + ovs = ovs_lib.OVSBridge(bridge, self.root_helper) + + try: + ovs.delete_port(tap_name) + if self.conf.ovs_use_veth: + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace) + device.link.delete() + LOG.debug(_("Unplugged interface '%s'"), device_name) + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), + device_name) + + +class MidonetInterfaceDriver(LinuxInterfaceDriver): + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """This method is called by the Dhcp agent or by the L3 agent + when a new network is created + """ + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + ip = ip_lib.IPWrapper(self.root_helper) + tap_name = device_name.replace(prefix or 'tap', 'tap') + + # Create ns_dev in a namespace if one is configured. + root_dev, ns_dev = ip.add_veth(tap_name, device_name, + namespace2=namespace) + + ns_dev.link.set_address(mac_address) + + # Add an interface created by ovs to the namespace. + namespace_obj = ip.ensure_namespace(namespace) + namespace_obj.add_device_to_namespace(ns_dev) + + ns_dev.link.set_up() + root_dev.link.set_up() + + cmd = ['mm-ctl', '--bind-port', port_id, device_name] + utils.execute(cmd, self.root_helper) + + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + # the port will be deleted by the dhcp agent that will call the plugin + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace) + try: + device.link.delete() + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), device_name) + LOG.debug(_("Unplugged interface '%s'"), device_name) + + ip_lib.IPWrapper( + self.root_helper, namespace).garbage_collect_namespace() + + +class IVSInterfaceDriver(LinuxInterfaceDriver): + """Driver for creating an internal interface on an IVS bridge.""" + + DEV_NAME_PREFIX = 'tap' + + def __init__(self, conf): + super(IVSInterfaceDriver, self).__init__(conf) + self.DEV_NAME_PREFIX = 'ns-' + + def _get_tap_name(self, dev_name, prefix=None): + dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap') + return dev_name + + def _ivs_add_port(self, device_name, port_id, mac_address): + cmd = ['ivs-ctl', 'add-port', device_name] + utils.execute(cmd, self.root_helper) + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plug in the interface.""" + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + + ip = ip_lib.IPWrapper(self.root_helper) + tap_name = self._get_tap_name(device_name, prefix) + + root_dev, ns_dev = ip.add_veth(tap_name, device_name) + + self._ivs_add_port(tap_name, port_id, mac_address) + + ns_dev = ip.device(device_name) + ns_dev.link.set_address(mac_address) + + if self.conf.network_device_mtu: + ns_dev.link.set_mtu(self.conf.network_device_mtu) + 
root_dev.link.set_mtu(self.conf.network_device_mtu) + + if namespace: + namespace_obj = ip.ensure_namespace(namespace) + namespace_obj.add_device_to_namespace(ns_dev) + + ns_dev.link.set_up() + root_dev.link.set_up() + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + tap_name = self._get_tap_name(device_name, prefix) + try: + cmd = ['ivs-ctl', 'del-port', tap_name] + utils.execute(cmd, self.root_helper) + device = ip_lib.IPDevice(device_name, + self.root_helper, + namespace) + device.link.delete() + LOG.debug(_("Unplugged interface '%s'"), device_name) + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), + device_name) + + +class BridgeInterfaceDriver(LinuxInterfaceDriver): + """Driver for creating bridge interfaces.""" + + DEV_NAME_PREFIX = 'ns-' + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + """Plugin the interface.""" + if not ip_lib.device_exists(device_name, + self.root_helper, + namespace=namespace): + ip = ip_lib.IPWrapper(self.root_helper) + + # Enable agent to define the prefix + if prefix: + tap_name = device_name.replace(prefix, 'tap') + else: + tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap') + # Create ns_veth in a namespace if one is configured. + root_veth, ns_veth = ip.add_veth(tap_name, device_name, + namespace2=namespace) + ns_veth.link.set_address(mac_address) + + if self.conf.network_device_mtu: + root_veth.link.set_mtu(self.conf.network_device_mtu) + ns_veth.link.set_mtu(self.conf.network_device_mtu) + + root_veth.link.set_up() + ns_veth.link.set_up() + + else: + LOG.info(_("Device %s already exists"), device_name) + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + """Unplug the interface.""" + device = ip_lib.IPDevice(device_name, self.root_helper, namespace) + try: + device.link.delete() + LOG.debug(_("Unplugged interface '%s'"), device_name) + except RuntimeError: + LOG.error(_("Failed unplugging interface '%s'"), + device_name) + + +class MetaInterfaceDriver(LinuxInterfaceDriver): + def __init__(self, conf): + super(MetaInterfaceDriver, self).__init__(conf) + from neutronclient.v2_0 import client + self.neutron = client.Client( + username=self.conf.admin_user, + password=self.conf.admin_password, + tenant_name=self.conf.admin_tenant_name, + auth_url=self.conf.auth_url, + auth_strategy=self.conf.auth_strategy, + region_name=self.conf.auth_region + ) + self.flavor_driver_map = {} + for net_flavor, driver_name in [ + driver_set.split(':') + for driver_set in + self.conf.meta_flavor_driver_mappings.split(',')]: + self.flavor_driver_map[net_flavor] = self._load_driver(driver_name) + + def _get_flavor_by_network_id(self, network_id): + network = self.neutron.show_network(network_id) + return network['network'][flavor.FLAVOR_NETWORK] + + def _get_driver_by_network_id(self, network_id): + net_flavor = self._get_flavor_by_network_id(network_id) + return self.flavor_driver_map[net_flavor] + + def _set_device_plugin_tag(self, network_id, device_name, namespace=None): + plugin_tag = self._get_flavor_by_network_id(network_id) + device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace) + device.link.set_alias(plugin_tag) + + def _get_device_plugin_tag(self, device_name, namespace=None): + device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace) + return device.link.alias + + def get_device_name(self, port): 
+ driver = self._get_driver_by_network_id(port.network_id) + return driver.get_device_name(port) + + def plug(self, network_id, port_id, device_name, mac_address, + bridge=None, namespace=None, prefix=None): + driver = self._get_driver_by_network_id(network_id) + ret = driver.plug(network_id, port_id, device_name, mac_address, + bridge=bridge, namespace=namespace, prefix=prefix) + self._set_device_plugin_tag(network_id, device_name, namespace) + return ret + + def unplug(self, device_name, bridge=None, namespace=None, prefix=None): + plugin_tag = self._get_device_plugin_tag(device_name, namespace) + driver = self.flavor_driver_map[plugin_tag] + return driver.unplug(device_name, bridge, namespace, prefix) + + def _load_driver(self, driver_provider): + LOG.debug(_("Driver location: %s"), driver_provider) + plugin_klass = importutils.import_class(driver_provider) + return plugin_klass(self.conf) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ip_lib.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ip_lib.py new file mode 100644 index 00000000..9b6b18b3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ip_lib.py @@ -0,0 +1,567 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from oslo.config import cfg + +from neutron.agent.linux import utils +from neutron.common import exceptions + + +OPTS = [ + cfg.BoolOpt('ip_lib_force_root', + default=False, + help=_('Force ip_lib calls to use the root helper')), +] + + +LOOPBACK_DEVNAME = 'lo' +# NOTE(ethuleau): depend of the version of iproute2, the vlan +# interface details vary. +VLAN_INTERFACE_DETAIL = ['vlan protocol 802.1q', + 'vlan protocol 802.1Q', + 'vlan id'] + + +class SubProcessBase(object): + def __init__(self, root_helper=None, namespace=None): + self.root_helper = root_helper + self.namespace = namespace + try: + self.force_root = cfg.CONF.ip_lib_force_root + except cfg.NoSuchOptError: + # Only callers that need to force use of the root helper + # need to register the option. + self.force_root = False + + def _run(self, options, command, args): + if self.namespace: + return self._as_root(options, command, args) + elif self.force_root: + # Force use of the root helper to ensure that commands + # will execute in dom0 when running under XenServer/XCP. 
+ return self._execute(options, command, args, self.root_helper) + else: + return self._execute(options, command, args) + + def _as_root(self, options, command, args, use_root_namespace=False): + if not self.root_helper: + raise exceptions.SudoRequired() + + namespace = self.namespace if not use_root_namespace else None + + return self._execute(options, + command, + args, + self.root_helper, + namespace) + + @classmethod + def _execute(cls, options, command, args, root_helper=None, + namespace=None): + opt_list = ['-%s' % o for o in options] + if namespace: + ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip'] + else: + ip_cmd = ['ip'] + return utils.execute(ip_cmd + opt_list + [command] + list(args), + root_helper=root_helper) + + +class IPWrapper(SubProcessBase): + def __init__(self, root_helper=None, namespace=None): + super(IPWrapper, self).__init__(root_helper=root_helper, + namespace=namespace) + self.netns = IpNetnsCommand(self) + + def device(self, name): + return IPDevice(name, self.root_helper, self.namespace) + + def get_devices(self, exclude_loopback=False): + retval = [] + output = self._execute(['o', 'd'], 'link', ('list',), + self.root_helper, self.namespace) + for line in output.split('\n'): + if '<' not in line: + continue + tokens = line.split(' ', 2) + if len(tokens) == 3: + if any(v in tokens[2] for v in VLAN_INTERFACE_DETAIL): + delimiter = '@' + else: + delimiter = ':' + name = tokens[1].rpartition(delimiter)[0].strip() + + if exclude_loopback and name == LOOPBACK_DEVNAME: + continue + + retval.append(IPDevice(name, + self.root_helper, + self.namespace)) + return retval + + def add_tuntap(self, name, mode='tap'): + self._as_root('', 'tuntap', ('add', name, 'mode', mode)) + return IPDevice(name, self.root_helper, self.namespace) + + def add_veth(self, name1, name2, namespace2=None): + args = ['add', name1, 'type', 'veth', 'peer', 'name', name2] + + if namespace2 is None: + namespace2 = self.namespace + else: + self.ensure_namespace(namespace2) + args += ['netns', namespace2] + + self._as_root('', 'link', tuple(args)) + + return (IPDevice(name1, self.root_helper, self.namespace), + IPDevice(name2, self.root_helper, namespace2)) + + def ensure_namespace(self, name): + if not self.netns.exists(name): + ip = self.netns.add(name) + lo = ip.device(LOOPBACK_DEVNAME) + lo.link.set_up() + else: + ip = IPWrapper(self.root_helper, name) + return ip + + def namespace_is_empty(self): + return not self.get_devices(exclude_loopback=True) + + def garbage_collect_namespace(self): + """Conditionally destroy the namespace if it is empty.""" + if self.namespace and self.netns.exists(self.namespace): + if self.namespace_is_empty(): + self.netns.delete(self.namespace) + return True + return False + + def add_device_to_namespace(self, device): + if self.namespace: + device.link.set_netns(self.namespace) + + def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None, + local=None, port=None, proxy=False): + cmd = ['add', name, 'type', 'vxlan', 'id', vni] + if group: + cmd.extend(['group', group]) + if dev: + cmd.extend(['dev', dev]) + if ttl: + cmd.extend(['ttl', ttl]) + if tos: + cmd.extend(['tos', tos]) + if local: + cmd.extend(['local', local]) + if proxy: + cmd.append('proxy') + # tuple: min,max + if port and len(port) == 2: + cmd.extend(['port', port[0], port[1]]) + elif port: + raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port) + self._as_root('', 'link', cmd) + return (IPDevice(name, self.root_helper, self.namespace)) + + @classmethod + def get_namespaces(cls, 
root_helper): + output = cls._execute('', 'netns', ('list',), root_helper=root_helper) + return [l.strip() for l in output.split('\n')] + + +class IpRule(IPWrapper): + def add_rule_from(self, ip, table, rule_pr): + args = ['add', 'from', ip, 'lookup', table, 'priority', rule_pr] + ip = self._as_root('', 'rule', tuple(args)) + return ip + + def delete_rule_priority(self, rule_pr): + args = ['del', 'priority', rule_pr] + ip = self._as_root('', 'rule', tuple(args)) + return ip + + +class IPDevice(SubProcessBase): + def __init__(self, name, root_helper=None, namespace=None): + super(IPDevice, self).__init__(root_helper=root_helper, + namespace=namespace) + self.name = name + self.link = IpLinkCommand(self) + self.addr = IpAddrCommand(self) + self.route = IpRouteCommand(self) + self.neigh = IpNeighCommand(self) + + def __eq__(self, other): + return (other is not None and self.name == other.name + and self.namespace == other.namespace) + + def __str__(self): + return self.name + + +class IpCommandBase(object): + COMMAND = '' + + def __init__(self, parent): + self._parent = parent + + def _run(self, *args, **kwargs): + return self._parent._run(kwargs.get('options', []), self.COMMAND, args) + + def _as_root(self, *args, **kwargs): + return self._parent._as_root(kwargs.get('options', []), + self.COMMAND, + args, + kwargs.get('use_root_namespace', False)) + + +class IpDeviceCommandBase(IpCommandBase): + @property + def name(self): + return self._parent.name + + +class IpLinkCommand(IpDeviceCommandBase): + COMMAND = 'link' + + def set_address(self, mac_address): + self._as_root('set', self.name, 'address', mac_address) + + def set_mtu(self, mtu_size): + self._as_root('set', self.name, 'mtu', mtu_size) + + def set_up(self): + self._as_root('set', self.name, 'up') + + def set_down(self): + self._as_root('set', self.name, 'down') + + def set_netns(self, namespace): + self._as_root('set', self.name, 'netns', namespace) + self._parent.namespace = namespace + + def set_name(self, name): + self._as_root('set', self.name, 'name', name) + self._parent.name = name + + def set_alias(self, alias_name): + self._as_root('set', self.name, 'alias', alias_name) + + def delete(self): + self._as_root('delete', self.name) + + @property + def address(self): + return self.attributes.get('link/ether') + + @property + def state(self): + return self.attributes.get('state') + + @property + def mtu(self): + return self.attributes.get('mtu') + + @property + def qdisc(self): + return self.attributes.get('qdisc') + + @property + def qlen(self): + return self.attributes.get('qlen') + + @property + def alias(self): + return self.attributes.get('alias') + + @property + def attributes(self): + return self._parse_line(self._run('show', self.name, options='o')) + + def _parse_line(self, value): + if not value: + return {} + + device_name, settings = value.replace("\\", '').split('>', 1) + tokens = settings.split() + keys = tokens[::2] + values = [int(v) if v.isdigit() else v for v in tokens[1::2]] + + retval = dict(zip(keys, values)) + return retval + + +class IpAddrCommand(IpDeviceCommandBase): + COMMAND = 'addr' + + def add(self, ip_version, cidr, broadcast, scope='global'): + self._as_root('add', + cidr, + 'brd', + broadcast, + 'scope', + scope, + 'dev', + self.name, + options=[ip_version]) + + def delete(self, ip_version, cidr): + self._as_root('del', + cidr, + 'dev', + self.name, + options=[ip_version]) + + def flush(self): + self._as_root('flush', self.name) + + def list(self, scope=None, to=None, filters=None): + if filters is 
None: + filters = [] + + retval = [] + + if scope: + filters += ['scope', scope] + if to: + filters += ['to', to] + + for line in self._run('show', self.name, *filters).split('\n'): + line = line.strip() + if not line.startswith('inet'): + continue + parts = line.split() + if parts[0] == 'inet6': + version = 6 + scope = parts[3] + broadcast = '::' + else: + version = 4 + if parts[2] == 'brd': + broadcast = parts[3] + scope = parts[5] + else: + # sometimes output of 'ip a' might look like: + # inet 192.168.100.100/24 scope global eth0 + # and broadcast needs to be calculated from CIDR + broadcast = str(netaddr.IPNetwork(parts[1]).broadcast) + scope = parts[3] + + retval.append(dict(cidr=parts[1], + broadcast=broadcast, + scope=scope, + ip_version=version, + dynamic=('dynamic' == parts[-1]))) + return retval + + +class IpRouteCommand(IpDeviceCommandBase): + COMMAND = 'route' + + def add_gateway(self, gateway, metric=None, table=None): + args = ['replace', 'default', 'via', gateway] + if metric: + args += ['metric', metric] + args += ['dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def delete_gateway(self, gateway=None, table=None): + args = ['del', 'default'] + if gateway: + args += ['via', gateway] + args += ['dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def list_onlink_routes(self): + def iterate_routes(): + output = self._run('list', 'dev', self.name, 'scope', 'link') + for line in output.split('\n'): + line = line.strip() + if line and not line.count('src'): + yield line + + return [x for x in iterate_routes()] + + def add_onlink_route(self, cidr): + self._as_root('replace', cidr, 'dev', self.name, 'scope', 'link') + + def delete_onlink_route(self, cidr): + self._as_root('del', cidr, 'dev', self.name, 'scope', 'link') + + def get_gateway(self, scope=None, filters=None): + if filters is None: + filters = [] + + retval = None + + if scope: + filters += ['scope', scope] + + route_list_lines = self._run('list', 'dev', self.name, + *filters).split('\n') + default_route_line = next((x.strip() for x in + route_list_lines if + x.strip().startswith('default')), None) + if default_route_line: + gateway_index = 2 + parts = default_route_line.split() + retval = dict(gateway=parts[gateway_index]) + if 'metric' in parts: + metric_index = parts.index('metric') + 1 + retval.update(metric=int(parts[metric_index])) + + return retval + + def pullup_route(self, interface_name): + """Ensures that the route entry for the interface is before all + others on the same subnet. 
+ """ + device_list = [] + device_route_list_lines = self._run('list', 'proto', 'kernel', + 'dev', interface_name).split('\n') + for device_route_line in device_route_list_lines: + try: + subnet = device_route_line.split()[0] + except Exception: + continue + subnet_route_list_lines = self._run('list', 'proto', 'kernel', + 'match', subnet).split('\n') + for subnet_route_line in subnet_route_list_lines: + i = iter(subnet_route_line.split()) + while(i.next() != 'dev'): + pass + device = i.next() + try: + while(i.next() != 'src'): + pass + src = i.next() + except Exception: + src = '' + if device != interface_name: + device_list.append((device, src)) + else: + break + + for (device, src) in device_list: + self._as_root('del', subnet, 'dev', device) + if (src != ''): + self._as_root('append', subnet, 'proto', 'kernel', + 'src', src, 'dev', device) + else: + self._as_root('append', subnet, 'proto', 'kernel', + 'dev', device) + + def add_route(self, cidr, ip, table=None): + args = ['replace', cidr, 'via', ip, 'dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def delete_route(self, cidr, ip, table=None): + args = ['del', cidr, 'via', ip, 'dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + +class IpNeighCommand(IpDeviceCommandBase): + COMMAND = 'neigh' + + def add(self, ip_version, ip_address, mac_address): + self._as_root('replace', + ip_address, + 'lladdr', + mac_address, + 'nud', + 'permanent', + 'dev', + self.name, + options=[ip_version]) + + def delete(self, ip_version, ip_address, mac_address): + self._as_root('del', + ip_address, + 'lladdr', + mac_address, + 'dev', + self.name, + options=[ip_version]) + + +class IpNetnsCommand(IpCommandBase): + COMMAND = 'netns' + + def add(self, name): + self._as_root('add', name, use_root_namespace=True) + return IPWrapper(self._parent.root_helper, name) + + def delete(self, name): + self._as_root('delete', name, use_root_namespace=True) + + def execute(self, cmds, addl_env={}, check_exit_code=True): + if not self._parent.root_helper: + raise exceptions.SudoRequired() + ns_params = [] + if self._parent.namespace: + ns_params = ['ip', 'netns', 'exec', self._parent.namespace] + + env_params = [] + if addl_env: + env_params = (['env'] + + ['%s=%s' % pair for pair in addl_env.items()]) + return utils.execute( + ns_params + env_params + list(cmds), + root_helper=self._parent.root_helper, + check_exit_code=check_exit_code) + + def exists(self, name): + output = self._parent._execute('o', 'netns', ['list']) + + for line in output.split('\n'): + if name == line.strip(): + return True + return False + + +def device_exists(device_name, root_helper=None, namespace=None): + try: + address = IPDevice(device_name, root_helper, namespace).link.address + except RuntimeError: + return False + return bool(address) + + +def ensure_device_is_ready(device_name, root_helper=None, namespace=None): + dev = IPDevice(device_name, root_helper, namespace) + try: + # Ensure the device is up, even if it is already up. If the device + # doesn't exist, a RuntimeError will be raised. 
+ dev.link.set_up() + except RuntimeError: + return False + return True + + +def iproute_arg_supported(command, arg, root_helper=None): + command += ['help'] + stdout, stderr = utils.execute(command, root_helper=root_helper, + check_exit_code=False, return_stderr=True) + return any(arg in line for line in stderr.split('\n')) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/iptables_firewall.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/iptables_firewall.py new file mode 100644 index 00000000..ff65a9b2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/iptables_firewall.py @@ -0,0 +1,381 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from oslo.config import cfg + +from neutron.agent import firewall +from neutron.agent.linux import iptables_manager +from neutron.common import constants +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) +SG_CHAIN = 'sg-chain' +INGRESS_DIRECTION = 'ingress' +EGRESS_DIRECTION = 'egress' +SPOOF_FILTER = 'spoof-filter' +CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i', + EGRESS_DIRECTION: 'o', + SPOOF_FILTER: 's'} +LINUX_DEV_LEN = 14 + + +class IptablesFirewallDriver(firewall.FirewallDriver): + """Driver which enforces security groups through iptables rules.""" + IPTABLES_DIRECTION = {INGRESS_DIRECTION: 'physdev-out', + EGRESS_DIRECTION: 'physdev-in'} + + def __init__(self): + self.iptables = iptables_manager.IptablesManager( + root_helper=cfg.CONF.AGENT.root_helper, + use_ipv6=True) + # list of port which has security group + self.filtered_ports = {} + self._add_fallback_chain_v4v6() + self._defer_apply = False + self._pre_defer_filtered_ports = None + + @property + def ports(self): + return self.filtered_ports + + def prepare_port_filter(self, port): + LOG.debug(_("Preparing device (%s) filter"), port['device']) + self._remove_chains() + self.filtered_ports[port['device']] = port + # each security group has it own chains + self._setup_chains() + self.iptables.apply() + + def update_port_filter(self, port): + LOG.debug(_("Updating device (%s) filter"), port['device']) + if port['device'] not in self.filtered_ports: + LOG.info(_('Attempted to update port filter which is not ' + 'filtered %s'), port['device']) + return + self._remove_chains() + self.filtered_ports[port['device']] = port + self._setup_chains() + self.iptables.apply() + + def remove_port_filter(self, port): + LOG.debug(_("Removing device (%s) filter"), port['device']) + if not self.filtered_ports.get(port['device']): + LOG.info(_('Attempted to remove port filter which is not ' + 'filtered %r'), port) + return + self._remove_chains() + self.filtered_ports.pop(port['device'], None) + self._setup_chains() + self.iptables.apply() + + def _setup_chains(self): + """Setup ingress and egress chain for a port.""" + if not self._defer_apply: + self._setup_chains_apply(self.filtered_ports) + + def _setup_chains_apply(self, ports): + 
self._add_chain_by_name_v4v6(SG_CHAIN) + for port in ports.values(): + self._setup_chain(port, INGRESS_DIRECTION) + self._setup_chain(port, EGRESS_DIRECTION) + self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT') + self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT') + + def _remove_chains(self): + """Remove ingress and egress chain for a port.""" + if not self._defer_apply: + self._remove_chains_apply(self.filtered_ports) + + def _remove_chains_apply(self, ports): + for port in ports.values(): + self._remove_chain(port, INGRESS_DIRECTION) + self._remove_chain(port, EGRESS_DIRECTION) + self._remove_chain(port, SPOOF_FILTER) + self._remove_chain_by_name_v4v6(SG_CHAIN) + + def _setup_chain(self, port, DIRECTION): + self._add_chain(port, DIRECTION) + self._add_rule_by_security_group(port, DIRECTION) + + def _remove_chain(self, port, DIRECTION): + chain_name = self._port_chain_name(port, DIRECTION) + self._remove_chain_by_name_v4v6(chain_name) + + def _add_fallback_chain_v4v6(self): + self.iptables.ipv4['filter'].add_chain('sg-fallback') + self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') + self.iptables.ipv6['filter'].add_chain('sg-fallback') + self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') + + def _add_chain_by_name_v4v6(self, chain_name): + self.iptables.ipv6['filter'].add_chain(chain_name) + self.iptables.ipv4['filter'].add_chain(chain_name) + + def _remove_chain_by_name_v4v6(self, chain_name): + self.iptables.ipv4['filter'].ensure_remove_chain(chain_name) + self.iptables.ipv6['filter'].ensure_remove_chain(chain_name) + + def _add_rule_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules): + for rule in ipv4_rules: + self.iptables.ipv4['filter'].add_rule(chain_name, rule) + + for rule in ipv6_rules: + self.iptables.ipv6['filter'].add_rule(chain_name, rule) + + def _get_device_name(self, port): + return port['device'] + + def _add_chain(self, port, direction): + chain_name = self._port_chain_name(port, direction) + self._add_chain_by_name_v4v6(chain_name) + + # Note(nati) jump to the security group chain (SG_CHAIN) + # This is needed because the packet may match rules of two ports + # if the two ports are on the same host + # We accept the packet at the end of SG_CHAIN.
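+ # Per-port wiring (roughly): FORWARD gets a physdev jump to $SG_CHAIN,
+ # $SG_CHAIN jumps to the per-port chain, and any packet not returned by a
+ # port rule eventually hits the sg-fallback DROP chain.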
+ + # jump to the security group chain + device = self._get_device_name(port) + jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' + '-j $%s' % (self.IPTABLES_DIRECTION[direction], + device, + SG_CHAIN)] + self._add_rule_to_chain_v4v6('FORWARD', jump_rule, jump_rule) + + # jump to the chain based on the device + jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' + '-j $%s' % (self.IPTABLES_DIRECTION[direction], + device, + chain_name)] + self._add_rule_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule) + + if direction == EGRESS_DIRECTION: + self._add_rule_to_chain_v4v6('INPUT', jump_rule, jump_rule) + + def _split_sgr_by_ethertype(self, security_group_rules): + ipv4_sg_rules = [] + ipv6_sg_rules = [] + for rule in security_group_rules: + if rule.get('ethertype') == constants.IPv4: + ipv4_sg_rules.append(rule) + elif rule.get('ethertype') == constants.IPv6: + if rule.get('protocol') == 'icmp': + rule['protocol'] = 'icmpv6' + ipv6_sg_rules.append(rule) + return ipv4_sg_rules, ipv6_sg_rules + + def _select_sgr_by_direction(self, port, direction): + return [rule + for rule in port.get('security_group_rules', []) + if rule['direction'] == direction] + + def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules): + if mac_ip_pairs: + chain_name = self._port_chain_name(port, SPOOF_FILTER) + table.add_chain(chain_name) + for mac, ip in mac_ip_pairs: + if ip is None: + # If fixed_ips is [] this rule will be added to the end + # of the list after the allowed_address_pair rules. + table.add_rule(chain_name, + '-m mac --mac-source %s -j RETURN' + % mac) + else: + table.add_rule(chain_name, + '-m mac --mac-source %s -s %s -j RETURN' + % (mac, ip)) + table.add_rule(chain_name, '-j DROP') + rules.append('-j $%s' % chain_name) + + def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs, + mac_ipv6_pairs): + if netaddr.IPNetwork(ip_address).version == 4: + mac_ipv4_pairs.append((mac, ip_address)) + else: + mac_ipv6_pairs.append((mac, ip_address)) + + def _spoofing_rule(self, port, ipv4_rules, ipv6_rules): + #Note(nati) allow dhcp or RA packet + ipv4_rules += ['-p udp -m udp --sport 68 --dport 67 -j RETURN'] + ipv6_rules += ['-p icmpv6 -j RETURN'] + ipv6_rules += ['-p udp -m udp --sport 546 --dport 547 -j RETURN'] + mac_ipv4_pairs = [] + mac_ipv6_pairs = [] + + if isinstance(port.get('allowed_address_pairs'), list): + for address_pair in port['allowed_address_pairs']: + self._build_ipv4v6_mac_ip_list(address_pair['mac_address'], + address_pair['ip_address'], + mac_ipv4_pairs, + mac_ipv6_pairs) + + for ip in port['fixed_ips']: + self._build_ipv4v6_mac_ip_list(port['mac_address'], ip, + mac_ipv4_pairs, mac_ipv6_pairs) + if not port['fixed_ips']: + mac_ipv4_pairs.append((port['mac_address'], None)) + mac_ipv6_pairs.append((port['mac_address'], None)) + + self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'], + mac_ipv4_pairs, ipv4_rules) + self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'], + mac_ipv6_pairs, ipv6_rules) + + def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules): + #Note(nati) Drop dhcp packet from VM + ipv4_rules += ['-p udp -m udp --sport 67 --dport 68 -j DROP'] + ipv6_rules += ['-p udp -m udp --sport 547 --dport 546 -j DROP'] + + def _accept_inbound_icmpv6(self): + # Allow multicast listener, neighbor solicitation and + # neighbor advertisement into the instance + icmpv6_rules = [] + for icmp6_type in constants.ICMPV6_ALLOWED_TYPES: + icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' % + icmp6_type] + return icmpv6_rules + + def 
_add_rule_by_security_group(self, port, direction): + chain_name = self._port_chain_name(port, direction) + # select rules for current direction + security_group_rules = self._select_sgr_by_direction(port, direction) + # split groups by ip version + # for ipv4, iptables command is used + # for ipv6, iptables6 command is used + ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype( + security_group_rules) + ipv4_iptables_rule = [] + ipv6_iptables_rule = [] + if direction == EGRESS_DIRECTION: + self._spoofing_rule(port, + ipv4_iptables_rule, + ipv6_iptables_rule) + self._drop_dhcp_rule(ipv4_iptables_rule, ipv6_iptables_rule) + if direction == INGRESS_DIRECTION: + ipv6_iptables_rule += self._accept_inbound_icmpv6() + ipv4_iptables_rule += self._convert_sgr_to_iptables_rules( + ipv4_sg_rules) + ipv6_iptables_rule += self._convert_sgr_to_iptables_rules( + ipv6_sg_rules) + self._add_rule_to_chain_v4v6(chain_name, + ipv4_iptables_rule, + ipv6_iptables_rule) + + def _convert_sgr_to_iptables_rules(self, security_group_rules): + iptables_rules = [] + self._drop_invalid_packets(iptables_rules) + self._allow_established(iptables_rules) + for rule in security_group_rules: + # These arguments MUST be in the format iptables-save will + # display them: source/dest, protocol, sport, dport, target + # Otherwise the iptables_manager code won't be able to find + # them to preserve their [packet:byte] counts. + args = self._ip_prefix_arg('s', + rule.get('source_ip_prefix')) + args += self._ip_prefix_arg('d', + rule.get('dest_ip_prefix')) + args += self._protocol_arg(rule.get('protocol')) + args += self._port_arg('sport', + rule.get('protocol'), + rule.get('source_port_range_min'), + rule.get('source_port_range_max')) + args += self._port_arg('dport', + rule.get('protocol'), + rule.get('port_range_min'), + rule.get('port_range_max')) + args += ['-j RETURN'] + iptables_rules += [' '.join(args)] + + iptables_rules += ['-j $sg-fallback'] + + return iptables_rules + + def _drop_invalid_packets(self, iptables_rules): + # Always drop invalid packets + iptables_rules += ['-m state --state ' 'INVALID -j DROP'] + return iptables_rules + + def _allow_established(self, iptables_rules): + # Allow established connections + iptables_rules += ['-m state --state RELATED,ESTABLISHED -j RETURN'] + return iptables_rules + + def _protocol_arg(self, protocol): + if not protocol: + return [] + + iptables_rule = ['-p', protocol] + # iptables always adds '-m protocol' for udp and tcp + if protocol in ['udp', 'tcp']: + iptables_rule += ['-m', protocol] + return iptables_rule + + def _port_arg(self, direction, protocol, port_range_min, port_range_max): + if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6'] + or not port_range_min): + return [] + + if protocol in ['icmp', 'icmpv6']: + # Note(xuhanp): port_range_min/port_range_max represent + # icmp type/code when protocol is icmp or icmpv6 + # icmp code can be 0 so we cannot use "if port_range_max" here + if port_range_max is not None: + return ['--%s-type' % protocol, + '%s/%s' % (port_range_min, port_range_max)] + return ['--%s-type' % protocol, '%s' % port_range_min] + elif port_range_min == port_range_max: + return ['--%s' % direction, '%s' % (port_range_min,)] + else: + return ['-m', 'multiport', + '--%ss' % direction, + '%s:%s' % (port_range_min, port_range_max)] + + def _ip_prefix_arg(self, direction, ip_prefix): + #NOTE (nati) : source_group_id is converted to list of source_ + # ip_prefix in server side + if ip_prefix: + return ['-%s' % direction, ip_prefix] + return [] + 
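+ # Example (illustrative values): given the helpers above, a security group
+ # rule such as {'ethertype': 'IPv4', 'direction': 'ingress',
+ # 'protocol': 'tcp', 'port_range_min': 22, 'port_range_max': 22,
+ # 'source_ip_prefix': '10.0.0.0/24'} is rendered by
+ # _convert_sgr_to_iptables_rules() as
+ # '-s 10.0.0.0/24 -p tcp -m tcp --dport 22 -j RETURN'.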
+ def _port_chain_name(self, port, direction): + return iptables_manager.get_chain_name( + '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:])) + + def filter_defer_apply_on(self): + if not self._defer_apply: + self.iptables.defer_apply_on() + self._pre_defer_filtered_ports = dict(self.filtered_ports) + self._defer_apply = True + + def filter_defer_apply_off(self): + if self._defer_apply: + self._defer_apply = False + self._remove_chains_apply(self._pre_defer_filtered_ports) + self._pre_defer_filtered_ports = None + self._setup_chains_apply(self.filtered_ports) + self.iptables.defer_apply_off() + + +class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver): + OVS_HYBRID_TAP_PREFIX = 'tap' + + def _port_chain_name(self, port, direction): + return iptables_manager.get_chain_name( + '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'])) + + def _get_device_name(self, port): + return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/iptables_manager.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/iptables_manager.py new file mode 100644 index 00000000..5fbd9931 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/iptables_manager.py @@ -0,0 +1,666 @@ +# Copyright 2012 Locaweb. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @author: Juliano Martinez, Locaweb. +# based on +# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py + +"""Implements iptables rules using linux utilities.""" + +import inspect +import os +import re + +from neutron.agent.linux import utils as linux_utils +from neutron.common import utils +from neutron.openstack.common import excutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +# NOTE(vish): Iptables supports chain names of up to 28 characters, and we +# add up to 12 characters to binary_name which is used as a prefix, +# so we limit it to 16 characters. +# (max_chain_name_length - len('-POSTROUTING') == 16) +def get_binary_name(): + """Grab the name of the binary we're running in.""" + return os.path.basename(inspect.stack()[-1][1])[:16] + +binary_name = get_binary_name() + +# A length of a chain name must be less than or equal to 11 characters. +# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11 +MAX_CHAIN_LEN_WRAP = 11 +MAX_CHAIN_LEN_NOWRAP = 28 + +# Number of iptables rules to print before and after a rule that causes a +# failure during iptables-restore +IPTABLES_ERROR_LINES_OF_CONTEXT = 5 + + +def get_chain_name(chain_name, wrap=True): + if wrap: + return chain_name[:MAX_CHAIN_LEN_WRAP] + else: + return chain_name[:MAX_CHAIN_LEN_NOWRAP] + + +class IptablesRule(object): + """An iptables rule. + + You shouldn't need to use this class directly, it's only used by + IptablesManager.
+ + """ + + def __init__(self, chain, rule, wrap=True, top=False, + binary_name=binary_name, tag=None): + self.chain = get_chain_name(chain, wrap) + self.rule = rule + self.wrap = wrap + self.top = top + self.wrap_name = binary_name[:16] + self.tag = tag + + def __eq__(self, other): + return ((self.chain == other.chain) and + (self.rule == other.rule) and + (self.top == other.top) and + (self.wrap == other.wrap)) + + def __ne__(self, other): + return not self == other + + def __str__(self): + if self.wrap: + chain = '%s-%s' % (self.wrap_name, self.chain) + else: + chain = self.chain + return '-A %s %s' % (chain, self.rule) + + +class IptablesTable(object): + """An iptables table.""" + + def __init__(self, binary_name=binary_name): + self.rules = [] + self.remove_rules = [] + self.chains = set() + self.unwrapped_chains = set() + self.remove_chains = set() + self.wrap_name = binary_name[:16] + + def add_chain(self, name, wrap=True): + """Adds a named chain to the table. + + The chain name is wrapped to be unique for the component creating + it, so different components of Nova can safely create identically + named chains without interfering with one another. + + At the moment, its wrapped name is -, + so if nova-compute creates a chain named 'OUTPUT', it'll actually + end up named 'nova-compute-OUTPUT'. + + """ + name = get_chain_name(name, wrap) + if wrap: + self.chains.add(name) + else: + self.unwrapped_chains.add(name) + + def _select_chain_set(self, wrap): + if wrap: + return self.chains + else: + return self.unwrapped_chains + + def ensure_remove_chain(self, name, wrap=True): + """Ensure the chain is removed. + + This removal "cascades". All rule in the chain are removed, as are + all rules in other chains that jump to it. + """ + name = get_chain_name(name, wrap) + chain_set = self._select_chain_set(wrap) + if name not in chain_set: + return + + self.remove_chain(name, wrap) + + def remove_chain(self, name, wrap=True): + """Remove named chain. + + This removal "cascades". All rule in the chain are removed, as are + all rules in other chains that jump to it. + + If the chain is not found, this is merely logged. + + """ + name = get_chain_name(name, wrap) + chain_set = self._select_chain_set(wrap) + + if name not in chain_set: + LOG.warn(_('Attempted to remove chain %s which does not exist'), + name) + return + + chain_set.remove(name) + + if not wrap: + # non-wrapped chains and rules need to be dealt with specially, + # so we keep a list of them to be iterated over in apply() + self.remove_chains.add(name) + + # first, add rules to remove that have a matching chain name + self.remove_rules += [r for r in self.rules if r.chain == name] + + # next, remove rules from list that have a matching chain name + self.rules = [r for r in self.rules if r.chain != name] + + if not wrap: + jump_snippet = '-j %s' % name + # next, add rules to remove that have a matching jump chain + self.remove_rules += [r for r in self.rules + if jump_snippet in r.rule] + else: + jump_snippet = '-j %s-%s' % (self.wrap_name, name) + + # finally, remove rules from list that have a matching jump chain + self.rules = [r for r in self.rules + if jump_snippet not in r.rule] + + def add_rule(self, chain, rule, wrap=True, top=False, tag=None): + """Add a rule to the table. + + This is just like what you'd feed to iptables, just without + the '-A ' bit at the start. + + However, if you need to jump to one of your wrapped chains, + prepend its name with a '$' which will ensure the wrapping + is applied correctly. 
+ + """ + chain = get_chain_name(chain, wrap) + if wrap and chain not in self.chains: + raise LookupError(_('Unknown chain: %r') % chain) + + if '$' in rule: + rule = ' '.join( + self._wrap_target_chain(e, wrap) for e in rule.split(' ')) + + self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name, + tag)) + + def _wrap_target_chain(self, s, wrap): + if s.startswith('$'): + s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap))) + + return s + + def remove_rule(self, chain, rule, wrap=True, top=False): + """Remove a rule from a chain. + + Note: The rule must be exactly identical to the one that was added. + You cannot switch arguments around like you can with the iptables + CLI tool. + + """ + chain = get_chain_name(chain, wrap) + try: + if '$' in rule: + rule = ' '.join( + self._wrap_target_chain(e, wrap) for e in rule.split(' ')) + + self.rules.remove(IptablesRule(chain, rule, wrap, top, + self.wrap_name)) + if not wrap: + self.remove_rules.append(IptablesRule(chain, rule, wrap, top, + self.wrap_name)) + except ValueError: + LOG.warn(_('Tried to remove rule that was not there:' + ' %(chain)r %(rule)r %(wrap)r %(top)r'), + {'chain': chain, 'rule': rule, + 'top': top, 'wrap': wrap}) + + def empty_chain(self, chain, wrap=True): + """Remove all rules from a chain.""" + chain = get_chain_name(chain, wrap) + chained_rules = [rule for rule in self.rules + if rule.chain == chain and rule.wrap == wrap] + for rule in chained_rules: + self.rules.remove(rule) + + def clear_rules_by_tag(self, tag): + if not tag: + return + rules = [rule for rule in self.rules if rule.tag == tag] + for rule in rules: + self.rules.remove(rule) + + +class IptablesManager(object): + """Wrapper for iptables. + + See IptablesTable for some usage docs + + A number of chains are set up to begin with. + + First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its + name is not wrapped, so it's shared between the various nova workers. It's + intended for rules that need to live at the top of the FORWARD and OUTPUT + chains. It's in both the ipv4 and ipv6 set of tables. + + For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains + are wrapped, meaning that the "real" INPUT chain has a rule that jumps to + the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named + "local" which is jumped to from neutron-filter-top. + + For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are + wrapped in the same was as the built-in filter chains. Additionally, + there's a snat chain that is applied after the POSTROUTING chain. + + """ + + def __init__(self, _execute=None, state_less=False, + root_helper=None, use_ipv6=False, namespace=None, + binary_name=binary_name): + if _execute: + self.execute = _execute + else: + self.execute = linux_utils.execute + + self.use_ipv6 = use_ipv6 + self.root_helper = root_helper + self.namespace = namespace + self.iptables_apply_deferred = False + self.wrap_name = binary_name[:16] + + self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)} + self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)} + + # Add a neutron-filter-top chain. It's intended to be shared + # among the various nova components. It sits at the very top + # of FORWARD and OUTPUT. 
+ for tables in [self.ipv4, self.ipv6]: + tables['filter'].add_chain('neutron-filter-top', wrap=False) + tables['filter'].add_rule('FORWARD', '-j neutron-filter-top', + wrap=False, top=True) + tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top', + wrap=False, top=True) + + tables['filter'].add_chain('local') + tables['filter'].add_rule('neutron-filter-top', '-j $local', + wrap=False) + + # Wrap the built-in chains + builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}, + 6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}} + + if not state_less: + self.ipv4.update( + {'nat': IptablesTable(binary_name=self.wrap_name)}) + builtin_chains[4].update({'nat': ['PREROUTING', + 'OUTPUT', 'POSTROUTING']}) + + for ip_version in builtin_chains: + if ip_version == 4: + tables = self.ipv4 + elif ip_version == 6: + tables = self.ipv6 + + for table, chains in builtin_chains[ip_version].iteritems(): + for chain in chains: + tables[table].add_chain(chain) + tables[table].add_rule(chain, '-j $%s' % + (chain), wrap=False) + + if not state_less: + # Add a neutron-postrouting-bottom chain. It's intended to be + # shared among the various nova components. We set it as the last + # chain of POSTROUTING chain. + self.ipv4['nat'].add_chain('neutron-postrouting-bottom', + wrap=False) + self.ipv4['nat'].add_rule('POSTROUTING', + '-j neutron-postrouting-bottom', + wrap=False) + + # We add a snat chain to the shared neutron-postrouting-bottom + # chain so that it's applied last. + self.ipv4['nat'].add_chain('snat') + self.ipv4['nat'].add_rule('neutron-postrouting-bottom', + '-j $snat', wrap=False) + + # And then we add a float-snat chain and jump to first thing in + # the snat chain. + self.ipv4['nat'].add_chain('float-snat') + self.ipv4['nat'].add_rule('snat', '-j $float-snat') + + def defer_apply_on(self): + self.iptables_apply_deferred = True + + def defer_apply_off(self): + self.iptables_apply_deferred = False + self._apply() + + def apply(self): + if self.iptables_apply_deferred: + return + + self._apply() + + def _apply(self): + lock_name = 'iptables' + if self.namespace: + lock_name += '-' + self.namespace + + try: + with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True): + LOG.debug(_('Got semaphore / lock "%s"'), lock_name) + return self._apply_synchronized() + finally: + LOG.debug(_('Semaphore / lock released "%s"'), lock_name) + + def _apply_synchronized(self): + """Apply the current in-memory set of iptables rules. + + This will blow away any rules left over from previous runs of the + same component of Nova, and replace them with our current set of + rules. This happens atomically, thanks to iptables-restore. + + """ + s = [('iptables', self.ipv4)] + if self.use_ipv6: + s += [('ip6tables', self.ipv6)] + + for cmd, tables in s: + args = ['%s-save' % (cmd,), '-c'] + if self.namespace: + args = ['ip', 'netns', 'exec', self.namespace] + args + all_tables = self.execute(args, root_helper=self.root_helper) + all_lines = all_tables.split('\n') + for table_name, table in tables.iteritems(): + start, end = self._find_table(all_lines, table_name) + all_lines[start:end] = self._modify_rules( + all_lines[start:end], table, table_name) + + args = ['%s-restore' % (cmd,), '-c'] + if self.namespace: + args = ['ip', 'netns', 'exec', self.namespace] + args + try: + self.execute(args, process_input='\n'.join(all_lines), + root_helper=self.root_helper) + except RuntimeError as r_error: + with excutils.save_and_reraise_exception(): + try: + line_no = int(re.search( + 'iptables-restore: line ([0-9]+?) 
failed', + str(r_error)).group(1)) + context = IPTABLES_ERROR_LINES_OF_CONTEXT + log_start = max(0, line_no - context) + log_end = line_no + context + except AttributeError: + # line error wasn't found, print all lines instead + log_start = 0 + log_end = len(all_lines) + log_lines = ('%7d. %s' % (idx, l) + for idx, l in enumerate( + all_lines[log_start:log_end], + log_start + 1) + ) + LOG.error(_("IPTablesManager.apply failed to apply the " + "following set of iptables rules:\n%s"), + '\n'.join(log_lines)) + LOG.debug(_("IPTablesManager.apply completed with success")) + + def _find_table(self, lines, table_name): + if len(lines) < 3: + # length only <2 when fake iptables + return (0, 0) + try: + start = lines.index('*%s' % table_name) - 1 + except ValueError: + # Couldn't find table_name + LOG.debug(_('Unable to find table %s'), table_name) + return (0, 0) + end = lines[start:].index('COMMIT') + start + 2 + return (start, end) + + def _find_rules_index(self, lines): + seen_chains = False + rules_index = 0 + for rules_index, rule in enumerate(lines): + if not seen_chains: + if rule.startswith(':'): + seen_chains = True + else: + if not rule.startswith(':'): + break + + if not seen_chains: + rules_index = 2 + + return rules_index + + def _find_last_entry(self, filter_list, match_str): + # find a matching entry, starting from the bottom + for s in reversed(filter_list): + s = s.strip() + if match_str in s: + return s + + def _modify_rules(self, current_lines, table, table_name): + unwrapped_chains = table.unwrapped_chains + chains = table.chains + remove_chains = table.remove_chains + rules = table.rules + remove_rules = table.remove_rules + + if not current_lines: + fake_table = ['# Generated by iptables_manager', + '*' + table_name, 'COMMIT', + '# Completed by iptables_manager'] + current_lines = fake_table + + # Fill old_filter with any chains or rules we might have added, + # they could have a [packet:byte] count we want to preserve. + # Fill new_filter with any chains or rules without our name in them. + old_filter, new_filter = [], [] + for line in current_lines: + (old_filter if self.wrap_name in line else + new_filter).append(line.strip()) + + rules_index = self._find_rules_index(new_filter) + + all_chains = [':%s' % name for name in unwrapped_chains] + all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains] + + # Iterate through all the chains, trying to find an existing + # match. + our_chains = [] + for chain in all_chains: + chain_str = str(chain).strip() + + old = self._find_last_entry(old_filter, chain_str) + if not old: + dup = self._find_last_entry(new_filter, chain_str) + new_filter = [s for s in new_filter if chain_str not in s.strip()] + + # if no old or duplicates, use original chain + if old or dup: + chain_str = str(old or dup) + else: + # add-on the [packet:bytes] + chain_str += ' - [0:0]' + + our_chains += [chain_str] + + # Iterate through all the rules, trying to find an existing + # match. + our_rules = [] + bot_rules = [] + for rule in rules: + rule_str = str(rule).strip() + # Further down, we weed out duplicates from the bottom of the + # list, so here we remove the dupes ahead of time. 
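            # Prefer a previously-installed copy of this rule if one exists:
            # lines captured from 'iptables-save -c' carry a leading
            # '[packets:bytes]' counter (e.g. '[12:3456] -A ...') that is
            # worth preserving across the rewrite.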
+ + old = self._find_last_entry(old_filter, rule_str) + if not old: + dup = self._find_last_entry(new_filter, rule_str) + new_filter = [s for s in new_filter if rule_str not in s.strip()] + + # if no old or duplicates, use original rule + if old or dup: + rule_str = str(old or dup) + # backup one index so we write the array correctly + if not old: + rules_index -= 1 + else: + # add-on the [packet:bytes] + rule_str = '[0:0] ' + rule_str + + if rule.top: + # rule.top == True means we want this rule to be at the top. + our_rules += [rule_str] + else: + bot_rules += [rule_str] + + our_rules += bot_rules + + new_filter[rules_index:rules_index] = our_rules + new_filter[rules_index:rules_index] = our_chains + + def _strip_packets_bytes(line): + # strip any [packet:byte] counts at start or end of lines + if line.startswith(':'): + # it's a chain, for example, ":neutron-billing - [0:0]" + line = line.split(':')[1] + line = line.split(' - [', 1)[0] + elif line.startswith('['): + # it's a rule, for example, "[0:0] -A neutron-billing..." + line = line.split('] ', 1)[1] + line = line.strip() + return line + + seen_chains = set() + + def _weed_out_duplicate_chains(line): + # ignore [packet:byte] counts at end of lines + if line.startswith(':'): + line = _strip_packets_bytes(line) + if line in seen_chains: + return False + else: + seen_chains.add(line) + + # Leave it alone + return True + + seen_rules = set() + + def _weed_out_duplicate_rules(line): + if line.startswith('['): + line = _strip_packets_bytes(line) + if line in seen_rules: + return False + else: + seen_rules.add(line) + + # Leave it alone + return True + + def _weed_out_removes(line): + # We need to find exact matches here + if line.startswith(':'): + line = _strip_packets_bytes(line) + for chain in remove_chains: + if chain == line: + remove_chains.remove(chain) + return False + elif line.startswith('['): + line = _strip_packets_bytes(line) + for rule in remove_rules: + rule_str = _strip_packets_bytes(str(rule)) + if rule_str == line: + remove_rules.remove(rule) + return False + + # Leave it alone + return True + + # We filter duplicates. Go through the chains and rules, letting + # the *last* occurrence take precendence since it could have a + # non-zero [packet:byte] count we want to preserve. We also filter + # out anything in the "remove" list. 
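        # Because the helpers above keep the first occurrence they see,
        # walking the reversed list makes the *last* original occurrence win;
        # the second reverse() restores the original ordering.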
+ new_filter.reverse() + new_filter = [line for line in new_filter + if _weed_out_duplicate_chains(line) and + _weed_out_duplicate_rules(line) and + _weed_out_removes(line)] + new_filter.reverse() + + # flush lists, just in case we didn't find something + remove_chains.clear() + for rule in remove_rules: + remove_rules.remove(rule) + + return new_filter + + def _get_traffic_counters_cmd_tables(self, chain, wrap=True): + name = get_chain_name(chain, wrap) + + cmd_tables = [('iptables', key) for key, table in self.ipv4.items() + if name in table._select_chain_set(wrap)] + + cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items() + if name in table._select_chain_set(wrap)] + + return cmd_tables + + def get_traffic_counters(self, chain, wrap=True, zero=False): + """Return the sum of the traffic counters of all rules of a chain.""" + cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap) + if not cmd_tables: + LOG.warn(_('Attempted to get traffic counters of chain %s which ' + 'does not exist'), chain) + return + + name = get_chain_name(chain, wrap) + acc = {'pkts': 0, 'bytes': 0} + + for cmd, table in cmd_tables: + args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x'] + if zero: + args.append('-Z') + if self.namespace: + args = ['ip', 'netns', 'exec', self.namespace] + args + current_table = (self.execute(args, + root_helper=self.root_helper)) + current_lines = current_table.split('\n') + + for line in current_lines[2:]: + if not line: + break + data = line.split() + if (len(data) < 2 or + not data[0].isdigit() or + not data[1].isdigit()): + break + + acc['pkts'] += int(data[0]) + acc['bytes'] += int(data[1]) + + return acc diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ovs_lib.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ovs_lib.py new file mode 100644 index 00000000..4197b4ec --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ovs_lib.py @@ -0,0 +1,564 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
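Before moving on to the OVS helpers below, here is a minimal sketch of how the iptables manager defined above is typically driven. The root helper, chain name and rules are illustrative only, and the import assumes the module is importable from neutron.agent.linux as the patch paths suggest.

    from neutron.agent.linux import iptables_manager

    manager = iptables_manager.IptablesManager(root_helper='sudo',
                                               use_ipv6=False)

    # '$sg-chain' is expanded to '<wrap_name>-sg-chain' by _wrap_target_chain().
    manager.ipv4['filter'].add_chain('sg-chain')
    manager.ipv4['filter'].add_rule('FORWARD', '-j $sg-chain')

    # Batch several updates into a single iptables-save/iptables-restore cycle.
    manager.defer_apply_on()
    manager.ipv4['filter'].add_rule('sg-chain',
                                    '-m state --state ESTABLISHED,RELATED '
                                    '-j ACCEPT')
    manager.defer_apply_off()   # re-enables apply() and flushes pending rules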
+ +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.common import utils as common_utils +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as p_const +# TODO(JLH) Should we remove the explicit include of the ovs plugin here +from neutron.plugins.openvswitch.common import constants + +# Default timeout for ovs-vsctl command +DEFAULT_OVS_VSCTL_TIMEOUT = 10 +OPTS = [ + cfg.IntOpt('ovs_vsctl_timeout', + default=DEFAULT_OVS_VSCTL_TIMEOUT, + help=_('Timeout in seconds for ovs-vsctl commands')), +] +cfg.CONF.register_opts(OPTS) + +LOG = logging.getLogger(__name__) + + +class VifPort: + def __init__(self, port_name, ofport, vif_id, vif_mac, switch): + self.port_name = port_name + self.ofport = ofport + self.vif_id = vif_id + self.vif_mac = vif_mac + self.switch = switch + + def __str__(self): + return ("iface-id=" + self.vif_id + ", vif_mac=" + + self.vif_mac + ", port_name=" + self.port_name + + ", ofport=" + str(self.ofport) + ", bridge_name=" + + self.switch.br_name) + + +class BaseOVS(object): + + def __init__(self, root_helper): + self.root_helper = root_helper + self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout + + def run_vsctl(self, args, check_error=False): + full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args + try: + return utils.execute(full_args, root_helper=self.root_helper) + except Exception as e: + with excutils.save_and_reraise_exception() as ctxt: + LOG.error(_("Unable to execute %(cmd)s. " + "Exception: %(exception)s"), + {'cmd': full_args, 'exception': e}) + if not check_error: + ctxt.reraise = False + + def add_bridge(self, bridge_name): + self.run_vsctl(["--", "--may-exist", "add-br", bridge_name]) + return OVSBridge(bridge_name, self.root_helper) + + def delete_bridge(self, bridge_name): + self.run_vsctl(["--", "--if-exists", "del-br", bridge_name]) + + def bridge_exists(self, bridge_name): + try: + self.run_vsctl(['br-exists', bridge_name], check_error=True) + except RuntimeError as e: + with excutils.save_and_reraise_exception() as ctxt: + if 'Exit code: 2\n' in str(e): + ctxt.reraise = False + return False + return True + + def get_bridge_name_for_port_name(self, port_name): + try: + return self.run_vsctl(['port-to-br', port_name], check_error=True) + except RuntimeError as e: + with excutils.save_and_reraise_exception() as ctxt: + if 'Exit code: 1\n' in str(e): + ctxt.reraise = False + + def port_exists(self, port_name): + return bool(self.get_bridge_name_for_port_name(port_name)) + + +class OVSBridge(BaseOVS): + def __init__(self, br_name, root_helper): + super(OVSBridge, self).__init__(root_helper) + self.br_name = br_name + self.defer_apply_flows = False + self.deferred_flows = {'add': '', 'mod': '', 'del': ''} + + def set_controller(self, controller_names): + vsctl_command = ['--', 'set-controller', self.br_name] + vsctl_command.extend(controller_names) + self.run_vsctl(vsctl_command, check_error=True) + + def del_controller(self): + self.run_vsctl(['--', 'del-controller', self.br_name], + check_error=True) + + def get_controller(self): + res = self.run_vsctl(['--', 'get-controller', self.br_name], + check_error=True) + if res: + return res.strip().split('\n') + return res + + def set_secure_mode(self): + self.run_vsctl(['--', 'set-fail-mode', self.br_name, 'secure'], + check_error=True) + + def 
set_protocols(self, protocols): + self.run_vsctl(['--', 'set', 'bridge', self.br_name, + "protocols=%s" % protocols], + check_error=True) + + def create(self): + self.add_bridge(self.br_name) + + def destroy(self): + self.delete_bridge(self.br_name) + + def reset_bridge(self): + self.destroy() + self.create() + + def add_port(self, port_name): + self.run_vsctl(["--", "--may-exist", "add-port", self.br_name, + port_name]) + return self.get_port_ofport(port_name) + + def delete_port(self, port_name): + self.run_vsctl(["--", "--if-exists", "del-port", self.br_name, + port_name]) + + def set_db_attribute(self, table_name, record, column, value): + args = ["set", table_name, record, "%s=%s" % (column, value)] + self.run_vsctl(args) + + def clear_db_attribute(self, table_name, record, column): + args = ["clear", table_name, record, column] + self.run_vsctl(args) + + def run_ofctl(self, cmd, args, process_input=None): + full_args = ["ovs-ofctl", cmd, self.br_name] + args + try: + return utils.execute(full_args, root_helper=self.root_helper, + process_input=process_input) + except Exception as e: + LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"), + {'cmd': full_args, 'exception': e}) + + def count_flows(self): + flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:] + return len(flow_list) - 1 + + def remove_all_flows(self): + self.run_ofctl("del-flows", []) + + def get_port_ofport(self, port_name): + ofport = self.db_get_val("Interface", port_name, "ofport") + # This can return a non-integer string, like '[]' so ensure a + # common failure case + try: + int(ofport) + return ofport + except ValueError: + return constants.INVALID_OFPORT + + def get_datapath_id(self): + return self.db_get_val('Bridge', + self.br_name, 'datapath_id').strip('"') + + def add_flow(self, **kwargs): + flow_str = _build_flow_expr_str(kwargs, 'add') + if self.defer_apply_flows: + self.deferred_flows['add'] += flow_str + '\n' + else: + self.run_ofctl("add-flow", [flow_str]) + + def mod_flow(self, **kwargs): + flow_str = _build_flow_expr_str(kwargs, 'mod') + if self.defer_apply_flows: + self.deferred_flows['mod'] += flow_str + '\n' + else: + self.run_ofctl("mod-flows", [flow_str]) + + def delete_flows(self, **kwargs): + flow_expr_str = _build_flow_expr_str(kwargs, 'del') + if self.defer_apply_flows: + self.deferred_flows['del'] += flow_expr_str + '\n' + else: + self.run_ofctl("del-flows", [flow_expr_str]) + + def dump_flows_for_table(self, table): + retval = None + flow_str = "table=%s" % table + flows = self.run_ofctl("dump-flows", [flow_str]) + if flows: + retval = '\n'.join(item for item in flows.splitlines() + if 'NXST' not in item) + return retval + + def defer_apply_on(self): + LOG.debug(_('defer_apply_on')) + self.defer_apply_flows = True + + def defer_apply_off(self): + LOG.debug(_('defer_apply_off')) + # Note(ethuleau): stash flows and disable deferred mode. Then apply + # flows from the stashed reference to be sure to not purge flows that + # were added between two ofctl commands. 
+ stashed_deferred_flows, self.deferred_flows = ( + self.deferred_flows, {'add': '', 'mod': '', 'del': ''} + ) + self.defer_apply_flows = False + for action, flows in stashed_deferred_flows.items(): + if flows: + LOG.debug(_('Applying following deferred flows ' + 'to bridge %s'), self.br_name) + for line in flows.splitlines(): + LOG.debug(_('%(action)s: %(flow)s'), + {'action': action, 'flow': line}) + self.run_ofctl('%s-flows' % action, ['-'], flows) + + def add_tunnel_port(self, port_name, remote_ip, local_ip, + tunnel_type=p_const.TYPE_GRE, + vxlan_udp_port=constants.VXLAN_UDP_PORT, + dont_fragment=True): + vsctl_command = ["--", "--may-exist", "add-port", self.br_name, + port_name] + vsctl_command.extend(["--", "set", "Interface", port_name, + "type=%s" % tunnel_type]) + if tunnel_type == p_const.TYPE_VXLAN: + # Only set the VXLAN UDP port if it's not the default + if vxlan_udp_port != constants.VXLAN_UDP_PORT: + vsctl_command.append("options:dst_port=%s" % vxlan_udp_port) + vsctl_command.append(("options:df_default=%s" % + bool(dont_fragment)).lower()) + vsctl_command.extend(["options:remote_ip=%s" % remote_ip, + "options:local_ip=%s" % local_ip, + "options:in_key=flow", + "options:out_key=flow"]) + self.run_vsctl(vsctl_command) + ofport = self.get_port_ofport(port_name) + if (tunnel_type == p_const.TYPE_VXLAN and + ofport == constants.INVALID_OFPORT): + LOG.error(_('Unable to create VXLAN tunnel port. Please ensure ' + 'that an openvswitch version that supports VXLAN is ' + 'installed.')) + return ofport + + def add_patch_port(self, local_name, remote_name): + self.run_vsctl(["add-port", self.br_name, local_name, + "--", "set", "Interface", local_name, + "type=patch", "options:peer=%s" % remote_name]) + return self.get_port_ofport(local_name) + + def db_get_map(self, table, record, column, check_error=False): + output = self.run_vsctl(["get", table, record, column], check_error) + if output: + output_str = output.rstrip("\n\r") + return self.db_str_to_map(output_str) + return {} + + def db_get_val(self, table, record, column, check_error=False): + output = self.run_vsctl(["get", table, record, column], check_error) + if output: + return output.rstrip("\n\r") + + def db_str_to_map(self, full_str): + list = full_str.strip("{}").split(", ") + ret = {} + for e in list: + if e.find("=") == -1: + continue + arr = e.split("=") + ret[arr[0]] = arr[1].strip("\"") + return ret + + def get_port_name_list(self): + res = self.run_vsctl(["list-ports", self.br_name], check_error=True) + if res: + return res.strip().split("\n") + return [] + + def get_port_stats(self, port_name): + return self.db_get_map("Interface", port_name, "statistics") + + def get_xapi_iface_id(self, xs_vif_uuid): + args = ["xe", "vif-param-get", "param-name=other-config", + "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid] + try: + return utils.execute(args, root_helper=self.root_helper).strip() + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Unable to execute %(cmd)s. 
" + "Exception: %(exception)s"), + {'cmd': args, 'exception': e}) + + # returns a VIF object for each VIF port + def get_vif_ports(self): + edge_ports = [] + port_names = self.get_port_name_list() + for name in port_names: + external_ids = self.db_get_map("Interface", name, "external_ids", + check_error=True) + ofport = self.db_get_val("Interface", name, "ofport", + check_error=True) + if "iface-id" in external_ids and "attached-mac" in external_ids: + p = VifPort(name, ofport, external_ids["iface-id"], + external_ids["attached-mac"], self) + edge_ports.append(p) + elif ("xs-vif-uuid" in external_ids and + "attached-mac" in external_ids): + # if this is a xenserver and iface-id is not automatically + # synced to OVS from XAPI, we grab it from XAPI directly + iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"]) + p = VifPort(name, ofport, iface_id, + external_ids["attached-mac"], self) + edge_ports.append(p) + + return edge_ports + + def get_vif_port_set(self): + port_names = self.get_port_name_list() + edge_ports = set() + args = ['--format=json', '--', '--columns=name,external_ids,ofport', + 'list', 'Interface'] + result = self.run_vsctl(args, check_error=True) + if not result: + return edge_ports + for row in jsonutils.loads(result)['data']: + name = row[0] + if name not in port_names: + continue + external_ids = dict(row[1][1]) + # Do not consider VIFs which aren't yet ready + # This can happen when ofport values are either [] or ["set", []] + # We will therefore consider only integer values for ofport + ofport = row[2] + try: + int_ofport = int(ofport) + except (ValueError, TypeError): + LOG.warn(_("Found not yet ready openvswitch port: %s"), row) + else: + if int_ofport > 0: + if ("iface-id" in external_ids and + "attached-mac" in external_ids): + edge_ports.add(external_ids['iface-id']) + elif ("xs-vif-uuid" in external_ids and + "attached-mac" in external_ids): + # if this is a xenserver and iface-id is not + # automatically synced to OVS from XAPI, we grab it + # from XAPI directly + iface_id = self.get_xapi_iface_id( + external_ids["xs-vif-uuid"]) + edge_ports.add(iface_id) + else: + LOG.warn(_("Found failed openvswitch port: %s"), row) + return edge_ports + + def get_port_tag_dict(self): + """Get a dict of port names and associated vlan tags. + + e.g. the returned dict is of the following form:: + + {u'int-br-eth2': [], + u'patch-tun': [], + u'qr-76d9e6b6-21': 1, + u'tapce5318ff-78': 1, + u'tape1400310-e6': 1} + + The TAG ID is only available in the "Port" table and is not available + in the "Interface" table queried by the get_vif_port_set() method. 
+ + """ + port_names = self.get_port_name_list() + args = ['--format=json', '--', '--columns=name,tag', 'list', 'Port'] + result = self.run_vsctl(args, check_error=True) + port_tag_dict = {} + if not result: + return port_tag_dict + for name, tag in jsonutils.loads(result)['data']: + if name not in port_names: + continue + # 'tag' can be [u'set', []] or an integer + if isinstance(tag, list): + tag = tag[1] + port_tag_dict[name] = tag + return port_tag_dict + + def get_vif_port_by_id(self, port_id): + args = ['--format=json', '--', '--columns=external_ids,name,ofport', + 'find', 'Interface', + 'external_ids:iface-id="%s"' % port_id] + result = self.run_vsctl(args) + if not result: + return + json_result = jsonutils.loads(result) + try: + # Retrieve the indexes of the columns we're looking for + headings = json_result['headings'] + ext_ids_idx = headings.index('external_ids') + name_idx = headings.index('name') + ofport_idx = headings.index('ofport') + # If data attribute is missing or empty the line below will raise + # an exeception which will be captured in this block. + # We won't deal with the possibility of ovs-vsctl return multiple + # rows since the interface identifier is unique + data = json_result['data'][0] + port_name = data[name_idx] + switch = get_bridge_for_iface(self.root_helper, port_name) + if switch != self.br_name: + LOG.info(_("Port: %(port_name)s is on %(switch)s," + " not on %(br_name)s"), {'port_name': port_name, + 'switch': switch, + 'br_name': self.br_name}) + return + ofport = data[ofport_idx] + # ofport must be integer otherwise return None + if not isinstance(ofport, int) or ofport == -1: + LOG.warn(_("ofport: %(ofport)s for VIF: %(vif)s is not a " + "positive integer"), {'ofport': ofport, + 'vif': port_id}) + return + # Find VIF's mac address in external ids + ext_id_dict = dict((item[0], item[1]) for item in + data[ext_ids_idx][1]) + vif_mac = ext_id_dict['attached-mac'] + return VifPort(port_name, ofport, port_id, vif_mac, self) + except Exception as e: + LOG.warn(_("Unable to parse interface details. Exception: %s"), e) + return + + def delete_ports(self, all_ports=False): + if all_ports: + port_names = self.get_port_name_list() + else: + port_names = (port.port_name for port in self.get_vif_ports()) + + for port_name in port_names: + self.delete_port(port_name) + + def get_local_port_mac(self): + """Retrieve the mac of the bridge's local port.""" + address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address + if address: + return address + else: + msg = _('Unable to determine mac address for %s') % self.br_name + raise Exception(msg) + + def __enter__(self): + self.create() + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.destroy() + + +def get_bridge_for_iface(root_helper, iface): + args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout, + "iface-to-br", iface] + try: + return utils.execute(args, root_helper=root_helper).strip() + except Exception: + LOG.exception(_("Interface %s not found."), iface) + return None + + +def get_bridges(root_helper): + args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout, + "list-br"] + try: + return utils.execute(args, root_helper=root_helper).strip().split("\n") + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Unable to retrieve bridges. 
Exception: %s"), e) + + +def get_bridge_external_bridge_id(root_helper, bridge): + args = ["ovs-vsctl", "--timeout=2", "br-get-external-id", + bridge, "bridge-id"] + try: + return utils.execute(args, root_helper=root_helper).strip() + except Exception: + LOG.exception(_("Bridge %s not found."), bridge) + return None + + +def _build_flow_expr_str(flow_dict, cmd): + flow_expr_arr = [] + actions = None + + if cmd == 'add': + flow_expr_arr.append("hard_timeout=%s" % + flow_dict.pop('hard_timeout', '0')) + flow_expr_arr.append("idle_timeout=%s" % + flow_dict.pop('idle_timeout', '0')) + flow_expr_arr.append("priority=%s" % + flow_dict.pop('priority', '1')) + elif 'priority' in flow_dict: + msg = _("Cannot match priority on flow deletion or modification") + raise exceptions.InvalidInput(error_message=msg) + + if cmd != 'del': + if "actions" not in flow_dict: + msg = _("Must specify one or more actions on flow addition" + " or modification") + raise exceptions.InvalidInput(error_message=msg) + actions = "actions=%s" % flow_dict.pop('actions') + + for key, value in flow_dict.iteritems(): + if key == 'proto': + flow_expr_arr.append(value) + else: + flow_expr_arr.append("%s=%s" % (key, str(value))) + + if actions: + flow_expr_arr.append(actions) + + return ','.join(flow_expr_arr) + + +def ofctl_arg_supported(root_helper, cmd, args): + '''Verify if ovs-ofctl binary supports command with specific args. + + :param root_helper: utility to use when running shell cmds. + :param cmd: ovs-vsctl command to use for test. + :param args: arguments to test with command. + :returns: a boolean if the args supported. + ''' + supported = True + br_name = 'br-test-%s' % common_utils.get_random_string(6) + test_br = OVSBridge(br_name, root_helper) + test_br.reset_bridge() + + full_args = ["ovs-ofctl", cmd, test_br.br_name] + args + try: + utils.execute(full_args, root_helper=root_helper) + except Exception: + supported = False + + test_br.destroy() + return supported diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ovsdb_monitor.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ovsdb_monitor.py new file mode 100644 index 00000000..4dcdc57e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/ovsdb_monitor.py @@ -0,0 +1,105 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import eventlet + +from neutron.agent.linux import async_process +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class OvsdbMonitor(async_process.AsyncProcess): + """Manages an invocation of 'ovsdb-client monitor'.""" + + def __init__(self, table_name, columns=None, format=None, + root_helper=None, respawn_interval=None): + + cmd = ['ovsdb-client', 'monitor', table_name] + if columns: + cmd.append(','.join(columns)) + if format: + cmd.append('--format=%s' % format) + super(OvsdbMonitor, self).__init__(cmd, + root_helper=root_helper, + respawn_interval=respawn_interval) + + def _read_stdout(self): + data = self._process.stdout.readline() + if not data: + return + self._stdout_lines.put(data) + LOG.debug(_('Output received from ovsdb monitor: %s') % data) + return data + + def _read_stderr(self): + data = super(OvsdbMonitor, self)._read_stderr() + if data: + LOG.error(_('Error received from ovsdb monitor: %s') % data) + # Do not return value to ensure that stderr output will + # stop the monitor. + + +class SimpleInterfaceMonitor(OvsdbMonitor): + """Monitors the Interface table of the local host's ovsdb for changes. + + The has_updates() method indicates whether changes to the ovsdb + Interface table have been detected since the monitor started or + since the previous access. + """ + + def __init__(self, root_helper=None, respawn_interval=None): + super(SimpleInterfaceMonitor, self).__init__( + 'Interface', + columns=['name', 'ofport'], + format='json', + root_helper=root_helper, + respawn_interval=respawn_interval, + ) + self.data_received = False + + @property + def is_active(self): + return (self.data_received and + self._kill_event and + not self._kill_event.ready()) + + @property + def has_updates(self): + """Indicate whether the ovsdb Interface table has been updated. + + True will be returned if the monitor process is not active. + This 'failing open' minimizes the risk of falsely indicating + the absence of updates at the expense of potential false + positives. + """ + return bool(list(self.iter_stdout())) or not self.is_active + + def start(self, block=False, timeout=5): + super(SimpleInterfaceMonitor, self).start() + if block: + eventlet.timeout.Timeout(timeout) + while not self.is_active: + eventlet.sleep() + + def _kill(self, *args, **kwargs): + self.data_received = False + super(SimpleInterfaceMonitor, self)._kill(*args, **kwargs) + + def _read_stdout(self): + data = super(SimpleInterfaceMonitor, self)._read_stdout() + if data and not self.data_received: + self.data_received = True + return data diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/polling.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/polling.py new file mode 100644 index 00000000..23168085 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/polling.py @@ -0,0 +1,112 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
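A brief sketch of how the interface monitor above might be consumed; the root helper value is illustrative and the resync step is only a placeholder.

    from neutron.agent.linux import ovsdb_monitor

    monitor = ovsdb_monitor.SimpleInterfaceMonitor(root_helper='sudo')
    monitor.start(block=True, timeout=10)   # wait for the first monitor output

    if monitor.has_updates:
        # An Interface row changed since the last check (or the monitor is
        # not active, in which case has_updates deliberately "fails open").
        pass   # trigger a port rescan here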
+ +import contextlib + +import eventlet + +from neutron.agent.linux import ovsdb_monitor +from neutron.plugins.openvswitch.common import constants + + +@contextlib.contextmanager +def get_polling_manager(minimize_polling=False, + root_helper=None, + ovsdb_monitor_respawn_interval=( + constants.DEFAULT_OVSDBMON_RESPAWN)): + if minimize_polling: + pm = InterfacePollingMinimizer( + root_helper=root_helper, + ovsdb_monitor_respawn_interval=ovsdb_monitor_respawn_interval) + pm.start() + else: + pm = AlwaysPoll() + try: + yield pm + finally: + if minimize_polling: + pm.stop() + + +class BasePollingManager(object): + + def __init__(self): + self._force_polling = False + self._polling_completed = True + + def force_polling(self): + self._force_polling = True + + def polling_completed(self): + self._polling_completed = True + + def _is_polling_required(self): + raise NotImplemented + + @property + def is_polling_required(self): + # Always consume the updates to minimize polling. + polling_required = self._is_polling_required() + + # Polling is required regardless of whether updates have been + # detected. + if self._force_polling: + self._force_polling = False + polling_required = True + + # Polling is required if not yet done for previously detected + # updates. + if not self._polling_completed: + polling_required = True + + if polling_required: + # Track whether polling has been completed to ensure that + # polling can be required until the caller indicates via a + # call to polling_completed() that polling has been + # successfully performed. + self._polling_completed = False + + return polling_required + + +class AlwaysPoll(BasePollingManager): + + @property + def is_polling_required(self): + return True + + +class InterfacePollingMinimizer(BasePollingManager): + """Monitors ovsdb to determine when polling is required.""" + + def __init__(self, root_helper=None, + ovsdb_monitor_respawn_interval=( + constants.DEFAULT_OVSDBMON_RESPAWN)): + + super(InterfacePollingMinimizer, self).__init__() + self._monitor = ovsdb_monitor.SimpleInterfaceMonitor( + root_helper=root_helper, + respawn_interval=ovsdb_monitor_respawn_interval) + + def start(self): + self._monitor.start() + + def stop(self): + self._monitor.stop() + + def _is_polling_required(self): + # Maximize the chances of update detection having a chance to + # collect output. + eventlet.sleep() + return self._monitor.has_updates diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/utils.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/utils.py new file mode 100644 index 00000000..f7cdaf2f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/linux/utils.py @@ -0,0 +1,128 @@ +# Copyright 2012 Locaweb. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Juliano Martinez, Locaweb. 
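A rough sketch of the polling manager above inside an agent loop, assuming an eventlet-based loop like the OVS agent's; the rescan step is a stand-in for the agent's real work and the root helper is illustrative.

    import eventlet

    from neutron.agent.linux import polling

    with polling.get_polling_manager(minimize_polling=True,
                                     root_helper='sudo') as pm:
        while True:
            if pm.is_polling_required:
                # ... rescan ports and reprogram flows here ...
                pm.polling_completed()
            eventlet.sleep(2)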
+ +import fcntl +import os +import shlex +import socket +import struct +import tempfile + +from eventlet.green import subprocess +from eventlet import greenthread + +from neutron.common import constants +from neutron.common import utils +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def create_process(cmd, root_helper=None, addl_env=None): + """Create a process object for the given command. + + The return value will be a tuple of the process object and the + list of command arguments used to create it. + """ + if root_helper: + cmd = shlex.split(root_helper) + cmd + cmd = map(str, cmd) + + LOG.debug(_("Running command: %s"), cmd) + env = os.environ.copy() + if addl_env: + env.update(addl_env) + + obj = utils.subprocess_popen(cmd, shell=False, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + + return obj, cmd + + +def execute(cmd, root_helper=None, process_input=None, addl_env=None, + check_exit_code=True, return_stderr=False): + try: + obj, cmd = create_process(cmd, root_helper=root_helper, + addl_env=addl_env) + _stdout, _stderr = (process_input and + obj.communicate(process_input) or + obj.communicate()) + obj.stdin.close() + m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n" + "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode, + 'stdout': _stdout, 'stderr': _stderr} + if obj.returncode: + LOG.error(m) + if check_exit_code: + raise RuntimeError(m) + else: + LOG.debug(m) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + + return return_stderr and (_stdout, _stderr) or _stdout + + +def get_interface_mac(interface): + MAC_START = 18 + MAC_END = 24 + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + info = fcntl.ioctl(s.fileno(), 0x8927, + struct.pack('256s', interface[:constants.DEVICE_NAME_MAX_LEN])) + return ''.join(['%02x:' % ord(char) + for char in info[MAC_START:MAC_END]])[:-1] + + +def replace_file(file_name, data): + """Replaces the contents of file_name with data in a safe manner. + + First write to a temp file and then rename. Since POSIX renames are + atomic, the file is unlikely to be corrupted by competing writes. + + We create the tempfile on the same device to ensure that it can be renamed. 
+ """ + + base_dir = os.path.dirname(os.path.abspath(file_name)) + tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) + tmp_file.write(data) + tmp_file.close() + os.chmod(tmp_file.name, 0o644) + os.rename(tmp_file.name, file_name) + + +def find_child_pids(pid): + """Retrieve a list of the pids of child processes of the given pid.""" + + try: + raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid=']) + except RuntimeError as e: + # Unexpected errors are the responsibility of the caller + with excutils.save_and_reraise_exception() as ctxt: + # Exception has already been logged by execute + no_children_found = 'Exit code: 1' in str(e) + if no_children_found: + ctxt.reraise = False + return [] + return [x.strip() for x in raw_pids.split('\n') if x.strip()] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/__init__.py new file mode 100644 index 00000000..f7461d23 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/agent.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/agent.py new file mode 100644 index 00000000..c56c78d7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/agent.py @@ -0,0 +1,390 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +import hashlib +import hmac +import os +import socket +import sys + +import eventlet +eventlet.monkey_patch() + +import httplib2 +from neutronclient.v2_0 import client +from oslo.config import cfg +import six.moves.urllib.parse as urlparse +import webob + +from neutron.agent.common import config as agent_conf +from neutron.agent import rpc as agent_rpc +from neutron.common import config +from neutron.common import constants as n_const +from neutron.common import topics +from neutron.common import utils +from neutron import context +from neutron.openstack.common.cache import cache +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import service +from neutron import wsgi + +LOG = logging.getLogger(__name__) + + +class MetadataProxyHandler(object): + OPTS = [ + cfg.StrOpt('admin_user', + help=_("Admin user")), + cfg.StrOpt('admin_password', + help=_("Admin password"), + secret=True), + cfg.StrOpt('admin_tenant_name', + help=_("Admin tenant name")), + cfg.StrOpt('auth_url', + help=_("Authentication URL")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('auth_region', + help=_("Authentication region")), + cfg.BoolOpt('auth_insecure', + default=False, + help=_("Turn off verification of the certificate for" + " ssl")), + cfg.StrOpt('auth_ca_cert', + help=_("Certificate Authority public key (CA cert) " + "file for ssl")), + cfg.StrOpt('endpoint_type', + default='adminURL', + help=_("Network service endpoint type to pull from " + "the keystone catalog")), + cfg.StrOpt('nova_metadata_ip', default='127.0.0.1', + help=_("IP address used by Nova metadata server.")), + cfg.IntOpt('nova_metadata_port', + default=8775, + help=_("TCP Port used by Nova metadata server.")), + cfg.StrOpt('metadata_proxy_shared_secret', + default='', + help=_('Shared secret to sign instance-id request'), + secret=True), + cfg.StrOpt('nova_metadata_protocol', + default='http', + choices=['http', 'https'], + help=_("Protocol to access nova metadata, http or https")), + cfg.BoolOpt('nova_metadata_insecure', default=False, + help=_("Allow to perform insecure SSL (https) requests to " + "nova metadata")), + cfg.StrOpt('nova_client_cert', + default='', + help=_("Client certificate for nova metadata api server.")), + cfg.StrOpt('nova_client_priv_key', + default='', + help=_("Private key of client certificate.")) + ] + + def __init__(self, conf): + self.conf = conf + self.auth_info = {} + if self.conf.cache_url: + self._cache = cache.get_cache(self.conf.cache_url) + else: + self._cache = False + + def _get_neutron_client(self): + qclient = client.Client( + username=self.conf.admin_user, + password=self.conf.admin_password, + tenant_name=self.conf.admin_tenant_name, + auth_url=self.conf.auth_url, + auth_strategy=self.conf.auth_strategy, + region_name=self.conf.auth_region, + token=self.auth_info.get('auth_token'), + insecure=self.conf.auth_insecure, + ca_cert=self.conf.auth_ca_cert, + endpoint_url=self.auth_info.get('endpoint_url'), + endpoint_type=self.conf.endpoint_type + ) + return qclient + + @webob.dec.wsgify(RequestClass=webob.Request) + def __call__(self, req): + try: + LOG.debug(_("Request: %s"), req) + + instance_id, tenant_id = self._get_instance_and_tenant_id(req) + if instance_id: + return self._proxy_request(instance_id, tenant_id, req) + else: + return webob.exc.HTTPNotFound() + + except 
Exception: + LOG.exception(_("Unexpected error.")) + msg = _('An unknown error has occurred. ' + 'Please try your request again.') + return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + + @utils.cache_method_results + def _get_router_networks(self, router_id): + """Find all networks connected to given router.""" + qclient = self._get_neutron_client() + + internal_ports = qclient.list_ports( + device_id=router_id, + device_owner=n_const.DEVICE_OWNER_ROUTER_INTF)['ports'] + return tuple(p['network_id'] for p in internal_ports) + + @utils.cache_method_results + def _get_ports_for_remote_address(self, remote_address, networks): + """Get list of ports that has given ip address and are part of + given networks. + + :param networks: list of networks in which the ip address will be + searched for + + """ + qclient = self._get_neutron_client() + + return qclient.list_ports( + network_id=networks, + fixed_ips=['ip_address=%s' % remote_address])['ports'] + + def _get_ports(self, remote_address, network_id=None, router_id=None): + """Search for all ports that contain passed ip address and belongs to + given network. + + If no network is passed ports are searched on all networks connected to + given router. Either one of network_id or router_id must be passed. + + """ + if network_id: + networks = (network_id,) + elif router_id: + networks = self._get_router_networks(router_id) + else: + raise TypeError(_("Either one of parameter network_id or router_id" + " must be passed to _get_ports method.")) + + return self._get_ports_for_remote_address(remote_address, networks) + + def _get_instance_and_tenant_id(self, req): + qclient = self._get_neutron_client() + + remote_address = req.headers.get('X-Forwarded-For') + network_id = req.headers.get('X-Neutron-Network-ID') + router_id = req.headers.get('X-Neutron-Router-ID') + + ports = self._get_ports(remote_address, network_id, router_id) + + self.auth_info = qclient.get_auth_info() + if len(ports) == 1: + return ports[0]['device_id'], ports[0]['tenant_id'] + return None, None + + def _proxy_request(self, instance_id, tenant_id, req): + headers = { + 'X-Forwarded-For': req.headers.get('X-Forwarded-For'), + 'X-Instance-ID': instance_id, + 'X-Tenant-ID': tenant_id, + 'X-Instance-ID-Signature': self._sign_instance_id(instance_id) + } + + nova_ip_port = '%s:%s' % (self.conf.nova_metadata_ip, + self.conf.nova_metadata_port) + url = urlparse.urlunsplit(( + self.conf.nova_metadata_protocol, + nova_ip_port, + req.path_info, + req.query_string, + '')) + + h = httplib2.Http(ca_certs=self.conf.auth_ca_cert, + disable_ssl_certificate_validation= + self.conf.nova_metadata_insecure) + if self.conf.nova_client_cert and self.conf.nova_client_priv_key: + h.add_certificate(self.conf.nova_client_priv_key, + self.conf.nova_client_cert, + nova_ip_port) + resp, content = h.request(url, method=req.method, headers=headers, + body=req.body) + + if resp.status == 200: + LOG.debug(str(resp)) + req.response.content_type = resp['content-type'] + req.response.body = content + return req.response + elif resp.status == 403: + msg = _( + 'The remote metadata server responded with Forbidden. This ' + 'response usually occurs when shared secrets do not match.' + ) + LOG.warn(msg) + return webob.exc.HTTPForbidden() + elif resp.status == 404: + return webob.exc.HTTPNotFound() + elif resp.status == 409: + return webob.exc.HTTPConflict() + elif resp.status == 500: + msg = _( + 'Remote metadata server experienced an internal server error.' 
+ ) + LOG.warn(msg) + return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + else: + raise Exception(_('Unexpected response code: %s') % resp.status) + + def _sign_instance_id(self, instance_id): + return hmac.new(self.conf.metadata_proxy_shared_secret, + instance_id, + hashlib.sha256).hexdigest() + + +class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol): + def __init__(self, request, client_address, server): + if client_address == '': + client_address = ('', 0) + # base class is old-style, so super does not work properly + eventlet.wsgi.HttpProtocol.__init__(self, request, client_address, + server) + + +class WorkerService(wsgi.WorkerService): + def start(self): + self._server = self._service.pool.spawn(self._service._run, + self._application, + self._service._socket) + + +class UnixDomainWSGIServer(wsgi.Server): + def __init__(self, name): + self._socket = None + self._launcher = None + self._server = None + super(UnixDomainWSGIServer, self).__init__(name) + + def start(self, application, file_socket, workers, backlog): + self._socket = eventlet.listen(file_socket, + family=socket.AF_UNIX, + backlog=backlog) + if workers < 1: + # For the case where only one process is required. + self._server = self.pool.spawn_n(self._run, application, + self._socket) + else: + # Minimize the cost of checking for child exit by extending the + # wait interval past the default of 0.01s. + self._launcher = service.ProcessLauncher(wait_interval=1.0) + self._server = WorkerService(self, application) + self._launcher.launch_service(self._server, workers=workers) + + def _run(self, application, socket): + """Start a WSGI service in a new green thread.""" + logger = logging.getLogger('eventlet.wsgi.server') + eventlet.wsgi.server(socket, + application, + custom_pool=self.pool, + protocol=UnixDomainHttpProtocol, + log=logging.WritableLogger(logger)) + + +class UnixDomainMetadataProxy(object): + OPTS = [ + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location for Metadata Proxy UNIX domain socket')), + cfg.IntOpt('metadata_workers', + default=utils.cpu_count() // 2, + help=_('Number of separate worker processes for metadata ' + 'server')), + cfg.IntOpt('metadata_backlog', + default=4096, + help=_('Number of backlog requests to configure the ' + 'metadata server socket with')) + ] + + def __init__(self, conf): + self.conf = conf + + dirname = os.path.dirname(cfg.CONF.metadata_proxy_socket) + if os.path.isdir(dirname): + try: + os.unlink(cfg.CONF.metadata_proxy_socket) + except OSError: + with excutils.save_and_reraise_exception() as ctxt: + if not os.path.exists(cfg.CONF.metadata_proxy_socket): + ctxt.reraise = False + else: + os.makedirs(dirname, 0o755) + + self._init_state_reporting() + + def _init_state_reporting(self): + self.context = context.get_admin_context_without_session() + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-metadata-agent', + 'host': cfg.CONF.host, + 'topic': 'N/A', + 'configurations': { + 'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket, + 'nova_metadata_ip': cfg.CONF.nova_metadata_ip, + 'nova_metadata_port': cfg.CONF.nova_metadata_port, + }, + 'start_flag': True, + 'agent_type': n_const.AGENT_TYPE_METADATA} + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + try: + 
self.state_rpc.report_state( + self.context, + self.agent_state, + use_call=self.agent_state.get('start_flag')) + except AttributeError: + # This means the server does not support report_state + LOG.warn(_('Neutron server does not support state report.' + ' State report for this agent will be disabled.')) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + return + self.agent_state.pop('start_flag', None) + + def run(self): + server = UnixDomainWSGIServer('neutron-metadata-agent') + server.start(MetadataProxyHandler(self.conf), + self.conf.metadata_proxy_socket, + workers=self.conf.metadata_workers, + backlog=self.conf.metadata_backlog) + server.wait() + + +def main(): + cfg.CONF.register_opts(UnixDomainMetadataProxy.OPTS) + cfg.CONF.register_opts(MetadataProxyHandler.OPTS) + cache.register_oslo_configs(cfg.CONF) + cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5') + agent_conf.register_agent_state_opts_helper(cfg.CONF) + config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + utils.log_opt_values(LOG) + proxy = UnixDomainMetadataProxy(cfg.CONF) + proxy.run() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/namespace_proxy.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/namespace_proxy.py new file mode 100644 index 00000000..c6a58c2e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/metadata/namespace_proxy.py @@ -0,0 +1,182 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +import httplib +import socket + +import eventlet +eventlet.monkey_patch() + +import httplib2 +from oslo.config import cfg +import six.moves.urllib.parse as urlparse +import webob + +from neutron.agent.linux import daemon +from neutron.common import config +from neutron.common import utils +from neutron.openstack.common import log as logging +from neutron import wsgi + +LOG = logging.getLogger(__name__) + + +class UnixDomainHTTPConnection(httplib.HTTPConnection): + """Connection class for HTTP over UNIX domain socket.""" + def __init__(self, host, port=None, strict=None, timeout=None, + proxy_info=None): + httplib.HTTPConnection.__init__(self, host, port, strict) + self.timeout = timeout + + def connect(self): + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + if self.timeout: + self.sock.settimeout(self.timeout) + self.sock.connect(cfg.CONF.metadata_proxy_socket) + + +class NetworkMetadataProxyHandler(object): + """Proxy AF_INET metadata request through Unix Domain socket. + + The Unix domain socket allows the proxy access resource that are not + accessible within the isolated tenant context. + """ + + def __init__(self, network_id=None, router_id=None): + self.network_id = network_id + self.router_id = router_id + + if network_id is None and router_id is None: + msg = _('network_id and router_id are None. 
One must be provided.') + raise ValueError(msg) + + @webob.dec.wsgify(RequestClass=webob.Request) + def __call__(self, req): + LOG.debug(_("Request: %s"), req) + try: + return self._proxy_request(req.remote_addr, + req.method, + req.path_info, + req.query_string, + req.body) + except Exception: + LOG.exception(_("Unexpected error.")) + msg = _('An unknown error has occurred. ' + 'Please try your request again.') + return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + + def _proxy_request(self, remote_address, method, path_info, + query_string, body): + headers = { + 'X-Forwarded-For': remote_address, + } + + if self.router_id: + headers['X-Neutron-Router-ID'] = self.router_id + else: + headers['X-Neutron-Network-ID'] = self.network_id + + url = urlparse.urlunsplit(( + 'http', + '169.254.169.254', # a dummy value to make the request proper + path_info, + query_string, + '')) + + h = httplib2.Http() + resp, content = h.request( + url, + method=method, + headers=headers, + body=body, + connection_type=UnixDomainHTTPConnection) + + if resp.status == 200: + LOG.debug(resp) + LOG.debug(content) + response = webob.Response() + response.status = resp.status + response.headers['Content-Type'] = resp['content-type'] + response.body = content + return response + elif resp.status == 404: + return webob.exc.HTTPNotFound() + elif resp.status == 409: + return webob.exc.HTTPConflict() + elif resp.status == 500: + msg = _( + 'Remote metadata server experienced an internal server error.' + ) + LOG.debug(msg) + return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + else: + raise Exception(_('Unexpected response code: %s') % resp.status) + + +class ProxyDaemon(daemon.Daemon): + def __init__(self, pidfile, port, network_id=None, router_id=None): + uuid = network_id or router_id + super(ProxyDaemon, self).__init__(pidfile, uuid=uuid) + self.network_id = network_id + self.router_id = router_id + self.port = port + + def run(self): + handler = NetworkMetadataProxyHandler( + self.network_id, + self.router_id) + proxy = wsgi.Server('neutron-network-metadata-proxy') + proxy.start(handler, self.port) + proxy.wait() + + +def main(): + opts = [ + cfg.StrOpt('network_id', + help=_('Network that will have instance metadata ' + 'proxied.')), + cfg.StrOpt('router_id', + help=_('Router that will have connected instances\' ' + 'metadata proxied.')), + cfg.StrOpt('pid_file', + help=_('Location of pid file of this process.')), + cfg.BoolOpt('daemonize', + default=True, + help=_('Run as daemon.')), + cfg.IntOpt('metadata_port', + default=9697, + help=_("TCP Port to listen for metadata server " + "requests.")), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')) + ] + + cfg.CONF.register_cli_opts(opts) + # Don't get the default configuration file + cfg.CONF(project='neutron', default_config_files=[]) + config.setup_logging(cfg.CONF) + utils.log_opt_values(LOG) + proxy = ProxyDaemon(cfg.CONF.pid_file, + cfg.CONF.metadata_port, + network_id=cfg.CONF.network_id, + router_id=cfg.CONF.router_id) + + if cfg.CONF.daemonize: + proxy.start() + else: + proxy.run() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/netns_cleanup_util.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/netns_cleanup_util.py new file mode 100644 index 00000000..292c179a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/netns_cleanup_util.py @@ -0,0 +1,174 @@ +# Copyright (c) 2012 OpenStack Foundation. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.common import config as agent_config +from neutron.agent import dhcp_agent +from neutron.agent import l3_agent +from neutron.agent.linux import dhcp +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.api.v2 import attributes +from neutron.common import config +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) +NS_MANGLING_PATTERN = ('(%s|%s)' % (dhcp.NS_PREFIX, l3_agent.NS_PREFIX) + + attributes.UUID_PATTERN) + + +class FakeDhcpPlugin(object): + """Fake RPC plugin to bypass any RPC calls.""" + def __getattribute__(self, name): + def fake_method(*args): + pass + return fake_method + + +def setup_conf(): + """Setup the cfg for the clean up utility. + + Use separate setup_conf for the utility because there are many options + from the main config that do not apply during clean-up. + """ + + cli_opts = [ + cfg.BoolOpt('force', + default=False, + help=_('Delete the namespace by removing all devices.')), + ] + + conf = cfg.CONF + conf.register_cli_opts(cli_opts) + agent_config.register_interface_driver_opts_helper(conf) + agent_config.register_use_namespaces_opts_helper(conf) + agent_config.register_root_helper(conf) + conf.register_opts(dhcp.OPTS) + conf.register_opts(dhcp_agent.DhcpAgent.OPTS) + conf.register_opts(interface.OPTS) + return conf + + +def kill_dhcp(conf, namespace): + """Disable DHCP for a network if DHCP is still active.""" + root_helper = agent_config.get_root_helper(conf) + network_id = namespace.replace(dhcp.NS_PREFIX, '') + + dhcp_driver = importutils.import_object( + conf.dhcp_driver, + conf=conf, + network=dhcp.NetModel(conf.use_namespaces, {'id': network_id}), + root_helper=root_helper, + plugin=FakeDhcpPlugin()) + + if dhcp_driver.active: + dhcp_driver.disable() + + +def eligible_for_deletion(conf, namespace, force=False): + """Determine whether a namespace is eligible for deletion. + + Eligibility is determined by having only the lo device or if force + is passed as a parameter. 
+ """ + + # filter out namespaces without UUID as the name + if not re.match(NS_MANGLING_PATTERN, namespace): + return False + + root_helper = agent_config.get_root_helper(conf) + ip = ip_lib.IPWrapper(root_helper, namespace) + return force or ip.namespace_is_empty() + + +def unplug_device(conf, device): + try: + device.link.delete() + except RuntimeError: + root_helper = agent_config.get_root_helper(conf) + # Maybe the device is OVS port, so try to delete + bridge_name = ovs_lib.get_bridge_for_iface(root_helper, device.name) + if bridge_name: + bridge = ovs_lib.OVSBridge(bridge_name, root_helper) + bridge.delete_port(device.name) + else: + LOG.debug(_('Unable to find bridge for device: %s'), device.name) + + +def destroy_namespace(conf, namespace, force=False): + """Destroy a given namespace. + + If force is True, then dhcp (if it exists) will be disabled and all + devices will be forcibly removed. + """ + + try: + root_helper = agent_config.get_root_helper(conf) + ip = ip_lib.IPWrapper(root_helper, namespace) + + if force: + kill_dhcp(conf, namespace) + # NOTE: The dhcp driver will remove the namespace if is it empty, + # so a second check is required here. + if ip.netns.exists(namespace): + for device in ip.get_devices(exclude_loopback=True): + unplug_device(conf, device) + + ip.garbage_collect_namespace() + except Exception: + LOG.exception(_('Error unable to destroy namespace: %s'), namespace) + + +def main(): + """Main method for cleaning up network namespaces. + + This method will make two passes checking for namespaces to delete. The + process will identify candidates, sleep, and call garbage collect. The + garbage collection will re-verify that the namespace meets the criteria for + deletion (ie it is empty). The period of sleep and the 2nd pass allow + time for the namespace state to settle, so that the check prior deletion + will re-confirm the namespace is empty. + + The utility is designed to clean-up after the forced or unexpected + termination of Neutron agents. + + The --force flag should only be used as part of the cleanup of a devstack + installation as it will blindly purge namespaces and their devices. This + option also kills any lingering DHCP instances. + """ + conf = setup_conf() + conf() + config.setup_logging(conf) + + root_helper = agent_config.get_root_helper(conf) + # Identify namespaces that are candidates for deletion. + candidates = [ns for ns in + ip_lib.IPWrapper.get_namespaces(root_helper) + if eligible_for_deletion(conf, ns, conf.force)] + + if candidates: + eventlet.sleep(2) + + for namespace in candidates: + destroy_namespace(conf, namespace, conf.force) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/ovs_cleanup_util.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/ovs_cleanup_util.py new file mode 100644 index 00000000..1179f103 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/ovs_cleanup_util.py @@ -0,0 +1,110 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.common import config as agent_config +from neutron.agent import l3_agent +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.common import config +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def setup_conf(): + """Setup the cfg for the clean up utility. + + Use separate setup_conf for the utility because there are many options + from the main config that do not apply during clean-up. + """ + opts = [ + cfg.BoolOpt('ovs_all_ports', + default=False, + help=_('True to delete all ports on all the OpenvSwitch ' + 'bridges. False to delete ports created by ' + 'Neutron on integration and external network ' + 'bridges.')) + ] + + conf = cfg.CONF + conf.register_cli_opts(opts) + conf.register_opts(l3_agent.L3NATAgent.OPTS) + conf.register_opts(interface.OPTS) + agent_config.register_interface_driver_opts_helper(conf) + agent_config.register_use_namespaces_opts_helper(conf) + agent_config.register_root_helper(conf) + return conf + + +def collect_neutron_ports(bridges, root_helper): + """Collect ports created by Neutron from OVS.""" + ports = [] + for bridge in bridges: + ovs = ovs_lib.OVSBridge(bridge, root_helper) + ports += [port.port_name for port in ovs.get_vif_ports()] + return ports + + +def delete_neutron_ports(ports, root_helper): + """Delete non-internal ports created by Neutron + + Non-internal OVS ports need to be removed manually. + """ + for port in ports: + if ip_lib.device_exists(port): + device = ip_lib.IPDevice(port, root_helper) + device.link.delete() + LOG.info(_("Delete %s"), port) + + +def main(): + """Main method for cleaning up OVS bridges. + + The utility cleans up the integration bridges used by Neutron. + """ + + conf = setup_conf() + conf() + config.setup_logging(conf) + + configuration_bridges = set([conf.ovs_integration_bridge, + conf.external_network_bridge]) + ovs_bridges = set(ovs_lib.get_bridges(conf.AGENT.root_helper)) + available_configuration_bridges = configuration_bridges & ovs_bridges + + if conf.ovs_all_ports: + bridges = ovs_bridges + else: + bridges = available_configuration_bridges + + # Collect existing ports created by Neutron on configuration bridges. + # After deleting ports from OVS bridges, we cannot determine which + # ports were created by Neutron, so port information is collected now. + ports = collect_neutron_ports(available_configuration_bridges, + conf.AGENT.root_helper) + + for bridge in bridges: + LOG.info(_("Cleaning %s"), bridge) + ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper) + ovs.delete_ports(all_ports=conf.ovs_all_ports) + + # Remove remaining ports created by Neutron (usually veth pair) + delete_neutron_ports(ports, conf.AGENT.root_helper) + + LOG.info(_("OVS cleanup completed successfully")) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/rpc.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/rpc.py new file mode 100644 index 00000000..33ff7bd8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/rpc.py @@ -0,0 +1,134 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
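[Editorial aside on the bridge selection in the OVS cleanup utility above: ports are always collected from the configured bridges that actually exist on the host, while the ovs_all_ports option widens the port deletion to every bridge. A small sketch with made-up bridge names; br-int/br-ex/br-tun are conventional defaults, not mandated by this patch.]

# Illustrative values only: which bridges the cleanup utility touches.
configuration_bridges = {'br-int', 'br-ex'}   # from the agent config
ovs_bridges = {'br-int', 'br-ex', 'br-tun'}   # discovered on the host
available_configuration_bridges = configuration_bridges & ovs_bridges

ovs_all_ports = False
bridges = ovs_bridges if ovs_all_ports else available_configuration_bridges
print(sorted(bridges))   # ['br-ex', 'br-int'] -- br-tun is left alone by default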
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import itertools +from oslo import messaging + +from neutron.common import rpc as n_rpc +from neutron.common import topics + +from neutron.openstack.common import log as logging +from neutron.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + + +def create_consumers(endpoints, prefix, topic_details): + """Create agent RPC consumers. + + :param endpoints: The list of endpoints to process the incoming messages. + :param prefix: Common prefix for the plugin/agent message queues. + :param topic_details: A list of topics. Each topic has a name, an + operation, and an optional host param keying the + subscription to topic.host for plugin calls. + + :returns: A common Connection. + """ + + connection = n_rpc.create_connection(new=True) + for details in topic_details: + topic, operation, node_name = itertools.islice( + itertools.chain(details, [None]), 3) + + topic_name = topics.get_topic_name(prefix, topic, operation) + connection.create_consumer(topic_name, endpoints, fanout=True) + if node_name: + node_topic_name = '%s.%s' % (topic_name, node_name) + connection.create_consumer(node_topic_name, + endpoints, + fanout=False) + connection.consume_in_threads() + return connection + + +class PluginReportStateAPI(n_rpc.RpcProxy): + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(PluginReportStateAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + def report_state(self, context, agent_state, use_call=False): + msg = self.make_msg('report_state', + agent_state={'agent_state': + agent_state}, + time=timeutils.strtime()) + if use_call: + return self.call(context, msg, topic=self.topic) + else: + return self.cast(context, msg, topic=self.topic) + + +class PluginApi(n_rpc.RpcProxy): + '''Agent side of the rpc API. + + API version history: + 1.0 - Initial version. 
+ 1.3 - get_device_details rpc signature upgrade to obtain 'host' and + return value to include fixed_ips and device_owner for + the device port + ''' + + BASE_RPC_API_VERSION = '1.3' + + def __init__(self, topic): + super(PluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + def get_device_details(self, context, device, agent_id, host=None): + return self.call(context, + self.make_msg('get_device_details', device=device, + agent_id=agent_id, + host=host), + topic=self.topic) + + def get_devices_details_list(self, context, devices, agent_id, host=None): + res = [] + try: + res = self.call(context, + self.make_msg('get_devices_details_list', + devices=devices, + agent_id=agent_id, + host=host), + topic=self.topic, + version=self.BASE_RPC_API_VERSION) + except messaging.UnsupportedVersion: + res = [ + self.call(context, + self.make_msg('get_device_details', device=device, + agent_id=agent_id, host=host), + topic=self.topic) + for device in devices + ] + return res + + def update_device_down(self, context, device, agent_id, host=None): + return self.call(context, + self.make_msg('update_device_down', device=device, + agent_id=agent_id, host=host), + topic=self.topic) + + def update_device_up(self, context, device, agent_id, host=None): + return self.call(context, + self.make_msg('update_device_up', device=device, + agent_id=agent_id, host=host), + topic=self.topic) + + def tunnel_sync(self, context, tunnel_ip, tunnel_type=None): + return self.call(context, + self.make_msg('tunnel_sync', tunnel_ip=tunnel_ip, + tunnel_type=tunnel_type), + topic=self.topic) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/agent/securitygroups_rpc.py b/icehouse-patches/neutron/dvr-patch/neutron/agent/securitygroups_rpc.py new file mode 100644 index 00000000..91f56704 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/agent/securitygroups_rpc.py @@ -0,0 +1,301 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo.config import cfg + +from neutron.common import topics +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) +SG_RPC_VERSION = "1.1" + +security_group_opts = [ + cfg.StrOpt( + 'firewall_driver', + help=_('Driver for security groups firewall in the L2 agent')), + cfg.BoolOpt( + 'enable_security_group', + default=True, + help=_( + 'Controls whether the neutron security group API is enabled ' + 'in the server. 
It should be false when using no security ' + 'groups or using the nova security group API.')) +] +cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP') + + +#This is backward compatibility check for Havana +def _is_valid_driver_combination(): + return ((cfg.CONF.SECURITYGROUP.enable_security_group and + (cfg.CONF.SECURITYGROUP.firewall_driver and + cfg.CONF.SECURITYGROUP.firewall_driver != + 'neutron.agent.firewall.NoopFirewallDriver')) or + (not cfg.CONF.SECURITYGROUP.enable_security_group and + (cfg.CONF.SECURITYGROUP.firewall_driver == + 'neutron.agent.firewall.NoopFirewallDriver' or + cfg.CONF.SECURITYGROUP.firewall_driver is None) + )) + + +def is_firewall_enabled(): + if not _is_valid_driver_combination(): + LOG.warn(_("Driver configuration doesn't match with " + "enable_security_group")) + + return cfg.CONF.SECURITYGROUP.enable_security_group + + +def _disable_extension(extension, aliases): + if extension in aliases: + aliases.remove(extension) + + +def disable_security_group_extension_by_config(aliases): + if not is_firewall_enabled(): + LOG.info(_('Disabled security-group extension.')) + _disable_extension('security-group', aliases) + LOG.info(_('Disabled allowed-address-pairs extension.')) + _disable_extension('allowed-address-pairs', aliases) + + +class SecurityGroupServerRpcApiMixin(object): + """A mix-in that enable SecurityGroup support in plugin rpc.""" + def security_group_rules_for_devices(self, context, devices): + LOG.debug(_("Get security group rules " + "for devices via rpc %r"), devices) + return self.call(context, + self.make_msg('security_group_rules_for_devices', + devices=devices), + version=SG_RPC_VERSION, + topic=self.topic) + + +class SecurityGroupAgentRpcCallbackMixin(object): + """A mix-in that enable SecurityGroup agent + support in agent implementations. + """ + #mix-in object should be have sg_agent + sg_agent = None + + def _security_groups_agent_not_set(self): + LOG.warning(_("Security group agent binding currently not set. " + "This should be set by the end of the init " + "process.")) + + def security_groups_rule_updated(self, context, **kwargs): + """Callback for security group rule update. + + :param security_groups: list of updated security_groups + """ + security_groups = kwargs.get('security_groups', []) + LOG.debug( + _("Security group rule updated on remote: %s"), security_groups) + if not self.sg_agent: + return self._security_groups_agent_not_set() + self.sg_agent.security_groups_rule_updated(security_groups) + + def security_groups_member_updated(self, context, **kwargs): + """Callback for security group member update. + + :param security_groups: list of updated security_groups + """ + security_groups = kwargs.get('security_groups', []) + LOG.debug( + _("Security group member updated on remote: %s"), security_groups) + if not self.sg_agent: + return self._security_groups_agent_not_set() + self.sg_agent.security_groups_member_updated(security_groups) + + def security_groups_provider_updated(self, context, **kwargs): + """Callback for security group provider update.""" + LOG.debug(_("Provider rule updated")) + if not self.sg_agent: + return self._security_groups_agent_not_set() + self.sg_agent.security_groups_provider_updated() + + +class SecurityGroupAgentRpcMixin(object): + """A mix-in that enable SecurityGroup agent + support in agent implementations. 
+ """ + + def init_firewall(self, defer_refresh_firewall=False): + firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver + LOG.debug(_("Init firewall settings (driver=%s)"), firewall_driver) + if not _is_valid_driver_combination(): + LOG.warn(_("Driver configuration doesn't match " + "with enable_security_group")) + if not firewall_driver: + firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver' + self.firewall = importutils.import_object(firewall_driver) + # The following flag will be set to true if port filter must not be + # applied as soon as a rule or membership notification is received + self.defer_refresh_firewall = defer_refresh_firewall + # Stores devices for which firewall should be refreshed when + # deferred refresh is enabled. + self.devices_to_refilter = set() + # Flag raised when a global refresh is needed + self.global_refresh_firewall = False + + def prepare_devices_filter(self, device_ids): + if not device_ids: + return + LOG.info(_("Preparing filters for devices %s"), device_ids) + devices = self.plugin_rpc.security_group_rules_for_devices( + self.context, list(device_ids)) + with self.firewall.defer_apply(): + for device in devices.values(): + self.firewall.prepare_port_filter(device) + + def security_groups_rule_updated(self, security_groups): + LOG.info(_("Security group " + "rule updated %r"), security_groups) + self._security_group_updated( + security_groups, + 'security_groups') + + def security_groups_member_updated(self, security_groups): + LOG.info(_("Security group " + "member updated %r"), security_groups) + self._security_group_updated( + security_groups, + 'security_group_source_groups') + + def _security_group_updated(self, security_groups, attribute): + devices = [] + sec_grp_set = set(security_groups) + for device in self.firewall.ports.values(): + if sec_grp_set & set(device.get(attribute, [])): + devices.append(device['device']) + if devices: + if self.defer_refresh_firewall: + LOG.debug(_("Adding %s devices to the list of devices " + "for which firewall needs to be refreshed"), + devices) + self.devices_to_refilter |= set(devices) + else: + self.refresh_firewall(devices) + + def security_groups_provider_updated(self): + LOG.info(_("Provider rule updated")) + if self.defer_refresh_firewall: + # NOTE(salv-orlando): A 'global refresh' might not be + # necessary if the subnet for which the provider rules + # were updated is known + self.global_refresh_firewall = True + else: + self.refresh_firewall() + + def remove_devices_filter(self, device_ids): + if not device_ids: + return + LOG.info(_("Remove device filter for %r"), device_ids) + with self.firewall.defer_apply(): + for device_id in device_ids: + device = self.firewall.ports.get(device_id) + if not device: + continue + self.firewall.remove_port_filter(device) + + def refresh_firewall(self, device_ids=None): + LOG.info(_("Refresh firewall rules")) + if not device_ids: + device_ids = self.firewall.ports.keys() + if not device_ids: + LOG.info(_("No ports here to refresh firewall")) + return + devices = self.plugin_rpc.security_group_rules_for_devices( + self.context, device_ids) + with self.firewall.defer_apply(): + for device in devices.values(): + LOG.debug(_("Update port filter for %s"), device['device']) + self.firewall.update_port_filter(device) + + def firewall_refresh_needed(self): + return self.global_refresh_firewall or self.devices_to_refilter + + def setup_port_filters(self, new_devices, updated_devices): + """Configure port filters for devices. 
+ + This routine applies filters for new devices and refreshes firewall + rules when devices have been updated, or when there are changes in + security group membership or rules. + + :param new_devices: set containing identifiers for new devices + :param updated_devices: set containining identifiers for + updated devices + """ + if new_devices: + LOG.debug(_("Preparing device filters for %d new devices"), + len(new_devices)) + self.prepare_devices_filter(new_devices) + # These data structures are cleared here in order to avoid + # losing updates occurring during firewall refresh + devices_to_refilter = self.devices_to_refilter + global_refresh_firewall = self.global_refresh_firewall + self.devices_to_refilter = set() + self.global_refresh_firewall = False + # TODO(salv-orlando): Avoid if possible ever performing the global + # refresh providing a precise list of devices for which firewall + # should be refreshed + if global_refresh_firewall: + LOG.debug(_("Refreshing firewall for all filtered devices")) + self.refresh_firewall() + else: + # If a device is both in new and updated devices + # avoid reprocessing it + updated_devices = ((updated_devices | devices_to_refilter) - + new_devices) + if updated_devices: + LOG.debug(_("Refreshing firewall for %d devices"), + len(updated_devices)) + self.refresh_firewall(updated_devices) + + +class SecurityGroupAgentRpcApiMixin(object): + + def _get_security_group_topic(self): + return topics.get_topic_name(self.topic, + topics.SECURITY_GROUP, + topics.UPDATE) + + def security_groups_rule_updated(self, context, security_groups): + """Notify rule updated security groups.""" + if not security_groups: + return + self.fanout_cast(context, + self.make_msg('security_groups_rule_updated', + security_groups=security_groups), + version=SG_RPC_VERSION, + topic=self._get_security_group_topic()) + + def security_groups_member_updated(self, context, security_groups): + """Notify member updated security groups.""" + if not security_groups: + return + self.fanout_cast(context, + self.make_msg('security_groups_member_updated', + security_groups=security_groups), + version=SG_RPC_VERSION, + topic=self._get_security_group_topic()) + + def security_groups_provider_updated(self, context): + """Notify provider updated security groups.""" + self.fanout_cast(context, + self.make_msg('security_groups_provider_updated'), + version=SG_RPC_VERSION, + topic=self._get_security_group_topic()) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/api_common.py b/icehouse-patches/neutron/dvr-patch/neutron/api/api_common.py new file mode 100644 index 00000000..e370e2e5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/api/api_common.py @@ -0,0 +1,327 @@ +# Copyright 2011 Citrix System. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
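[Editorial aside on the deferred-refresh bookkeeping in setup_port_filters above: it boils down to simple set arithmetic, where devices queued for refiltering are merged with the updated ones and anything that was just prepared as a new device is skipped. Illustrative device identifiers only.]

# Illustrative only: merging the deferred refresh queue with updates.
new_devices = {'port-a'}
updated_devices = {'port-a', 'port-b'}
devices_to_refilter = {'port-c'}

to_refresh = (updated_devices | devices_to_refilter) - new_devices
print(sorted(to_refresh))   # ['port-b', 'port-c']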
+ +import urllib + +from oslo.config import cfg +from webob import exc + +from neutron.common import constants +from neutron.common import exceptions +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def get_filters(request, attr_info, skips=[]): + """Extracts the filters from the request string. + + Returns a dict of lists for the filters: + check=a&check=b&name=Bob& + becomes: + {'check': [u'a', u'b'], 'name': [u'Bob']} + """ + res = {} + for key, values in request.GET.dict_of_lists().iteritems(): + if key in skips: + continue + values = [v for v in values if v] + key_attr_info = attr_info.get(key, {}) + if 'convert_list_to' in key_attr_info: + values = key_attr_info['convert_list_to'](values) + elif 'convert_to' in key_attr_info: + convert_to = key_attr_info['convert_to'] + values = [convert_to(v) for v in values] + if values: + res[key] = values + return res + + +def get_previous_link(request, items, id_key): + params = request.GET.copy() + params.pop('marker', None) + if items: + marker = items[0][id_key] + params['marker'] = marker + params['page_reverse'] = True + return "%s?%s" % (request.path_url, urllib.urlencode(params)) + + +def get_next_link(request, items, id_key): + params = request.GET.copy() + params.pop('marker', None) + if items: + marker = items[-1][id_key] + params['marker'] = marker + params.pop('page_reverse', None) + return "%s?%s" % (request.path_url, urllib.urlencode(params)) + + +def get_limit_and_marker(request): + """Return marker, limit tuple from request. + + :param request: `wsgi.Request` possibly containing 'marker' and 'limit' + GET variables. 'marker' is the id of the last element + the client has seen, and 'limit' is the maximum number + of items to return. If limit == 0, it means we needn't + pagination, then return None. + """ + max_limit = _get_pagination_max_limit() + limit = _get_limit_param(request, max_limit) + if max_limit > 0: + limit = min(max_limit, limit) or max_limit + if not limit: + return None, None + marker = request.GET.get('marker', None) + return limit, marker + + +def _get_pagination_max_limit(): + max_limit = -1 + if (cfg.CONF.pagination_max_limit.lower() != + constants.PAGINATION_INFINITE): + try: + max_limit = int(cfg.CONF.pagination_max_limit) + if max_limit == 0: + raise ValueError() + except ValueError: + LOG.warn(_("Invalid value for pagination_max_limit: %s. It " + "should be an integer greater to 0"), + cfg.CONF.pagination_max_limit) + return max_limit + + +def _get_limit_param(request, max_limit): + """Extract integer limit from request or fail.""" + try: + limit = int(request.GET.get('limit', 0)) + if limit >= 0: + return limit + except ValueError: + pass + msg = _("Limit must be an integer 0 or greater and not '%d'") + raise exceptions.BadRequest(resource='limit', msg=msg) + + +def list_args(request, arg): + """Extracts the list of arg from request.""" + return [v for v in request.GET.getall(arg) if v] + + +def get_sorts(request, attr_info): + """Extract sort_key and sort_dir from request. 
+ + Return as: [(key1, value1), (key2, value2)] + """ + sort_keys = list_args(request, "sort_key") + sort_dirs = list_args(request, "sort_dir") + if len(sort_keys) != len(sort_dirs): + msg = _("The number of sort_keys and sort_dirs must be same") + raise exc.HTTPBadRequest(explanation=msg) + valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC] + absent_keys = [x for x in sort_keys if x not in attr_info] + if absent_keys: + msg = _("%s is invalid attribute for sort_keys") % absent_keys + raise exc.HTTPBadRequest(explanation=msg) + invalid_dirs = [x for x in sort_dirs if x not in valid_dirs] + if invalid_dirs: + msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, " + "valid value is '%(asc)s' and '%(desc)s'") % + {'invalid_dirs': invalid_dirs, + 'asc': constants.SORT_DIRECTION_ASC, + 'desc': constants.SORT_DIRECTION_DESC}) + raise exc.HTTPBadRequest(explanation=msg) + return zip(sort_keys, + [x == constants.SORT_DIRECTION_ASC for x in sort_dirs]) + + +def get_page_reverse(request): + data = request.GET.get('page_reverse', 'False') + return data.lower() == "true" + + +def get_pagination_links(request, items, limit, + marker, page_reverse, key="id"): + key = key if key else 'id' + links = [] + if not limit: + return links + if not (len(items) < limit and not page_reverse): + links.append({"rel": "next", + "href": get_next_link(request, items, + key)}) + if not (len(items) < limit and page_reverse): + links.append({"rel": "previous", + "href": get_previous_link(request, items, + key)}) + return links + + +class PaginationHelper(object): + + def __init__(self, request, primary_key='id'): + self.request = request + self.primary_key = primary_key + + def update_fields(self, original_fields, fields_to_add): + pass + + def update_args(self, args): + pass + + def paginate(self, items): + return items + + def get_links(self, items): + return {} + + +class PaginationEmulatedHelper(PaginationHelper): + + def __init__(self, request, primary_key='id'): + super(PaginationEmulatedHelper, self).__init__(request, primary_key) + self.limit, self.marker = get_limit_and_marker(request) + self.page_reverse = get_page_reverse(request) + + def update_fields(self, original_fields, fields_to_add): + if not original_fields: + return + if self.primary_key not in original_fields: + original_fields.append(self.primary_key) + fields_to_add.append(self.primary_key) + + def paginate(self, items): + if not self.limit: + return items + i = -1 + if self.marker: + for item in items: + i = i + 1 + if item[self.primary_key] == self.marker: + break + if self.page_reverse: + return items[i - self.limit:i] + return items[i + 1:i + self.limit + 1] + + def get_links(self, items): + return get_pagination_links( + self.request, items, self.limit, self.marker, + self.page_reverse, self.primary_key) + + +class PaginationNativeHelper(PaginationEmulatedHelper): + + def update_args(self, args): + if self.primary_key not in dict(args.get('sorts', [])).keys(): + args.setdefault('sorts', []).append((self.primary_key, True)) + args.update({'limit': self.limit, 'marker': self.marker, + 'page_reverse': self.page_reverse}) + + def paginate(self, items): + return items + + +class NoPaginationHelper(PaginationHelper): + pass + + +class SortingHelper(object): + + def __init__(self, request, attr_info): + pass + + def update_args(self, args): + pass + + def update_fields(self, original_fields, fields_to_add): + pass + + def sort(self, items): + return items + + +class SortingEmulatedHelper(SortingHelper): + + def 
__init__(self, request, attr_info): + super(SortingEmulatedHelper, self).__init__(request, attr_info) + self.sort_dict = get_sorts(request, attr_info) + + def update_fields(self, original_fields, fields_to_add): + if not original_fields: + return + for key in dict(self.sort_dict).keys(): + if key not in original_fields: + original_fields.append(key) + fields_to_add.append(key) + + def sort(self, items): + def cmp_func(obj1, obj2): + for key, direction in self.sort_dict: + ret = cmp(obj1[key], obj2[key]) + if ret: + return ret * (1 if direction else -1) + return 0 + return sorted(items, cmp=cmp_func) + + +class SortingNativeHelper(SortingHelper): + + def __init__(self, request, attr_info): + self.sort_dict = get_sorts(request, attr_info) + + def update_args(self, args): + args['sorts'] = self.sort_dict + + +class NoSortingHelper(SortingHelper): + pass + + +class NeutronController(object): + """Base controller class for Neutron API.""" + # _resource_name will be redefined in sub concrete controller + _resource_name = None + + def __init__(self, plugin): + self._plugin = plugin + super(NeutronController, self).__init__() + + def _prepare_request_body(self, body, params): + """Verifies required parameters are in request body. + + Sets default value for missing optional parameters. + Body argument must be the deserialized body. + """ + try: + if body is None: + # Initialize empty resource for setting default value + body = {self._resource_name: {}} + data = body[self._resource_name] + except KeyError: + # raise if _resource_name is not in req body. + raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") % + self._resource_name) + for param in params: + param_name = param['param-name'] + param_value = data.get(param_name) + # If the parameter wasn't found and it was required, return 400 + if param_value is None and param['required']: + msg = (_("Failed to parse request. " + "Parameter '%s' not specified") % param_name) + LOG.error(msg) + raise exc.HTTPBadRequest(msg) + data[param_name] = param_value or param.get('default-value') + return body diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/extensions.py b/icehouse-patches/neutron/dvr-patch/neutron/api/extensions.py new file mode 100644 index 00000000..4f9988e0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/api/extensions.py @@ -0,0 +1,684 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
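[Editorial aside on NeutronController._prepare_request_body above: missing optional parameters are backfilled from their declared defaults. The sketch omits the required-parameter check that raises HTTP 400, and the 'widget' resource and its parameters are made up for the example.]

# Illustrative only: default-value backfilling for optional parameters.
body = {'widget': {'name': 'w1'}}
params = [{'param-name': 'name', 'required': True},
          {'param-name': 'admin_state_up', 'required': False,
           'default-value': True}]
data = body['widget']
for param in params:
    value = data.get(param['param-name'])
    data[param['param-name']] = value or param.get('default-value')
print(body)   # {'widget': {'name': 'w1', 'admin_state_up': True}}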
+ +import abc +import imp +import itertools +import os + +from oslo.config import cfg +import routes +import six +import webob.dec +import webob.exc + +from neutron.api.v2 import attributes +from neutron.common import exceptions +import neutron.extensions +from neutron import manager +from neutron.openstack.common import log as logging +from neutron import policy +from neutron import wsgi + + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class PluginInterface(object): + + @classmethod + def __subclasshook__(cls, klass): + """Checking plugin class. + + The __subclasshook__ method is a class method + that will be called every time a class is tested + using issubclass(klass, PluginInterface). + In that case, it will check that every method + marked with the abstractmethod decorator is + provided by the plugin class. + """ + + if not cls.__abstractmethods__: + return NotImplemented + + for method in cls.__abstractmethods__: + if any(method in base.__dict__ for base in klass.__mro__): + continue + return NotImplemented + return True + + +class ExtensionDescriptor(object): + """Base class that defines the contract for extensions. + + Note that you don't have to derive from this class to have a valid + extension; it is purely a convenience. + """ + + def get_name(self): + """The name of the extension. + + e.g. 'Fox In Socks' + """ + raise NotImplementedError() + + def get_alias(self): + """The alias for the extension. + + e.g. 'FOXNSOX' + """ + raise NotImplementedError() + + def get_description(self): + """Friendly description for the extension. + + e.g. 'The Fox In Socks Extension' + """ + raise NotImplementedError() + + def get_namespace(self): + """The XML namespace for the extension. + + e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0' + """ + raise NotImplementedError() + + def get_updated(self): + """The timestamp when the extension was last updated. + + e.g. '2011-01-22T13:25:27-06:00' + """ + # NOTE(justinsb): Not sure of the purpose of this is, vs the XML NS + raise NotImplementedError() + + def get_resources(self): + """List of extensions.ResourceExtension extension objects. + + Resources define new nouns, and are accessible through URLs. + """ + resources = [] + return resources + + def get_actions(self): + """List of extensions.ActionExtension extension objects. + + Actions are verbs callable from the API. + """ + actions = [] + return actions + + def get_request_extensions(self): + """List of extensions.RequestException extension objects. + + Request extensions are used to handle custom request data. + """ + request_exts = [] + return request_exts + + def get_extended_resources(self, version): + """Retrieve extended resources or attributes for core resources. + + Extended attributes are implemented by a core plugin similarly + to the attributes defined in the core, and can appear in + request and response messages. Their names are scoped with the + extension's prefix. The core API version is passed to this + function, which must return a + map[][][] + specifying the extended resource attribute properties required + by that API version. + + Extension can add resources and their attr definitions too. + The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP. + """ + return {} + + def get_plugin_interface(self): + """Returns an abstract class which defines contract for the plugin. 
+ + The abstract class should inherit from extesnions.PluginInterface, + Methods in this abstract class should be decorated as abstractmethod + """ + return None + + def update_attributes_map(self, extended_attributes, + extension_attrs_map=None): + """Update attributes map for this extension. + + This is default method for extending an extension's attributes map. + An extension can use this method and supplying its own resource + attribute map in extension_attrs_map argument to extend all its + attributes that needs to be extended. + + If an extension does not implement update_attributes_map, the method + does nothing and just return. + """ + if not extension_attrs_map: + return + + for resource, attrs in extension_attrs_map.iteritems(): + extended_attrs = extended_attributes.get(resource) + if extended_attrs: + attrs.update(extended_attrs) + + def get_alias_namespace_compatibility_map(self): + """Returns mappings between extension aliases and XML namespaces. + + The mappings are XML namespaces that should, for backward compatibility + reasons, be added to the XML serialization of extended attributes. + This allows an established extended attribute to be provided by + another extension than the original one while keeping its old alias + in the name. + :return: A dictionary of extension_aliases and namespace strings. + """ + return {} + + +class ActionExtensionController(wsgi.Controller): + + def __init__(self, application): + self.application = application + self.action_handlers = {} + + def add_action(self, action_name, handler): + self.action_handlers[action_name] = handler + + def action(self, request, id): + input_dict = self._deserialize(request.body, + request.get_content_type()) + for action_name, handler in self.action_handlers.iteritems(): + if action_name in input_dict: + return handler(input_dict, request, id) + # no action handler found (bump to downstream application) + response = self.application + return response + + +class RequestExtensionController(wsgi.Controller): + + def __init__(self, application): + self.application = application + self.handlers = [] + + def add_handler(self, handler): + self.handlers.append(handler) + + def process(self, request, *args, **kwargs): + res = request.get_response(self.application) + # currently request handlers are un-ordered + for handler in self.handlers: + response = handler(request, res) + return response + + +class ExtensionController(wsgi.Controller): + + def __init__(self, extension_manager): + self.extension_manager = extension_manager + + def _translate(self, ext): + ext_data = {} + ext_data['name'] = ext.get_name() + ext_data['alias'] = ext.get_alias() + ext_data['description'] = ext.get_description() + ext_data['namespace'] = ext.get_namespace() + ext_data['updated'] = ext.get_updated() + ext_data['links'] = [] # TODO(dprince): implement extension links + return ext_data + + def index(self, request): + extensions = [] + for _alias, ext in self.extension_manager.extensions.iteritems(): + extensions.append(self._translate(ext)) + return dict(extensions=extensions) + + def show(self, request, id): + # NOTE(dprince): the extensions alias is used as the 'id' for show + ext = self.extension_manager.extensions.get(id, None) + if not ext: + raise webob.exc.HTTPNotFound( + _("Extension with alias %s does not exist") % id) + return dict(extension=self._translate(ext)) + + def delete(self, request, id): + msg = _('Resource not found.') + raise webob.exc.HTTPNotFound(msg) + + def create(self, request): + msg = _('Resource not found.') + 
raise webob.exc.HTTPNotFound(msg) + + +class ExtensionMiddleware(wsgi.Middleware): + """Extensions middleware for WSGI.""" + + def __init__(self, application, + ext_mgr=None): + self.ext_mgr = (ext_mgr + or ExtensionManager(get_extensions_path())) + mapper = routes.Mapper() + + # extended resources + for resource in self.ext_mgr.get_resources(): + path_prefix = resource.path_prefix + if resource.parent: + path_prefix = (resource.path_prefix + + "/%s/{%s_id}" % + (resource.parent["collection_name"], + resource.parent["member_name"])) + + LOG.debug(_('Extended resource: %s'), + resource.collection) + for action, method in resource.collection_actions.iteritems(): + conditions = dict(method=[method]) + path = "/%s/%s" % (resource.collection, action) + with mapper.submapper(controller=resource.controller, + action=action, + path_prefix=path_prefix, + conditions=conditions) as submap: + submap.connect(path) + submap.connect("%s.:(format)" % path) + + mapper.resource(resource.collection, resource.collection, + controller=resource.controller, + member=resource.member_actions, + parent_resource=resource.parent, + path_prefix=path_prefix) + + # extended actions + action_controllers = self._action_ext_controllers(application, + self.ext_mgr, mapper) + for action in self.ext_mgr.get_actions(): + LOG.debug(_('Extended action: %s'), action.action_name) + controller = action_controllers[action.collection] + controller.add_action(action.action_name, action.handler) + + # extended requests + req_controllers = self._request_ext_controllers(application, + self.ext_mgr, mapper) + for request_ext in self.ext_mgr.get_request_extensions(): + LOG.debug(_('Extended request: %s'), request_ext.key) + controller = req_controllers[request_ext.key] + controller.add_handler(request_ext.handler) + + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + mapper) + super(ExtensionMiddleware, self).__init__(application) + + @classmethod + def factory(cls, global_config, **local_config): + """Paste factory.""" + def _factory(app): + return cls(app, global_config, **local_config) + return _factory + + def _action_ext_controllers(self, application, ext_mgr, mapper): + """Return a dict of ActionExtensionController-s by collection.""" + action_controllers = {} + for action in ext_mgr.get_actions(): + if action.collection not in action_controllers.keys(): + controller = ActionExtensionController(application) + mapper.connect("/%s/:(id)/action.:(format)" % + action.collection, + action='action', + controller=controller, + conditions=dict(method=['POST'])) + mapper.connect("/%s/:(id)/action" % action.collection, + action='action', + controller=controller, + conditions=dict(method=['POST'])) + action_controllers[action.collection] = controller + + return action_controllers + + def _request_ext_controllers(self, application, ext_mgr, mapper): + """Returns a dict of RequestExtensionController-s by collection.""" + request_ext_controllers = {} + for req_ext in ext_mgr.get_request_extensions(): + if req_ext.key not in request_ext_controllers.keys(): + controller = RequestExtensionController(application) + mapper.connect(req_ext.url_route + '.:(format)', + action='process', + controller=controller, + conditions=req_ext.conditions) + + mapper.connect(req_ext.url_route, + action='process', + controller=controller, + conditions=req_ext.conditions) + request_ext_controllers[req_ext.key] = controller + + return request_ext_controllers + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """Route the 
incoming request with router.""" + req.environ['extended.app'] = self.application + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=wsgi.Request) + def _dispatch(req): + """Dispatch the request. + + Returns the routed WSGI app's response or defers to the extended + application. + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return req.environ['extended.app'] + app = match['controller'] + return app + + +def plugin_aware_extension_middleware_factory(global_config, **local_config): + """Paste factory.""" + def _factory(app): + ext_mgr = PluginAwareExtensionManager.get_instance() + return ExtensionMiddleware(app, ext_mgr=ext_mgr) + return _factory + + +class ExtensionManager(object): + """Load extensions from the configured extension path. + + See tests/unit/extensions/foxinsocks.py for an + example extension implementation. + """ + + def __init__(self, path): + LOG.info(_('Initializing extension manager.')) + self.path = path + self.extensions = {} + self._load_all_extensions() + policy.reset() + + def get_resources(self): + """Returns a list of ResourceExtension objects.""" + resources = [] + resources.append(ResourceExtension('extensions', + ExtensionController(self))) + for ext in self.extensions.itervalues(): + try: + resources.extend(ext.get_resources()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have resource + # extensions + pass + return resources + + def get_actions(self): + """Returns a list of ActionExtension objects.""" + actions = [] + for ext in self.extensions.itervalues(): + try: + actions.extend(ext.get_actions()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have action + # extensions + pass + return actions + + def get_request_extensions(self): + """Returns a list of RequestExtension objects.""" + request_exts = [] + for ext in self.extensions.itervalues(): + try: + request_exts.extend(ext.get_request_extensions()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have request + # extensions + pass + return request_exts + + def extend_resources(self, version, attr_map): + """Extend resources with additional resources or attributes. + + :param: attr_map, the existing mapping from resource name to + attrs definition. + + After this function, we will extend the attr_map if an extension + wants to extend this map. 
+ """ + update_exts = [] + processed_exts = set() + exts_to_process = self.extensions.copy() + # Iterate until there are unprocessed extensions or if no progress + # is made in a whole iteration + while exts_to_process: + processed_ext_count = len(processed_exts) + for ext_name, ext in exts_to_process.items(): + if not hasattr(ext, 'get_extended_resources'): + del exts_to_process[ext_name] + continue + if hasattr(ext, 'update_attributes_map'): + update_exts.append(ext) + if hasattr(ext, 'get_required_extensions'): + # Process extension only if all required extensions + # have been processed already + required_exts_set = set(ext.get_required_extensions()) + if required_exts_set - processed_exts: + continue + try: + extended_attrs = ext.get_extended_resources(version) + for resource, resource_attrs in extended_attrs.iteritems(): + if attr_map.get(resource, None): + attr_map[resource].update(resource_attrs) + else: + attr_map[resource] = resource_attrs + if extended_attrs: + attributes.EXT_NSES[ext.get_alias()] = ( + ext.get_namespace()) + except AttributeError: + LOG.exception(_("Error fetching extended attributes for " + "extension '%s'"), ext.get_name()) + try: + comp_map = ext.get_alias_namespace_compatibility_map() + attributes.EXT_NSES_BC.update(comp_map) + except AttributeError: + LOG.info(_("Extension '%s' provides no backward " + "compatibility map for extended attributes"), + ext.get_name()) + processed_exts.add(ext_name) + del exts_to_process[ext_name] + if len(processed_exts) == processed_ext_count: + # Exit loop as no progress was made + break + if exts_to_process: + # NOTE(salv-orlando): Consider whether this error should be fatal + LOG.error(_("It was impossible to process the following " + "extensions: %s because of missing requirements."), + ','.join(exts_to_process.keys())) + + # Extending extensions' attributes map. + for ext in update_exts: + ext.update_attributes_map(attr_map) + + def _check_extension(self, extension): + """Checks for required methods in extension objects.""" + try: + LOG.debug(_('Ext name: %s'), extension.get_name()) + LOG.debug(_('Ext alias: %s'), extension.get_alias()) + LOG.debug(_('Ext description: %s'), extension.get_description()) + LOG.debug(_('Ext namespace: %s'), extension.get_namespace()) + LOG.debug(_('Ext updated: %s'), extension.get_updated()) + except AttributeError as ex: + LOG.exception(_("Exception loading extension: %s"), unicode(ex)) + return False + return True + + def _load_all_extensions(self): + """Load extensions from the configured path. + + The extension name is constructed from the module_name. If your + extension module is named widgets.py, the extension class within that + module should be 'Widgets'. + + See tests/unit/extensions/foxinsocks.py for an example extension + implementation. 
+ """ + for path in self.path.split(':'): + if os.path.exists(path): + self._load_all_extensions_from_path(path) + else: + LOG.error(_("Extension path '%s' doesn't exist!"), path) + + def _load_all_extensions_from_path(self, path): + # Sorting the extension list makes the order in which they + # are loaded predictable across a cluster of load-balanced + # Neutron Servers + for f in sorted(os.listdir(path)): + try: + LOG.debug(_('Loading extension file: %s'), f) + mod_name, file_ext = os.path.splitext(os.path.split(f)[-1]) + ext_path = os.path.join(path, f) + if file_ext.lower() == '.py' and not mod_name.startswith('_'): + mod = imp.load_source(mod_name, ext_path) + ext_name = mod_name[0].upper() + mod_name[1:] + new_ext_class = getattr(mod, ext_name, None) + if not new_ext_class: + LOG.warn(_('Did not find expected name ' + '"%(ext_name)s" in %(file)s'), + {'ext_name': ext_name, + 'file': ext_path}) + continue + new_ext = new_ext_class() + self.add_extension(new_ext) + except Exception as exception: + LOG.warn(_("Extension file %(f)s wasn't loaded due to " + "%(exception)s"), {'f': f, 'exception': exception}) + + def add_extension(self, ext): + # Do nothing if the extension doesn't check out + if not self._check_extension(ext): + return + + alias = ext.get_alias() + LOG.info(_('Loaded extension: %s'), alias) + + if alias in self.extensions: + raise exceptions.DuplicatedExtension(alias=alias) + self.extensions[alias] = ext + + +class PluginAwareExtensionManager(ExtensionManager): + + _instance = None + + def __init__(self, path, plugins): + self.plugins = plugins + super(PluginAwareExtensionManager, self).__init__(path) + self.check_if_plugin_extensions_loaded() + + def _check_extension(self, extension): + """Check if an extension is supported by any plugin.""" + extension_is_valid = super(PluginAwareExtensionManager, + self)._check_extension(extension) + return (extension_is_valid and + self._plugins_support(extension) and + self._plugins_implement_interface(extension)) + + def _plugins_support(self, extension): + alias = extension.get_alias() + supports_extension = any((hasattr(plugin, + "supported_extension_aliases") and + alias in plugin.supported_extension_aliases) + for plugin in self.plugins.values()) + if not supports_extension: + LOG.warn(_("Extension %s not supported by any of loaded plugins"), + alias) + return supports_extension + + def _plugins_implement_interface(self, extension): + if(not hasattr(extension, "get_plugin_interface") or + extension.get_plugin_interface() is None): + return True + for plugin in self.plugins.values(): + if isinstance(plugin, extension.get_plugin_interface()): + return True + LOG.warn(_("Loaded plugins do not implement extension %s interface"), + extension.get_alias()) + return False + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls(get_extensions_path(), + manager.NeutronManager.get_service_plugins()) + return cls._instance + + def check_if_plugin_extensions_loaded(self): + """Check if an extension supported by a plugin has been loaded.""" + plugin_extensions = set(itertools.chain.from_iterable([ + getattr(plugin, "supported_extension_aliases", []) + for plugin in self.plugins.values()])) + missing_aliases = plugin_extensions - set(self.extensions) + if missing_aliases: + raise exceptions.ExtensionsNotFound( + extensions=list(missing_aliases)) + + +class RequestExtension(object): + """Extend requests and responses of core Neutron OpenStack API controllers. 
+ + Provide a way to add data to responses and handle custom request data + that is sent to core Neutron OpenStack API controllers. + """ + + def __init__(self, method, url_route, handler): + self.url_route = url_route + self.handler = handler + self.conditions = dict(method=[method]) + self.key = "%s-%s" % (method, url_route) + + +class ActionExtension(object): + """Add custom actions to core Neutron OpenStack API controllers.""" + + def __init__(self, collection, action_name, handler): + self.collection = collection + self.action_name = action_name + self.handler = handler + + +class ResourceExtension(object): + """Add top level resources to the OpenStack API in Neutron.""" + + def __init__(self, collection, controller, parent=None, path_prefix="", + collection_actions={}, member_actions={}, attr_map={}): + self.collection = collection + self.controller = controller + self.parent = parent + self.collection_actions = collection_actions + self.member_actions = member_actions + self.path_prefix = path_prefix + self.attr_map = attr_map + + +# Returns the extension paths from a config entry and the __path__ +# of neutron.extensions +def get_extensions_path(): + paths = ':'.join(neutron.extensions.__path__) + if cfg.CONF.api_extensions_path: + paths = ':'.join([cfg.CONF.api_extensions_path, paths]) + + return paths + + +def append_api_extensions_path(paths): + paths = [cfg.CONF.api_extensions_path] + paths + cfg.CONF.set_override('api_extensions_path', + ':'.join([p for p in paths if p])) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py new file mode 100644 index 00000000..96c50bce --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py @@ -0,0 +1,177 @@ +# Copyright (c) 2013 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
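[Editorial aside on the extension loader earlier in this file: extension classes are located purely by a naming convention, where the extension module's file name is capitalised to obtain the expected class name ('widgets.py' is expected to define 'Widgets'). A tiny sketch of that mapping; the file name is hypothetical.]

import os

# Illustrative only: file-name to class-name convention used by
# _load_all_extensions_from_path() above.
f = 'widgets.py'
mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
ext_name = mod_name[0].upper() + mod_name[1:]
print('%s -> %s' % (f, ext_name))   # widgets.py -> Widgets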
+ +from neutron.common import constants +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron import manager +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class DhcpAgentNotifyAPI(n_rpc.RpcProxy): + """API for plugin to notify DHCP agent.""" + BASE_RPC_API_VERSION = '1.0' + # It seems dhcp agent does not support bulk operation + VALID_RESOURCES = ['network', 'subnet', 'port'] + VALID_METHOD_NAMES = ['network.create.end', + 'network.update.end', + 'network.delete.end', + 'subnet.create.end', + 'subnet.update.end', + 'subnet.delete.end', + 'port.create.end', + 'port.update.end', + 'port.delete.end'] + + def __init__(self, topic=topics.DHCP_AGENT, plugin=None): + super(DhcpAgentNotifyAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self._plugin = plugin + + @property + def plugin(self): + if self._plugin is None: + self._plugin = manager.NeutronManager.get_plugin() + return self._plugin + + def _schedule_network(self, context, network, existing_agents): + """Schedule the network to new agents + + :return: all agents associated with the network + """ + new_agents = self.plugin.schedule_network(context, network) or [] + if new_agents: + for agent in new_agents: + self._cast_message( + context, 'network_create_end', + {'network': {'id': network['id']}}, agent['host']) + elif not existing_agents: + LOG.warn(_('Unable to schedule network %s: no agents available; ' + 'will retry on subsequent port creation events.'), + network['id']) + return new_agents + existing_agents + + def _get_enabled_agents(self, context, network, agents, method, payload): + """Get the list of agents whose admin_state is UP.""" + network_id = network['id'] + enabled_agents = [x for x in agents if x.admin_state_up] + active_agents = [x for x in agents if x.is_active] + len_enabled_agents = len(enabled_agents) + len_active_agents = len(active_agents) + if len_active_agents < len_enabled_agents: + LOG.warn(_("Only %(active)d of %(total)d DHCP agents associated " + "with network '%(net_id)s' are marked as active, so " + " notifications may be sent to inactive agents.") + % {'active': len_active_agents, + 'total': len_enabled_agents, + 'net_id': network_id}) + if not enabled_agents: + num_ports = self.plugin.get_ports_count( + context, {'network_id': [network_id]}) + notification_required = ( + num_ports > 0 and len(network['subnets']) >= 1) + if notification_required: + LOG.error(_("Will not send event %(method)s for network " + "%(net_id)s: no agent available. 
Payload: " + "%(payload)s") + % {'method': method, + 'net_id': network_id, + 'payload': payload}) + return enabled_agents + + def _notify_agents(self, context, method, payload, network_id): + """Notify all the agents that are hosting the network.""" + # fanout is required as we do not know who is "listening" + no_agents = not utils.is_extension_supported( + self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS) + fanout_required = method == 'network_delete_end' or no_agents + + # we do nothing on network creation because we want to give the + # admin the chance to associate an agent to the network manually + cast_required = method != 'network_create_end' + + if fanout_required: + self._fanout_message(context, method, payload) + elif cast_required: + admin_ctx = (context if context.is_admin else context.elevated()) + network = self.plugin.get_network(admin_ctx, network_id) + agents = self.plugin.get_dhcp_agents_hosting_networks( + context, [network_id]) + + # schedule the network first, if needed + schedule_required = method == 'port_create_end' + if schedule_required: + agents = self._schedule_network(admin_ctx, network, agents) + + enabled_agents = self._get_enabled_agents( + context, network, agents, method, payload) + for agent in enabled_agents: + self._cast_message( + context, method, payload, agent.host, agent.topic) + + def _cast_message(self, context, method, payload, host, + topic=topics.DHCP_AGENT): + """Cast the payload to the dhcp agent running on the host.""" + self.cast( + context, self.make_msg(method, + payload=payload), + topic='%s.%s' % (topic, host)) + + def _fanout_message(self, context, method, payload): + """Fanout the payload to all dhcp agents.""" + self.fanout_cast( + context, self.make_msg(method, + payload=payload), + topic=topics.DHCP_AGENT) + + def network_removed_from_agent(self, context, network_id, host): + self._cast_message(context, 'network_delete_end', + {'network_id': network_id}, host) + + def network_added_to_agent(self, context, network_id, host): + self._cast_message(context, 'network_create_end', + {'network': {'id': network_id}}, host) + + def agent_updated(self, context, admin_state_up, host): + self._cast_message(context, 'agent_updated', + {'admin_state_up': admin_state_up}, host) + + def notify(self, context, data, method_name): + # data is {'key' : 'value'} with only one key + if method_name not in self.VALID_METHOD_NAMES: + return + obj_type = data.keys()[0] + if obj_type not in self.VALID_RESOURCES: + return + obj_value = data[obj_type] + network_id = None + if obj_type == 'network' and 'id' in obj_value: + network_id = obj_value['id'] + elif obj_type in ['port', 'subnet'] and 'network_id' in obj_value: + network_id = obj_value['network_id'] + if not network_id: + return + method_name = method_name.replace(".", "_") + if method_name.endswith("_delete_end"): + if 'id' in obj_value: + self._notify_agents(context, method_name, + {obj_type + '_id': obj_value['id']}, + network_id) + else: + self._notify_agents(context, method_name, data, network_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py new file mode 100644 index 00000000..d930fde5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py @@ -0,0 +1,149 @@ +# Copyright (c) 2013 OpenStack Foundation. 
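[Editorial aside on the notify() dispatch just above: the versioned notification event name is rewritten into an agent-side RPC method name, and delete events carry only the object id. Illustrative values only.]

# Illustrative only: event-name rewriting and delete payload in notify().
data = {'port': {'id': 'p-1', 'network_id': 'n-1'}}
method_name = 'port.delete.end'

obj_type = list(data.keys())[0]
rpc_method = method_name.replace('.', '_')          # 'port_delete_end'
payload = {obj_type + '_id': data[obj_type]['id']}  # {'port_id': 'p-1'}
print('%s %s' % (rpc_method, payload))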
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.common import constants +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants + + +LOG = logging.getLogger(__name__) + + +class L3AgentNotifyAPI(n_rpc.RpcProxy): + """API for plugin to notify L3 agent.""" + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic=topics.L3_AGENT): + super(L3AgentNotifyAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + def _notification_host(self, context, method, payload, host): + """Notify the agent that is hosting the router.""" + LOG.debug(_('Nofity agent at %(host)s the message ' + '%(method)s'), {'host': host, + 'method': method}) + self.cast( + context, self.make_msg(method, + payload=payload), + topic='%s.%s' % (topics.L3_AGENT, host)) + + def _agent_notification(self, context, method, router_ids, + operation, data): + """Notify changed routers to hosting l3 agents.""" + adminContext = context.is_admin and context or context.elevated() + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + for router_id in router_ids: + l3_agents = plugin.get_l3_agents_hosting_routers( + adminContext, [router_id], + admin_state_up=True, + active=True) + for l3_agent in l3_agents: + LOG.debug(_('Notify agent at %(topic)s.%(host)s the message ' + '%(method)s'), + {'topic': l3_agent.topic, + 'host': l3_agent.host, + 'method': method}) + self.cast( + context, self.make_msg(method, + routers=[router_id]), + topic='%s.%s' % (l3_agent.topic, l3_agent.host), + version='1.1') + + def _agent_notification_arp(self, context, method, router_id, + operation, data): + """Notify arp details to l3 agents hosting router.""" + if not router_id: + return + adminContext = (context.is_admin and + context or context.elevated()) + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + l3_agents = (plugin. + get_l3_agents_hosting_routers(adminContext, + [router_id], + admin_state_up=True, + active=True)) + for l3_agent in l3_agents: + LOG.debug(_('Notify agent at %(topic)s.%(host)s the message ' + '%(method)s'), + {'topic': l3_agent.topic, + 'host': l3_agent.host, + 'method': method}) + dvr_arptable = {'router_id': router_id, + 'arp_table': data} + self.cast( + context, self.make_msg(method, + payload=dvr_arptable), + topic='%s.%s' % (l3_agent.topic, l3_agent.host), + version='1.1') + + def _notification(self, context, method, router_ids, operation, data): + """Notify all the agents that are hosting the routers.""" + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if not plugin: + LOG.error(_('No plugin for L3 routing registered. 
Cannot notify ' + 'agents with the message %s'), method) + return + if utils.is_extension_supported( + plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): + adminContext = (context.is_admin and + context or context.elevated()) + plugin.schedule_routers(adminContext, router_ids, hints=data) + self._agent_notification( + context, method, router_ids, operation, data) + else: + self.fanout_cast( + context, self.make_msg(method, + routers=router_ids), + topic=topics.L3_AGENT) + + def _notification_fanout(self, context, method, router_id): + """Fanout the deleted router to all L3 agents.""" + LOG.debug(_('Fanout notify agent at %(topic)s the message ' + '%(method)s on router %(router_id)s'), + {'topic': topics.L3_AGENT, + 'method': method, + 'router_id': router_id}) + self.fanout_cast( + context, self.make_msg(method, + router_id=router_id), + topic=topics.L3_AGENT) + + def agent_updated(self, context, admin_state_up, host): + self._notification_host(context, 'agent_updated', + {'admin_state_up': admin_state_up}, + host) + + def router_deleted(self, context, router_id): + self._notification_fanout(context, 'router_deleted', router_id) + + def routers_updated(self, context, router_ids, operation=None, data=None): + if router_ids: + self._notification(context, 'routers_updated', router_ids, + operation, data) + + def router_removed_from_agent(self, context, router_id, host): + self._notification_host(context, 'router_removed_from_agent', + {'router_id': router_id}, host) + + def router_added_to_agent(self, context, router_ids, host): + self._notification_host(context, 'router_added_to_agent', + router_ids, host) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py new file mode 100644 index 00000000..e00e73b3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py @@ -0,0 +1,99 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
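+
+# NOTE: the notifier below looks up the L3 service plugin, asks it for the
+# L3 agents hosting each affected router (admin_state_up and active only),
+# groups the routers by agent host and casts one message per host; when the
+# l3_agent_scheduler extension is not supported it falls back to a fanout
+# cast on the metering topic. Minimal usage sketch (context and the router
+# dicts are assumed to come from the metering plugin):
+#
+#     notifier = MeteringAgentNotifyAPI()
+#     notifier.routers_updated(context, routers)   # one cast per hosting agent
+#     notifier.router_deleted(context, router_id)  # fanout to all agents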
+ +from neutron.common import constants +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants + +LOG = logging.getLogger(__name__) + + +class MeteringAgentNotifyAPI(n_rpc.RpcProxy): + """API for plugin to notify L3 metering agent.""" + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic=topics.METERING_AGENT): + super(MeteringAgentNotifyAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + def _agent_notification(self, context, method, routers): + """Notify l3 metering agents hosted by l3 agent hosts.""" + adminContext = context.is_admin and context or context.elevated() + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + + l3_routers = {} + for router in routers: + l3_agents = plugin.get_l3_agents_hosting_routers( + adminContext, [router['id']], + admin_state_up=True, + active=True) + for l3_agent in l3_agents: + LOG.debug(_('Notify metering agent at %(topic)s.%(host)s ' + 'the message %(method)s'), + {'topic': self.topic, + 'host': l3_agent.host, + 'method': method}) + + l3_router = l3_routers.get(l3_agent.host, []) + l3_router.append(router) + l3_routers[l3_agent.host] = l3_router + + for host, routers in l3_routers.iteritems(): + self.cast(context, self.make_msg(method, routers=routers), + topic='%s.%s' % (self.topic, host)) + + def _notification_fanout(self, context, method, router_id): + LOG.debug(_('Fanout notify metering agent at %(topic)s the message ' + '%(method)s on router %(router_id)s'), + {'topic': self.topic, + 'method': method, + 'router_id': router_id}) + self.fanout_cast( + context, self.make_msg(method, + router_id=router_id), + topic=self.topic) + + def _notification(self, context, method, routers): + """Notify all the agents that are hosting the routers.""" + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if utils.is_extension_supported( + plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): + self._agent_notification(context, method, routers) + else: + self.fanout_cast(context, self.make_msg(method, routers=routers), + topic=self.topic) + + def router_deleted(self, context, router_id): + self._notification_fanout(context, 'router_deleted', router_id) + + def routers_updated(self, context, routers): + if routers: + self._notification(context, 'routers_updated', routers) + + def update_metering_label_rules(self, context, routers): + self._notification(context, 'update_metering_label_rules', routers) + + def add_metering_label(self, context, routers): + self._notification(context, 'add_metering_label', routers) + + def remove_metering_label(self, context, routers): + self._notification(context, 'remove_metering_label', routers) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/dvr_rpc.py b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/dvr_rpc.py new file mode 100644 index 00000000..4436f7c4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/api/rpc/dvr_rpc.py @@ -0,0 +1,122 @@ +# Copyright 2014, Hewlett Packard, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import log +from neutron.common import topics +from neutron import manager +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class DVRServerRpcApiMixin(object): + """Agent-side RPC (stub) for agent-to-plugin interaction.""" + + DVR_RPC_VERSION = "1.0" + + @log.log + def get_dvr_mac_address_by_host(self, context, host): + return self.call(context, + self.make_msg('get_dvr_mac_address_by_host', + host=host), + version=self.DVR_RPC_VERSION, + topic=self.topic) + + @log.log + def get_dvr_mac_address_list(self, context): + return self.call(context, + self.make_msg('get_dvr_mac_address_list'), + version=self.DVR_RPC_VERSION, + topic=self.topic) + + @log.log + def get_compute_ports_on_host_by_subnet(self, context, host, subnet): + return self.call(context, + self.make_msg('get_compute_ports_on_host_by_subnet', + host=host, + subnet=subnet), + version=self.DVR_RPC_VERSION, + topic=self.topic) + + @log.log + def get_subnet_for_dvr(self, context, subnet): + return self.call(context, + self.make_msg('get_subnet_for_dvr', + subnet=subnet), + version=self.DVR_RPC_VERSION, + topic=self.topic) + + +class DVRServerRpcCallbackMixin(object): + """Plugin-side RPC (implementation) for agent-to-plugin interaction.""" + + @property + def plugin(self): + if not getattr(self, '_plugin', None): + self._plugin = manager.NeutronManager.get_plugin() + return self._plugin + + def get_dvr_mac_address_list(self, context): + return self.plugin.get_dvr_mac_address_list(context) + + def get_dvr_mac_address_by_host(self, context, host): + return self.plugin.get_dvr_mac_address_by_host(context, host) + + def get_compute_ports_on_host_by_subnet(self, context, host, subnet): + return self.plugin.get_compute_ports_on_host_by_subnet(context, + host, + subnet) + + def get_subnet_for_dvr(self, context, subnet): + return self.plugin.get_subnet_for_dvr(context, subnet) + + +class DVRAgentRpcApiMixin(object): + """Plugin-side RPC (stub) for plugin-to-agent interaction.""" + + DVR_RPC_VERSION = "1.0" + + def _get_dvr_update_topic(self): + return topics.get_topic_name(self.topic, + topics.DVR, + topics.UPDATE) + + def dvr_mac_address_update(self, context, dvr_macs): + """Notify dvr mac address updates.""" + if not dvr_macs: + return + self.fanout_cast(context, + self.make_msg('dvr_mac_address_update', + dvr_macs=dvr_macs), + version=self.DVR_RPC_VERSION, + topic=self._get_dvr_update_topic()) + + +class DVRAgentRpcCallbackMixin(object): + """Agent-side RPC (implementation) for plugin-to-agent interaction.""" + + dvr_agent = None + + def dvr_mac_address_update(self, context, **kwargs): + """Callback for dvr_mac_addresses update. 
+ + :param dvr_macs: list of updated dvr_macs + """ + dvr_macs = kwargs.get('dvr_macs', []) + LOG.debug("dvr_macs updated on remote: %s", dvr_macs) + if not self.dvr_agent: + LOG.warn(_("DVR agent binding currently not set.")) + return + self.dvr_agent.dvr_mac_address_update(dvr_macs) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/v2/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/api/v2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/icehouse-patches/neutron/dvr-patch/neutron/api/v2/attributes.py b/icehouse-patches/neutron/dvr-patch/neutron/api/v2/attributes.py new file mode 100644 index 00000000..0cf1816a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/api/v2/attributes.py @@ -0,0 +1,777 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +import re + +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils + + +LOG = logging.getLogger(__name__) + +ATTR_NOT_SPECIFIED = object() +# Defining a constant to avoid repeating string literal in several modules +SHARED = 'shared' + +# Used by range check to indicate no limit for a bound. +UNLIMITED = None + + +def _verify_dict_keys(expected_keys, target_dict, strict=True): + """Allows to verify keys in a dictionary. + + :param expected_keys: A list of keys expected to be present. + :param target_dict: The dictionary which should be verified. + :param strict: Specifies whether additional keys are allowed to be present. + :return: True, if keys in the dictionary correspond to the specification. + """ + if not isinstance(target_dict, dict): + msg = (_("Invalid input. '%(target_dict)s' must be a dictionary " + "with keys: %(expected_keys)s") % + {'target_dict': target_dict, 'expected_keys': expected_keys}) + return msg + + expected_keys = set(expected_keys) + provided_keys = set(target_dict.keys()) + + predicate = expected_keys.__eq__ if strict else expected_keys.issubset + + if not predicate(provided_keys): + msg = (_("Validation of dictionary's keys failed." 
+ "Expected keys: %(expected_keys)s " + "Provided keys: %(provided_keys)s") % + {'expected_keys': expected_keys, + 'provided_keys': provided_keys}) + return msg + + +def is_attr_set(attribute): + return not (attribute is None or attribute is ATTR_NOT_SPECIFIED) + + +def _validate_values(data, valid_values=None): + if data not in valid_values: + msg = (_("'%(data)s' is not in %(valid_values)s") % + {'data': data, 'valid_values': valid_values}) + LOG.debug(msg) + return msg + + +def _validate_not_empty_string_or_none(data, max_len=None): + if data is not None: + return _validate_not_empty_string(data, max_len=max_len) + + +def _validate_not_empty_string(data, max_len=None): + msg = _validate_string(data, max_len=max_len) + if msg: + return msg + if not data.strip(): + return _("'%s' Blank strings are not permitted") % data + + +def _validate_string_or_none(data, max_len=None): + if data is not None: + return _validate_string(data, max_len=max_len) + + +def _validate_string(data, max_len=None): + if not isinstance(data, basestring): + msg = _("'%s' is not a valid string") % data + LOG.debug(msg) + return msg + + if max_len is not None and len(data) > max_len: + msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") % + {'data': data, 'max_len': max_len}) + LOG.debug(msg) + return msg + + +def _validate_boolean(data, valid_values=None): + try: + convert_to_boolean(data) + except n_exc.InvalidInput: + msg = _("'%s' is not a valid boolean value") % data + LOG.debug(msg) + return msg + + +def _validate_range(data, valid_values=None): + """Check that integer value is within a range provided. + + Test is inclusive. Allows either limit to be ignored, to allow + checking ranges where only the lower or upper limit matter. + It is expected that the limits provided are valid integers or + the value None. + """ + + min_value = valid_values[0] + max_value = valid_values[1] + try: + data = int(data) + except (ValueError, TypeError): + msg = _("'%s' is not an integer") % data + LOG.debug(msg) + return msg + if min_value is not UNLIMITED and data < min_value: + msg = _("'%(data)s' is too small - must be at least " + "'%(limit)d'") % {'data': data, 'limit': min_value} + LOG.debug(msg) + return msg + if max_value is not UNLIMITED and data > max_value: + msg = _("'%(data)s' is too large - must be no larger than " + "'%(limit)d'") % {'data': data, 'limit': max_value} + LOG.debug(msg) + return msg + + +def _validate_no_whitespace(data): + """Validates that input has no whitespace.""" + if len(data.split()) > 1: + msg = _("'%s' contains whitespace") % data + LOG.debug(msg) + raise n_exc.InvalidInput(error_message=msg) + return data + + +def _validate_mac_address(data, valid_values=None): + valid_mac = False + try: + valid_mac = netaddr.valid_mac(_validate_no_whitespace(data)) + except Exception: + pass + finally: + # TODO(arosen): The code in this file should be refactored + # so it catches the correct exceptions. _validate_no_whitespace + # raises AttributeError if data is None. 
+ if valid_mac is False: + msg = _("'%s' is not a valid MAC address") % data + LOG.debug(msg) + return msg + + +def _validate_mac_address_or_none(data, valid_values=None): + if data is None: + return + return _validate_mac_address(data, valid_values) + + +def _validate_ip_address(data, valid_values=None): + try: + netaddr.IPAddress(_validate_no_whitespace(data)) + except Exception: + msg = _("'%s' is not a valid IP address") % data + LOG.debug(msg) + return msg + + +def _validate_ip_pools(data, valid_values=None): + """Validate that start and end IP addresses are present. + + In addition to this the IP addresses will also be validated + """ + if not isinstance(data, list): + msg = _("Invalid data format for IP pool: '%s'") % data + LOG.debug(msg) + return msg + + expected_keys = ['start', 'end'] + for ip_pool in data: + msg = _verify_dict_keys(expected_keys, ip_pool) + if msg: + LOG.debug(msg) + return msg + for k in expected_keys: + msg = _validate_ip_address(ip_pool[k]) + if msg: + LOG.debug(msg) + return msg + + +def _validate_fixed_ips(data, valid_values=None): + if not isinstance(data, list): + msg = _("Invalid data format for fixed IP: '%s'") % data + LOG.debug(msg) + return msg + + ips = [] + for fixed_ip in data: + if not isinstance(fixed_ip, dict): + msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip + LOG.debug(msg) + return msg + if 'ip_address' in fixed_ip: + # Ensure that duplicate entries are not set - just checking IP + # suffices. Duplicate subnet_id's are legitimate. + fixed_ip_address = fixed_ip['ip_address'] + if fixed_ip_address in ips: + msg = _("Duplicate IP address '%s'") % fixed_ip_address + else: + msg = _validate_ip_address(fixed_ip_address) + if msg: + LOG.debug(msg) + return msg + ips.append(fixed_ip_address) + if 'subnet_id' in fixed_ip: + msg = _validate_uuid(fixed_ip['subnet_id']) + if msg: + LOG.debug(msg) + return msg + + +def _validate_nameservers(data, valid_values=None): + if not hasattr(data, '__iter__'): + msg = _("Invalid data format for nameserver: '%s'") % data + LOG.debug(msg) + return msg + + ips = [] + for ip in data: + msg = _validate_ip_address(ip) + if msg: + # This may be a hostname + msg = _validate_regex(ip, HOSTNAME_PATTERN) + if msg: + msg = _("'%s' is not a valid nameserver") % ip + LOG.debug(msg) + return msg + if ip in ips: + msg = _("Duplicate nameserver '%s'") % ip + LOG.debug(msg) + return msg + ips.append(ip) + + +def _validate_hostroutes(data, valid_values=None): + if not isinstance(data, list): + msg = _("Invalid data format for hostroute: '%s'") % data + LOG.debug(msg) + return msg + + expected_keys = ['destination', 'nexthop'] + hostroutes = [] + for hostroute in data: + msg = _verify_dict_keys(expected_keys, hostroute) + if msg: + LOG.debug(msg) + return msg + msg = _validate_subnet(hostroute['destination']) + if msg: + LOG.debug(msg) + return msg + msg = _validate_ip_address(hostroute['nexthop']) + if msg: + LOG.debug(msg) + return msg + if hostroute in hostroutes: + msg = _("Duplicate hostroute '%s'") % hostroute + LOG.debug(msg) + return msg + hostroutes.append(hostroute) + + +def _validate_ip_address_or_none(data, valid_values=None): + if data is None: + return None + return _validate_ip_address(data, valid_values) + + +def _validate_subnet(data, valid_values=None): + msg = None + try: + net = netaddr.IPNetwork(_validate_no_whitespace(data)) + if '/' not in data: + msg = _("'%(data)s' isn't a recognized IP subnet cidr," + " '%(cidr)s' is recommended") % {"data": data, + "cidr": net.cidr} + else: + return + except 
Exception: + msg = _("'%s' is not a valid IP subnet") % data + if msg: + LOG.debug(msg) + return msg + + +def _validate_subnet_list(data, valid_values=None): + if not isinstance(data, list): + msg = _("'%s' is not a list") % data + LOG.debug(msg) + return msg + + if len(set(data)) != len(data): + msg = _("Duplicate items in the list: '%s'") % ', '.join(data) + LOG.debug(msg) + return msg + + for item in data: + msg = _validate_subnet(item) + if msg: + return msg + + +def _validate_subnet_or_none(data, valid_values=None): + if data is None: + return + return _validate_subnet(data, valid_values) + + +def _validate_regex(data, valid_values=None): + try: + if re.match(valid_values, data): + return + except TypeError: + pass + + msg = _("'%s' is not a valid input") % data + LOG.debug(msg) + return msg + + +def _validate_regex_or_none(data, valid_values=None): + if data is None: + return + return _validate_regex(data, valid_values) + + +def _validate_uuid(data, valid_values=None): + if not uuidutils.is_uuid_like(data): + msg = _("'%s' is not a valid UUID") % data + LOG.debug(msg) + return msg + + +def _validate_uuid_or_none(data, valid_values=None): + if data is not None: + return _validate_uuid(data) + + +def _validate_uuid_list(data, valid_values=None): + if not isinstance(data, list): + msg = _("'%s' is not a list") % data + LOG.debug(msg) + return msg + + for item in data: + msg = _validate_uuid(item) + if msg: + LOG.debug(msg) + return msg + + if len(set(data)) != len(data): + msg = _("Duplicate items in the list: '%s'") % ', '.join(data) + LOG.debug(msg) + return msg + + +def _validate_dict_item(key, key_validator, data): + # Find conversion function, if any, and apply it + conv_func = key_validator.get('convert_to') + if conv_func: + data[key] = conv_func(data.get(key)) + # Find validator function + # TODO(salv-orlando): Structure of dict attributes should be improved + # to avoid iterating over items + val_func = val_params = None + for (k, v) in key_validator.iteritems(): + if k.startswith('type:'): + # ask forgiveness, not permission + try: + val_func = validators[k] + except KeyError: + return _("Validator '%s' does not exist.") % k + val_params = v + break + # Process validation + if val_func: + return val_func(data.get(key), val_params) + + +def _validate_dict(data, key_specs=None): + if not isinstance(data, dict): + msg = _("'%s' is not a dictionary") % data + LOG.debug(msg) + return msg + # Do not perform any further validation, if no constraints are supplied + if not key_specs: + return + + # Check whether all required keys are present + required_keys = [key for key, spec in key_specs.iteritems() + if spec.get('required')] + + if required_keys: + msg = _verify_dict_keys(required_keys, data, False) + if msg: + LOG.debug(msg) + return msg + + # Perform validation and conversion of all values + # according to the specifications. 
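+    # Only keys actually present in 'data' are validated here; required keys
+    # were already verified above, and absent optional keys are skipped.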
+ for key, key_validator in [(k, v) for k, v in key_specs.iteritems() + if k in data]: + msg = _validate_dict_item(key, key_validator, data) + if msg: + LOG.debug(msg) + return msg + + +def _validate_dict_or_none(data, key_specs=None): + if data is not None: + return _validate_dict(data, key_specs) + + +def _validate_dict_or_empty(data, key_specs=None): + if data != {}: + return _validate_dict(data, key_specs) + + +def _validate_dict_or_nodata(data, key_specs=None): + if data: + return _validate_dict(data, key_specs) + + +def _validate_non_negative(data, valid_values=None): + try: + data = int(data) + except (ValueError, TypeError): + msg = _("'%s' is not an integer") % data + LOG.debug(msg) + return msg + + if data < 0: + msg = _("'%s' should be non-negative") % data + LOG.debug(msg) + return msg + + +def convert_to_boolean(data): + if isinstance(data, basestring): + val = data.lower() + if val == "true" or val == "1": + return True + if val == "false" or val == "0": + return False + elif isinstance(data, bool): + return data + elif isinstance(data, int): + if data == 0: + return False + elif data == 1: + return True + msg = _("'%s' cannot be converted to boolean") % data + raise n_exc.InvalidInput(error_message=msg) + + +def convert_to_boolean_if_not_none(data): + if data is not None: + return convert_to_boolean(data) + + +def convert_to_int(data): + try: + return int(data) + except (ValueError, TypeError): + msg = _("'%s' is not a integer") % data + raise n_exc.InvalidInput(error_message=msg) + + +def convert_kvp_str_to_list(data): + """Convert a value of the form 'key=value' to ['key', 'value']. + + :raises: n_exc.InvalidInput if any of the strings are malformed + (e.g. do not contain a key). + """ + kvp = [x.strip() for x in data.split('=', 1)] + if len(kvp) == 2 and kvp[0]: + return kvp + msg = _("'%s' is not of the form =[value]") % data + raise n_exc.InvalidInput(error_message=msg) + + +def convert_kvp_list_to_dict(kvp_list): + """Convert a list of 'key=value' strings to a dict. + + :raises: n_exc.InvalidInput if any of the strings are malformed + (e.g. do not contain a key) or if any + of the keys appear more than once. + """ + if kvp_list == ['True']: + # No values were provided (i.e. '--flag-name') + return {} + kvp_map = {} + for kvp_str in kvp_list: + key, value = convert_kvp_str_to_list(kvp_str) + kvp_map.setdefault(key, set()) + kvp_map[key].add(value) + return dict((x, list(y)) for x, y in kvp_map.iteritems()) + + +def convert_none_to_empty_list(value): + return [] if value is None else value + + +def convert_none_to_empty_dict(value): + return {} if value is None else value + + +def convert_to_list(data): + if data is None: + return [] + elif hasattr(data, '__iter__'): + return list(data) + else: + return [data] + + +HOSTNAME_PATTERN = ("(?=^.{1,254}$)(^(?:(?!\d+\.|-)[a-zA-Z0-9_\-]" + "{1,63}(? +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Cron script to generate usage notifications for networks, ports and +subnets. 
+ +""" + +import sys + +from oslo.config import cfg + +from neutron.common import config +from neutron.common import rpc as n_rpc +from neutron import context +from neutron import manager + + +def main(): + config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + + cxt = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + notifier = n_rpc.get_notifier('network') + for network in plugin.get_networks(cxt): + notifier.info(cxt, 'network.exists', {'network': network}) + for subnet in plugin.get_subnets(cxt): + notifier.info(cxt, 'subnet.exists', {'subnet': subnet}) + for port in plugin.get_ports(cxt): + notifier.info(cxt, 'port.exists', {'port': port}) + for router in plugin.get_routers(cxt): + notifier.info(cxt, 'router.exists', {'router': router}) + for floatingip in plugin.get_floatingips(cxt): + notifier.info(cxt, 'floatingip.exists', {'floatingip': floatingip}) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/common/config.py new file mode 100644 index 00000000..9f543703 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/common/config.py @@ -0,0 +1,190 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Routines for configuring Neutron +""" + +import os + +from oslo.config import cfg +from oslo.db import options as db_options +from oslo import messaging +from paste import deploy + +from neutron.api.v2 import attributes +from neutron.common import utils +from neutron.openstack.common import log as logging +from neutron import version + + +LOG = logging.getLogger(__name__) + +core_opts = [ + cfg.StrOpt('bind_host', default='0.0.0.0', + help=_("The host IP to bind to")), + cfg.IntOpt('bind_port', default=9696, + help=_("The port to bind to")), + cfg.StrOpt('api_paste_config', default="api-paste.ini", + help=_("The API paste config file to use")), + cfg.StrOpt('api_extensions_path', default="", + help=_("The path for API extensions")), + cfg.StrOpt('policy_file', default="policy.json", + help=_("The policy file to use")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('core_plugin', + help=_("The core plugin Neutron will use")), + cfg.ListOpt('service_plugins', default=[], + help=_("The service plugins Neutron will use")), + cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", + help=_("The base MAC address Neutron will use for VIFs")), + cfg.IntOpt('mac_generation_retries', default=16, + help=_("How many times Neutron will retry MAC generation")), + cfg.BoolOpt('allow_bulk', default=True, + help=_("Allow the usage of the bulk API")), + cfg.BoolOpt('allow_pagination', default=False, + help=_("Allow the usage of the pagination")), + cfg.BoolOpt('allow_sorting', default=False, + help=_("Allow the usage of the sorting")), + cfg.StrOpt('pagination_max_limit', default="-1", + help=_("The maximum number of items returned in a single " + "response, value was 'infinite' or negative integer " + "means no limit")), + cfg.IntOpt('max_dns_nameservers', default=5, + help=_("Maximum number of DNS nameservers")), + cfg.IntOpt('max_subnet_host_routes', default=20, + help=_("Maximum number of host routes per subnet")), + cfg.IntOpt('max_fixed_ips_per_port', default=5, + help=_("Maximum number of fixed ips per port")), + cfg.IntOpt('dhcp_lease_duration', default=86400, + deprecated_name='dhcp_lease_time', + help=_("DHCP lease duration (in seconds). 
Use -1 to tell " + "dnsmasq to use infinite lease times.")), + cfg.BoolOpt('dhcp_agent_notification', default=True, + help=_("Allow sending resource operation" + " notification to DHCP agent")), + cfg.BoolOpt('allow_overlapping_ips', default=False, + help=_("Allow overlapping IP support in Neutron")), + cfg.StrOpt('host', default=utils.get_hostname(), + help=_("The hostname Neutron is running on")), + cfg.BoolOpt('force_gateway_on_subnet', default=False, + help=_("Ensure that configured gateway is on subnet")), + cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, + help=_("Send notification to nova when port status changes")), + cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, + help=_("Send notification to nova when port data (fixed_ips/" + "floatingip) changes so nova can update its cache.")), + cfg.StrOpt('nova_url', + default='http://127.0.0.1:8774/v2', + help=_('URL for connection to nova')), + cfg.StrOpt('nova_admin_username', + help=_('Username for connecting to nova in admin context')), + cfg.StrOpt('nova_admin_password', + help=_('Password for connection to nova in admin context'), + secret=True), + cfg.StrOpt('nova_admin_tenant_id', + help=_('The uuid of the admin nova tenant')), + cfg.StrOpt('nova_admin_auth_url', + default='http://localhost:5000/v2.0', + help=_('Authorization URL for connecting to nova in admin ' + 'context')), + cfg.StrOpt('nova_ca_certificates_file', + help=_('CA file for novaclient to verify server certificates')), + cfg.BoolOpt('nova_api_insecure', default=False, + help=_("If True, ignore any SSL validation issues")), + cfg.StrOpt('nova_region_name', + help=_('Name of nova region to use. Useful if keystone manages' + ' more than one region.')), + cfg.IntOpt('send_events_interval', default=2, + help=_('Number of seconds between sending events to nova if ' + 'there are any events to send.')), +] + +core_cli_opts = [ + cfg.StrOpt('state_path', + default='/var/lib/neutron', + help=_("Where to store Neutron state files. " + "This directory must be writable by the agent.")), +] + +# Register the configuration options +cfg.CONF.register_opts(core_opts) +cfg.CONF.register_cli_opts(core_cli_opts) + +# Ensure that the control exchange is set correctly +messaging.set_transport_defaults(control_exchange='neutron') +_SQL_CONNECTION_DEFAULT = 'sqlite://' +# Update the default QueuePool parameters. These can be tweaked by the +# configuration variables - max_pool_size, max_overflow and pool_timeout +db_options.set_defaults(cfg.CONF, + connection=_SQL_CONNECTION_DEFAULT, + sqlite_db='', max_pool_size=10, + max_overflow=20, pool_timeout=10) + + +def init(args, **kwargs): + cfg.CONF(args=args, project='neutron', + version='%%prog %s' % version.version_info.release_string(), + **kwargs) + + # FIXME(ihrachys): if import is put in global, circular import + # failure occurs + from neutron.common import rpc as n_rpc + n_rpc.init(cfg.CONF) + + # Validate that the base_mac is of the correct format + msg = attributes._validate_regex(cfg.CONF.base_mac, + attributes.MAC_PATTERN) + if msg: + msg = _("Base MAC: %s") % msg + raise Exception(msg) + + +def setup_logging(conf): + """Sets up the logging options for a log with supplied name. + + :param conf: a cfg.ConfOpts object + """ + product_name = "neutron" + logging.setup(product_name) + LOG.info(_("Logging enabled!")) + + +def load_paste_app(app_name): + """Builds and returns a WSGI app from a paste config file. 
+ + :param app_name: Name of the application to load + :raises ConfigFilesNotFoundError when config file cannot be located + :raises RuntimeError when application cannot be loaded from config file + """ + + config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config) + if not config_path: + raise cfg.ConfigFilesNotFoundError( + config_files=[cfg.CONF.api_paste_config]) + config_path = os.path.abspath(config_path) + LOG.info(_("Config paste file: %s"), config_path) + + try: + app = deploy.loadapp("config:%s" % config_path, name=app_name) + except (LookupError, ImportError): + msg = (_("Unable to load %(app_name)s from " + "configuration file %(config_path)s.") % + {'app_name': app_name, + 'config_path': config_path}) + LOG.exception(msg) + raise RuntimeError(msg) + return app diff --git a/icehouse-patches/neutron/dvr-patch/neutron/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/common/constants.py new file mode 100644 index 00000000..8e3d9860 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/common/constants.py @@ -0,0 +1,131 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO(salv-orlando): Verify if a single set of operational +# status constants is achievable +NET_STATUS_ACTIVE = 'ACTIVE' +NET_STATUS_BUILD = 'BUILD' +NET_STATUS_DOWN = 'DOWN' +NET_STATUS_ERROR = 'ERROR' + +PORT_STATUS_ACTIVE = 'ACTIVE' +PORT_STATUS_BUILD = 'BUILD' +PORT_STATUS_DOWN = 'DOWN' +PORT_STATUS_ERROR = 'ERROR' + +FLOATINGIP_STATUS_ACTIVE = 'ACTIVE' +FLOATINGIP_STATUS_DOWN = 'DOWN' +FLOATINGIP_STATUS_ERROR = 'ERROR' + +DEVICE_OWNER_ROUTER_INTF = "network:router_interface" +DEVICE_OWNER_ROUTER_GW = "network:router_gateway" +DEVICE_OWNER_FLOATINGIP = "network:floatingip" +DEVICE_OWNER_DHCP = "network:dhcp" +DEVICE_OWNER_DVR_INTERFACE = "network:router_interface_distributed" +DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED = "network:router_interface_distributed" +DEVICE_OWNER_AGENT_GW = "network:floatingip_agent_gateway" +DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat" + +DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port" + +FLOATINGIP_KEY = '_floatingips' +INTERFACE_KEY = '_interfaces' +METERING_LABEL_KEY = '_metering_labels' +FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces' +SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces' + +IPv4 = 'IPv4' +IPv6 = 'IPv6' + +DHCP_RESPONSE_PORT = 68 + +MIN_VLAN_TAG = 1 +MAX_VLAN_TAG = 4094 +MAX_VXLAN_VNI = 16777215 +FLOODING_ENTRY = ['00:00:00:00:00:00', '0.0.0.0'] + +EXT_NS_COMP = '_backward_comp_e_ns' +EXT_NS = '_extension_ns' +XML_NS_V20 = 'http://openstack.org/quantum/api/v2.0' +XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance" +XSI_ATTR = "xsi:nil" +XSI_NIL_ATTR = "xmlns:xsi" +ATOM_NAMESPACE = "http://www.w3.org/2005/Atom" +ATOM_XMLNS = "xmlns:atom" +ATOM_LINK_NOTATION = "{%s}link" % ATOM_NAMESPACE +TYPE_XMLNS = "xmlns:quantum" +TYPE_ATTR = "quantum:type" +VIRTUAL_ROOT_KEY = "_v_root" + +TYPE_BOOL = "bool" +TYPE_INT = "int" +TYPE_LONG = "long" +TYPE_FLOAT = "float" +TYPE_LIST 
= "list" +TYPE_DICT = "dict" + +AGENT_TYPE_DHCP = 'DHCP agent' +AGENT_TYPE_OVS = 'Open vSwitch agent' +AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent' +AGENT_TYPE_HYPERV = 'HyperV agent' +AGENT_TYPE_NEC = 'NEC plugin agent' +AGENT_TYPE_OFA = 'OFA driver agent' +AGENT_TYPE_L3 = 'L3 agent' +AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent' +AGENT_TYPE_MLNX = 'Mellanox plugin agent' +AGENT_TYPE_METERING = 'Metering agent' +AGENT_TYPE_METADATA = 'Metadata agent' +AGENT_TYPE_SDNVE = 'IBM SDN-VE agent' +L2_AGENT_TOPIC = 'N/A' + +PAGINATION_INFINITE = 'infinite' + +SORT_DIRECTION_ASC = 'asc' +SORT_DIRECTION_DESC = 'desc' + +PORT_BINDING_EXT_ALIAS = 'binding' +L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler' +DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler' +LBAAS_AGENT_SCHEDULER_EXT_ALIAS = 'lbaas_agent_scheduler' +L3_DISTRIBUTED_EXT_ALIAS = 'dvr' + +# Protocol names and numbers for Security Groups/Firewalls +PROTO_NAME_TCP = 'tcp' +PROTO_NAME_ICMP = 'icmp' +PROTO_NAME_ICMP_V6 = 'icmpv6' +PROTO_NAME_UDP = 'udp' +PROTO_NUM_TCP = 6 +PROTO_NUM_ICMP = 1 +PROTO_NUM_ICMP_V6 = 58 +PROTO_NUM_UDP = 17 + +# List of ICMPv6 types that should be allowed by default: +# Multicast Listener Query (130), +# Multicast Listener Report (131), +# Multicast Listener Done (132), +# Neighbor Solicitation (135), +# Neighbor Advertisement (136) +ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136] +ICMPV6_TYPE_RA = 134 + +DHCPV6_STATEFUL = 'dhcpv6-stateful' +DHCPV6_STATELESS = 'dhcpv6-stateless' +IPV6_SLAAC = 'slaac' +IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC] + +IPV6_LLA_PREFIX = 'fe80::/64' + +# Linux interface max length +DEVICE_NAME_MAX_LEN = 15 diff --git a/icehouse-patches/neutron/dvr-patch/neutron/common/exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/common/exceptions.py new file mode 100644 index 00000000..7fa63aff --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/common/exceptions.py @@ -0,0 +1,321 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Neutron base exception handling. +""" + +from neutron.openstack.common import excutils + + +class NeutronException(Exception): + """Base Neutron Exception. + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
+ """ + message = _("An unknown exception occurred.") + + def __init__(self, **kwargs): + try: + super(NeutronException, self).__init__(self.message % kwargs) + self.msg = self.message % kwargs + except Exception: + with excutils.save_and_reraise_exception() as ctxt: + if not self.use_fatal_exceptions(): + ctxt.reraise = False + # at least get the core message out if something happened + super(NeutronException, self).__init__(self.message) + + def __unicode__(self): + return unicode(self.msg) + + def use_fatal_exceptions(self): + return False + + +class BadRequest(NeutronException): + message = _('Bad %(resource)s request: %(msg)s') + + +class NotFound(NeutronException): + pass + + +class Conflict(NeutronException): + pass + + +class NotAuthorized(NeutronException): + message = _("Not authorized.") + + +class ServiceUnavailable(NeutronException): + message = _("The service is unavailable") + + +class AdminRequired(NotAuthorized): + message = _("User does not have admin privileges: %(reason)s") + + +class PolicyNotAuthorized(NotAuthorized): + message = _("Policy doesn't allow %(action)s to be performed.") + + +class NetworkNotFound(NotFound): + message = _("Network %(net_id)s could not be found") + + +class SubnetNotFound(NotFound): + message = _("Subnet %(subnet_id)s could not be found") + + +class PortNotFound(NotFound): + message = _("Port %(port_id)s could not be found") + + +class PortNotFoundOnNetwork(NotFound): + message = _("Port %(port_id)s could not be found " + "on network %(net_id)s") + + +class PolicyFileNotFound(NotFound): + message = _("Policy configuration policy.json could not be found") + + +class PolicyInitError(NeutronException): + message = _("Failed to init policy %(policy)s because %(reason)s") + + +class PolicyCheckError(NeutronException): + message = _("Failed to check policy %(policy)s because %(reason)s") + + +class StateInvalid(BadRequest): + message = _("Unsupported port state: %(port_state)s") + + +class InUse(NeutronException): + message = _("The resource is inuse") + + +class NetworkInUse(InUse): + message = _("Unable to complete operation on network %(net_id)s. " + "There are one or more ports still in use on the network.") + + +class SubnetInUse(InUse): + message = _("Unable to complete operation on subnet %(subnet_id)s. " + "One or more ports have an IP allocation from this subnet.") + + +class PortInUse(InUse): + message = _("Unable to complete operation on port %(port_id)s " + "for network %(net_id)s. Port already has an attached" + "device %(device_id)s.") + + +class MacAddressInUse(InUse): + message = _("Unable to complete operation for network %(net_id)s. " + "The mac address %(mac)s is in use.") + + +class HostRoutesExhausted(BadRequest): + # NOTE(xchenum): probably make sense to use quota exceeded exception? + message = _("Unable to complete operation for %(subnet_id)s. " + "The number of host routes exceeds the limit %(quota)s.") + + +class DNSNameServersExhausted(BadRequest): + # NOTE(xchenum): probably make sense to use quota exceeded exception? + message = _("Unable to complete operation for %(subnet_id)s. " + "The number of DNS nameservers exceeds the limit %(quota)s.") + + +class IpAddressInUse(InUse): + message = _("Unable to complete operation for network %(net_id)s. " + "The IP address %(ip_address)s is in use.") + + +class VlanIdInUse(InUse): + message = _("Unable to create the network. 
" + "The VLAN %(vlan_id)s on physical network " + "%(physical_network)s is in use.") + + +class FlatNetworkInUse(InUse): + message = _("Unable to create the flat network. " + "Physical network %(physical_network)s is in use.") + + +class TunnelIdInUse(InUse): + message = _("Unable to create the network. " + "The tunnel ID %(tunnel_id)s is in use.") + + +class TenantNetworksDisabled(ServiceUnavailable): + message = _("Tenant network creation is not enabled.") + + +class ResourceExhausted(ServiceUnavailable): + pass + + +class NoNetworkAvailable(ResourceExhausted): + message = _("Unable to create the network. " + "No tenant network is available for allocation.") + + +class SubnetMismatchForPort(BadRequest): + message = _("Subnet on port %(port_id)s does not match " + "the requested subnet %(subnet_id)s") + + +class MalformedRequestBody(BadRequest): + message = _("Malformed request body: %(reason)s") + + +class Invalid(NeutronException): + def __init__(self, message=None): + self.message = message + super(Invalid, self).__init__() + + +class InvalidInput(BadRequest): + message = _("Invalid input for operation: %(error_message)s.") + + +class InvalidAllocationPool(BadRequest): + message = _("The allocation pool %(pool)s is not valid.") + + +class OverlappingAllocationPools(Conflict): + message = _("Found overlapping allocation pools:" + "%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.") + + +class OutOfBoundsAllocationPool(BadRequest): + message = _("The allocation pool %(pool)s spans " + "beyond the subnet cidr %(subnet_cidr)s.") + + +class MacAddressGenerationFailure(ServiceUnavailable): + message = _("Unable to generate unique mac on network %(net_id)s.") + + +class IpAddressGenerationFailure(Conflict): + message = _("No more IP addresses available on network %(net_id)s.") + + +class BridgeDoesNotExist(NeutronException): + message = _("Bridge %(bridge)s does not exist.") + + +class PreexistingDeviceFailure(NeutronException): + message = _("Creation failed. %(dev_name)s already exists.") + + +class SudoRequired(NeutronException): + message = _("Sudo privilege is required to run this command.") + + +class QuotaResourceUnknown(NotFound): + message = _("Unknown quota resources %(unknown)s.") + + +class OverQuota(Conflict): + message = _("Quota exceeded for resources: %(overs)s") + + +class QuotaMissingTenant(BadRequest): + message = _("Tenant-id was missing from Quota request") + + +class InvalidQuotaValue(Conflict): + message = _("Change would make usage less than 0 for the following " + "resources: %(unders)s") + + +class InvalidSharedSetting(Conflict): + message = _("Unable to reconfigure sharing settings for network " + "%(network)s. 
Multiple tenants are using it") + + +class InvalidExtensionEnv(BadRequest): + message = _("Invalid extension environment: %(reason)s") + + +class ExtensionsNotFound(NotFound): + message = _("Extensions not found: %(extensions)s") + + +class InvalidContentType(NeutronException): + message = _("Invalid content type %(content_type)s") + + +class ExternalIpAddressExhausted(BadRequest): + message = _("Unable to find any IP address on external " + "network %(net_id)s.") + + +class TooManyExternalNetworks(NeutronException): + message = _("More than one external network exists") + + +class InvalidConfigurationOption(NeutronException): + message = _("An invalid value was provided for %(opt_name)s: " + "%(opt_value)s") + + +class GatewayConflictWithAllocationPools(InUse): + message = _("Gateway ip %(ip_address)s conflicts with " + "allocation pool %(pool)s") + + +class GatewayIpInUse(InUse): + message = _("Current gateway ip %(ip_address)s already in use " + "by port %(port_id)s. Unable to update.") + + +class NetworkVlanRangeError(NeutronException): + message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'") + + def __init__(self, **kwargs): + # Convert vlan_range tuple to 'start:end' format for display + if isinstance(kwargs['vlan_range'], tuple): + kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range'] + super(NetworkVlanRangeError, self).__init__(**kwargs) + + +class NetworkVxlanPortRangeError(NeutronException): + message = _("Invalid network VXLAN port range: '%(vxlan_range)s'") + + +class VxlanNetworkUnsupported(NeutronException): + message = _("VXLAN Network unsupported.") + + +class DuplicatedExtension(NeutronException): + message = _("Found duplicate extension: %(alias)s") + + +class DeviceIDNotOwnedByTenant(Conflict): + message = _("The following device_id %(device_id)s is not owned by your " + "tenant or matches another tenants router.") + + +class InvalidCIDR(BadRequest): + message = _("Invalid CIDR %(input)s given as IP prefix") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/common/ipv6_utils.py b/icehouse-patches/neutron/dvr-patch/neutron/common/ipv6_utils.py new file mode 100644 index 00000000..fbe61e49 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/common/ipv6_utils.py @@ -0,0 +1,39 @@ +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +IPv6-related utilities and helper functions. 
+""" + +import netaddr + + +def get_ipv6_addr_by_EUI64(prefix, mac): + # Check if the prefix is IPv4 address + isIPv4 = netaddr.valid_ipv4(prefix) + if isIPv4: + msg = _("Unable to generate IP address by EUI64 for IPv4 prefix") + raise TypeError(msg) + try: + eui64 = int(netaddr.EUI(mac).eui64()) + prefix = netaddr.IPNetwork(prefix) + return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57)) + except (ValueError, netaddr.AddrFormatError): + raise TypeError(_('Bad prefix or mac format for generating IPv6 ' + 'address by EUI-64: %(prefix)s, %(mac)s:') + % {'prefix': prefix, 'mac': mac}) + except TypeError: + raise TypeError(_('Bad prefix type for generate IPv6 address by ' + 'EUI-64: %s') % prefix) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/common/rpc.py b/icehouse-patches/neutron/dvr-patch/neutron/common/rpc.py new file mode 100644 index 00000000..3800a683 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/common/rpc.py @@ -0,0 +1,278 @@ +# Copyright (c) 2012 OpenStack Foundation. +# Copyright (c) 2014 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +from oslo import messaging +from oslo.messaging import serializer as om_serializer + +from neutron.common import exceptions +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import service + + +LOG = logging.getLogger(__name__) + + +TRANSPORT = None +NOTIFIER = None + +ALLOWED_EXMODS = [ + exceptions.__name__, +] +EXTRA_EXMODS = [] + + +TRANSPORT_ALIASES = { + 'neutron.openstack.common.rpc.impl_fake': 'fake', + 'neutron.openstack.common.rpc.impl_qpid': 'qpid', + 'neutron.openstack.common.rpc.impl_kombu': 'rabbit', + 'neutron.openstack.common.rpc.impl_zmq': 'zmq', + 'neutron.rpc.impl_fake': 'fake', + 'neutron.rpc.impl_qpid': 'qpid', + 'neutron.rpc.impl_kombu': 'rabbit', + 'neutron.rpc.impl_zmq': 'zmq', +} + + +def init(conf): + global TRANSPORT, NOTIFIER + exmods = get_allowed_exmods() + TRANSPORT = messaging.get_transport(conf, + allowed_remote_exmods=exmods, + aliases=TRANSPORT_ALIASES) + serializer = RequestContextSerializer() + NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer) + + +def cleanup(): + global TRANSPORT, NOTIFIER + assert TRANSPORT is not None + assert NOTIFIER is not None + TRANSPORT.cleanup() + TRANSPORT = NOTIFIER = None + + +def add_extra_exmods(*args): + EXTRA_EXMODS.extend(args) + + +def clear_extra_exmods(): + del EXTRA_EXMODS[:] + + +def get_allowed_exmods(): + return ALLOWED_EXMODS + EXTRA_EXMODS + + +def get_client(target, version_cap=None, serializer=None): + assert TRANSPORT is not None + serializer = RequestContextSerializer(serializer) + return messaging.RPCClient(TRANSPORT, + target, + version_cap=version_cap, + serializer=serializer) + + +def get_server(target, endpoints, serializer=None): + assert TRANSPORT is not None + serializer = RequestContextSerializer(serializer) + return messaging.get_rpc_server(TRANSPORT, + target, + endpoints, + 
executor='eventlet', + serializer=serializer) + + +def get_notifier(service=None, host=None, publisher_id=None): + assert NOTIFIER is not None + if not publisher_id: + publisher_id = "%s.%s" % (service, host or cfg.CONF.host) + return NOTIFIER.prepare(publisher_id=publisher_id) + + +class RequestContextSerializer(om_serializer.Serializer): + """This serializer is used to convert RPC common context into + Neutron Context. + """ + def __init__(self, base=None): + super(RequestContextSerializer, self).__init__() + self._base = base + + def serialize_entity(self, ctxt, entity): + if not self._base: + return entity + return self._base.serialize_entity(ctxt, entity) + + def deserialize_entity(self, ctxt, entity): + if not self._base: + return entity + return self._base.deserialize_entity(ctxt, entity) + + def serialize_context(self, ctxt): + return ctxt.to_dict() + + def deserialize_context(self, ctxt): + rpc_ctxt_dict = ctxt.copy() + user_id = rpc_ctxt_dict.pop('user_id', None) + if not user_id: + user_id = rpc_ctxt_dict.pop('user', None) + tenant_id = rpc_ctxt_dict.pop('tenant_id', None) + if not tenant_id: + tenant_id = rpc_ctxt_dict.pop('project_id', None) + return context.Context(user_id, tenant_id, + load_admin_roles=False, **rpc_ctxt_dict) + + +class RpcProxy(object): + ''' + This class is created to facilitate migration from oslo-incubator + RPC layer implementation to oslo.messaging and is intended to + emulate RpcProxy class behaviour using oslo.messaging API once the + migration is applied. + ''' + RPC_API_NAMESPACE = None + + def __init__(self, topic, default_version, version_cap=None): + self.topic = topic + target = messaging.Target(topic=topic, version=default_version) + self._client = get_client(target, version_cap=version_cap) + + def make_msg(self, method, **kwargs): + return {'method': method, + 'namespace': self.RPC_API_NAMESPACE, + 'args': kwargs} + + def call(self, context, msg, **kwargs): + return self.__call_rpc_method( + context, msg, rpc_method='call', **kwargs) + + def cast(self, context, msg, **kwargs): + self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs) + + def fanout_cast(self, context, msg, **kwargs): + kwargs['fanout'] = True + self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs) + + def __call_rpc_method(self, context, msg, **kwargs): + options = dict( + ((opt, kwargs[opt]) + for opt in ('fanout', 'timeout', 'topic', 'version') + if kwargs.get(opt)) + ) + if msg['namespace']: + options['namespace'] = msg['namespace'] + + if options: + callee = self._client.prepare(**options) + else: + callee = self._client + + func = getattr(callee, kwargs['rpc_method']) + return func(context, msg['method'], **msg['args']) + + +class RpcCallback(object): + ''' + This class is created to facilitate migration from oslo-incubator + RPC layer implementation to oslo.messaging and is intended to set + callback version using oslo.messaging API once the migration is + applied. + ''' + RPC_API_VERSION = '1.0' + + def __init__(self): + super(RpcCallback, self).__init__() + self.target = messaging.Target(version=self.RPC_API_VERSION) + + +class Service(service.Service): + """Service object for binaries running on hosts. + + A service enables rpc by listening to queues based on topic and host. 
+ """ + def __init__(self, host, topic, manager=None, serializer=None): + super(Service, self).__init__() + self.host = host + self.topic = topic + self.serializer = serializer + if manager is None: + self.manager = self + else: + self.manager = manager + + def start(self): + super(Service, self).start() + + self.conn = create_connection(new=True) + LOG.debug("Creating Consumer connection for Service %s" % + self.topic) + + endpoints = [self.manager] + + # Share this same connection for these Consumers + self.conn.create_consumer(self.topic, endpoints, fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + self.conn.create_consumer(node_topic, endpoints, fanout=False) + + self.conn.create_consumer(self.topic, endpoints, fanout=True) + + # Hook to allow the manager to do other initializations after + # the rpc connection is created. + if callable(getattr(self.manager, 'initialize_service_hook', None)): + self.manager.initialize_service_hook(self) + + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def stop(self): + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass + super(Service, self).stop() + + +class Connection(object): + + def __init__(self): + super(Connection, self).__init__() + self.servers = [] + + def create_consumer(self, topic, endpoints, fanout=False): + target = messaging.Target( + topic=topic, server=cfg.CONF.host, fanout=fanout) + server = get_server(target, endpoints) + self.servers.append(server) + + def consume_in_threads(self): + for server in self.servers: + server.start() + return self.servers + + +# functions +def create_connection(new=True): + return Connection() + + +# exceptions +RPCException = messaging.MessagingException +RemoteError = messaging.RemoteError +MessagingTimeout = messaging.MessagingTimeout diff --git a/icehouse-patches/neutron/dvr-patch/neutron/common/test_lib.py b/icehouse-patches/neutron/dvr-patch/neutron/common/test_lib.py new file mode 100644 index 00000000..994de30f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/common/test_lib.py @@ -0,0 +1,42 @@ +# Copyright (c) 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Colorizer Code is borrowed from Twisted: +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +# describes parameters used by different unit/functional tests +# a plugin-specific testing mechanism should import this dictionary +# and override the values in it if needed (e.g., run_tests.py in +# neutron/plugins/openvswitch/ ) +test_config = {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/common/topics.py b/icehouse-patches/neutron/dvr-patch/neutron/common/topics.py new file mode 100644 index 00000000..9bb1956e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/common/topics.py @@ -0,0 +1,58 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +NETWORK = 'network' +SUBNET = 'subnet' +PORT = 'port' +SECURITY_GROUP = 'security_group' +L2POPULATION = 'l2population' +DVR = 'dvr' + +CREATE = 'create' +DELETE = 'delete' +UPDATE = 'update' + +AGENT = 'q-agent-notifier' +PLUGIN = 'q-plugin' +L3PLUGIN = 'q-l3-plugin' +DHCP = 'q-dhcp-notifer' +FIREWALL_PLUGIN = 'q-firewall-plugin' +METERING_PLUGIN = 'q-metering-plugin' +LOADBALANCER_PLUGIN = 'n-lbaas-plugin' + +L3_AGENT = 'l3_agent' +DHCP_AGENT = 'dhcp_agent' +METERING_AGENT = 'metering_agent' +LOADBALANCER_AGENT = 'n-lbaas_agent' + + +def get_topic_name(prefix, table, operation, host=None): + """Create a topic name. + + The topic name needs to be synced between the agent and the + plugin. The plugin will send a fanout message to all of the + listening agents so that the agents in turn can perform their + updates accordingly. + + :param prefix: Common prefix for the plugin/agent message queues. + :param table: The table in question (NETWORK, SUBNET, PORT). + :param operation: The operation that invokes notification (CREATE, + DELETE, UPDATE) + :param host: Add host to the topic + :returns: The topic name. + """ + if host: + return '%s-%s-%s.%s' % (prefix, table, operation, host) + return '%s-%s-%s' % (prefix, table, operation) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/common/utils.py b/icehouse-patches/neutron/dvr-patch/neutron/common/utils.py new file mode 100644 index 00000000..8521ec7f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/common/utils.py @@ -0,0 +1,310 @@ +# Copyright 2011, VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Borrowed from nova code base, more utilities will be added/borrowed as and +# when needed. + +"""Utilities and helper functions.""" + +import datetime +import functools +import hashlib +import logging as std_logging +import multiprocessing +import os +import random +import signal +import socket +import uuid + +from eventlet.green import subprocess +from oslo.config import cfg + +from neutron.common import constants as q_const +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging + + +TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +LOG = logging.getLogger(__name__) +SYNCHRONIZED_PREFIX = 'neutron-' + +synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX) + + +class cache_method_results(object): + """This decorator is intended for object methods only.""" + + def __init__(self, func): + self.func = func + functools.update_wrapper(self, func) + self._first_call = True + self._not_cached = object() + + def _get_from_cache(self, target_self, *args, **kwargs): + func_name = "%(module)s.%(class)s.%(func_name)s" % { + 'module': target_self.__module__, + 'class': target_self.__class__.__name__, + 'func_name': self.func.__name__, + } + key = (func_name,) + args + if kwargs: + key += dict2tuple(kwargs) + try: + item = target_self._cache.get(key, self._not_cached) + except TypeError: + LOG.debug(_("Method %(func_name)s cannot be cached due to " + "unhashable parameters: args: %(args)s, kwargs: " + "%(kwargs)s"), + {'func_name': func_name, + 'args': args, + 'kwargs': kwargs}) + return self.func(target_self, *args, **kwargs) + + if item is self._not_cached: + item = self.func(target_self, *args, **kwargs) + target_self._cache.set(key, item, None) + + return item + + def __call__(self, target_self, *args, **kwargs): + if not hasattr(target_self, '_cache'): + raise NotImplementedError( + "Instance of class %(module)s.%(class)s must contain _cache " + "attribute" % { + 'module': target_self.__module__, + 'class': target_self.__class__.__name__}) + if not target_self._cache: + if self._first_call: + LOG.debug(_("Instance of class %(module)s.%(class)s doesn't " + "contain attribute _cache therefore results " + "cannot be cached for %(func_name)s."), + {'module': target_self.__module__, + 'class': target_self.__class__.__name__, + 'func_name': self.func.__name__}) + self._first_call = False + return self.func(target_self, *args, **kwargs) + return self._get_from_cache(target_self, *args, **kwargs) + + def __get__(self, obj, objtype): + return functools.partial(self.__call__, obj) + + +def read_cached_file(filename, cache_info, reload_func=None): + """Read from a file if it has been modified. + + :param cache_info: dictionary to hold opaque cache. + :param reload_func: optional function to be called with data when + file is reloaded due to a modification. 
+
+    :returns: data from file
+
+    """
+    mtime = os.path.getmtime(filename)
+    if not cache_info or mtime != cache_info.get('mtime'):
+        LOG.debug(_("Reloading cached file %s"), filename)
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        if reload_func:
+            reload_func(cache_info['data'])
+    return cache_info['data']
+
+
+def find_config_file(options, config_file):
+    """Return the first config file found.
+
+    We search for the paste config file in the following order:
+    * If --config-file option is used, use that
+    * Search for the configuration files via common cfg directories
+    :retval Full path to config file, or None if no config file found
+    """
+    fix_path = lambda p: os.path.abspath(os.path.expanduser(p))
+    if options.get('config_file'):
+        if os.path.exists(options['config_file']):
+            return fix_path(options['config_file'])
+
+    dir_to_common = os.path.dirname(os.path.abspath(__file__))
+    root = os.path.join(dir_to_common, '..', '..', '..', '..')
+    # Handle standard directory search for the config file
+    config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')),
+                        fix_path(os.path.join('~', '.neutron-venv', 'etc',
+                                              'neutron')),
+                        fix_path('~'),
+                        os.path.join(cfg.CONF.state_path, 'etc'),
+                        os.path.join(cfg.CONF.state_path, 'etc', 'neutron'),
+                        fix_path(os.path.join('~', '.local',
+                                              'etc', 'neutron')),
+                        '/usr/etc/neutron',
+                        '/usr/local/etc/neutron',
+                        '/etc/neutron/',
+                        '/etc']
+
+    if 'plugin' in options:
+        config_file_dirs = [
+            os.path.join(x, 'neutron', 'plugins', options['plugin'])
+            for x in config_file_dirs
+        ]
+
+    if os.path.exists(os.path.join(root, 'plugins')):
+        plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc'))
+                   for p in os.listdir(os.path.join(root, 'plugins'))]
+        plugins = [p for p in plugins if os.path.isdir(p)]
+        config_file_dirs.extend(plugins)
+
+    for cfg_dir in config_file_dirs:
+        cfg_file = os.path.join(cfg_dir, config_file)
+        if os.path.exists(cfg_file):
+            return cfg_file
+
+
+def _subprocess_setup():
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
+                     env=None):
+    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
+                            stderr=stderr, preexec_fn=_subprocess_setup,
+                            close_fds=True, env=env)
+
+
+def parse_mappings(mapping_list, unique_values=True):
+    """Parse a list of mapping strings into a dictionary.
+
+    :param mapping_list: a list of strings of the form '<key>:<value>'
+    :param unique_values: values must be unique if True
+    :returns: a dict mapping keys to values
+    """
+    mappings = {}
+    for mapping in mapping_list:
+        mapping = mapping.strip()
+        if not mapping:
+            continue
+        split_result = mapping.split(':')
+        if len(split_result) != 2:
+            raise ValueError(_("Invalid mapping: '%s'") % mapping)
+        key = split_result[0].strip()
+        if not key:
+            raise ValueError(_("Missing key in mapping: '%s'") % mapping)
+        value = split_result[1].strip()
+        if not value:
+            raise ValueError(_("Missing value in mapping: '%s'") % mapping)
+        if key in mappings:
+            raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not "
+                               "unique") % {'key': key, 'mapping': mapping})
+        if unique_values and value in mappings.itervalues():
+            raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' "
+                               "not unique") % {'value': value,
+                                                'mapping': mapping})
+        mappings[key] = value
+    return mappings
+
+
+def get_hostname():
+    return socket.gethostname()
+
+
+def compare_elements(a, b):
+    """Compare elements if a and b have same elements.
+
+    This method doesn't consider ordering
+    """
+    if a is None:
+        a = []
+    if b is None:
+        b = []
+    return set(a) == set(b)
+
+
+def dict2str(dic):
+    return ','.join("%s=%s" % (key, val)
+                    for key, val in sorted(dic.iteritems()))
+
+
+def str2dict(string):
+    res_dict = {}
+    for keyvalue in string.split(','):
+        (key, value) = keyvalue.split('=', 1)
+        res_dict[key] = value
+    return res_dict
+
+
+def dict2tuple(d):
+    items = d.items()
+    items.sort()
+    return tuple(items)
+
+
+def diff_list_of_dict(old_list, new_list):
+    new_set = set([dict2str(l) for l in new_list])
+    old_set = set([dict2str(l) for l in old_list])
+    added = new_set - old_set
+    removed = old_set - new_set
+    return [str2dict(a) for a in added], [str2dict(r) for r in removed]
+
+
+def is_extension_supported(plugin, ext_alias):
+    return ext_alias in getattr(
+        plugin, "supported_extension_aliases", [])
+
+
+def log_opt_values(log):
+    cfg.CONF.log_opt_values(log, std_logging.DEBUG)
+
+
+def is_valid_vlan_tag(vlan):
+    return q_const.MIN_VLAN_TAG <= vlan <= q_const.MAX_VLAN_TAG
+
+
+def get_random_mac(base_mac):
+    mac = [int(base_mac[0], 16), int(base_mac[1], 16),
+           int(base_mac[2], 16), random.randint(0x00, 0xff),
+           random.randint(0x00, 0xff), random.randint(0x00, 0xff)]
+    if base_mac[3] != '00':
+        mac[3] = int(base_mac[3], 16)
+    return ':'.join(["%02x" % x for x in mac])
+
+
+def get_random_string(length):
+    """Get a random hex string of the specified length.
+
+    based on Cinder library
+    cinder/transfer/api.py
+    """
+    rndstr = ""
+    random.seed(datetime.datetime.now().microsecond)
+    while len(rndstr) < length:
+        rndstr += hashlib.sha224(str(random.random())).hexdigest()
+
+    return rndstr[0:length]
+
+
+def get_dhcp_agent_device_id(network_id, host):
+    # Split host so as to always use only the hostname and
+    # not the domain name. This will guarantee consistency
+    # whether a local hostname or an fqdn is passed in.
+ local_hostname = host.split('.')[0] + host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname)) + return 'dhcp%s-%s' % (host_uuid, network_id) + + +def cpu_count(): + try: + return multiprocessing.cpu_count() + except NotImplementedError: + return 1 diff --git a/icehouse-patches/neutron/dvr-patch/neutron/context.py b/icehouse-patches/neutron/dvr-patch/neutron/context.py new file mode 100644 index 00000000..fd4da923 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/context.py @@ -0,0 +1,174 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Context: context for security/db session.""" + +import copy + +import datetime + +from neutron.db import api as db_api +from neutron.openstack.common import context as common_context +from neutron.openstack.common import local +from neutron.openstack.common import log as logging +from neutron import policy + + +LOG = logging.getLogger(__name__) + + +class ContextBase(common_context.RequestContext): + """Security context and request information. + + Represents the user taking a given action within the system. + + """ + + def __init__(self, user_id, tenant_id, is_admin=None, read_deleted="no", + roles=None, timestamp=None, load_admin_roles=True, + request_id=None, tenant_name=None, user_name=None, + overwrite=True, **kwargs): + """Object initialization. + + :param read_deleted: 'no' indicates deleted records are hidden, 'yes' + indicates deleted records are visible, 'only' indicates that + *only* deleted records are visible. + + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + super(ContextBase, self).__init__(user=user_id, tenant=tenant_id, + is_admin=is_admin, + request_id=request_id) + self.user_name = user_name + self.tenant_name = tenant_name + + self.read_deleted = read_deleted + if not timestamp: + timestamp = datetime.datetime.utcnow() + self.timestamp = timestamp + self._session = None + self.roles = roles or [] + if self.is_admin is None: + self.is_admin = policy.check_is_admin(self) + elif self.is_admin and load_admin_roles: + # Ensure context is populated with admin roles + admin_roles = policy.get_admin_roles() + if admin_roles: + self.roles = list(set(self.roles) | set(admin_roles)) + # Allow openstack.common.log to access the context + if overwrite or not hasattr(local.store, 'context'): + local.store.context = self + + # Log only once the context has been configured to prevent + # format errors. 
+ if kwargs: + LOG.debug(_('Arguments dropped when creating ' + 'context: %s'), kwargs) + + @property + def project_id(self): + return self.tenant + + @property + def tenant_id(self): + return self.tenant + + @tenant_id.setter + def tenant_id(self, tenant_id): + self.tenant = tenant_id + + @property + def user_id(self): + return self.user + + @user_id.setter + def user_id(self, user_id): + self.user = user_id + + def _get_read_deleted(self): + return self._read_deleted + + def _set_read_deleted(self, read_deleted): + if read_deleted not in ('no', 'yes', 'only'): + raise ValueError(_("read_deleted can only be one of 'no', " + "'yes' or 'only', not %r") % read_deleted) + self._read_deleted = read_deleted + + def _del_read_deleted(self): + del self._read_deleted + + read_deleted = property(_get_read_deleted, _set_read_deleted, + _del_read_deleted) + + def to_dict(self): + return {'user_id': self.user_id, + 'tenant_id': self.tenant_id, + 'project_id': self.project_id, + 'is_admin': self.is_admin, + 'read_deleted': self.read_deleted, + 'roles': self.roles, + 'timestamp': str(self.timestamp), + 'request_id': self.request_id, + 'tenant': self.tenant, + 'user': self.user, + 'tenant_name': self.tenant_name, + 'project_name': self.tenant_name, + 'user_name': self.user_name, + } + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def elevated(self, read_deleted=None): + """Return a version of this context with admin flag set.""" + context = copy.copy(self) + context.is_admin = True + + if 'admin' not in [x.lower() for x in context.roles]: + context.roles.append('admin') + + if read_deleted is not None: + context.read_deleted = read_deleted + + return context + + +class Context(ContextBase): + @property + def session(self): + if self._session is None: + self._session = db_api.get_session() + return self._session + + +def get_admin_context(read_deleted="no", load_admin_roles=True): + return Context(user_id=None, + tenant_id=None, + is_admin=True, + read_deleted=read_deleted, + load_admin_roles=load_admin_roles, + overwrite=False) + + +def get_admin_context_without_session(read_deleted="no"): + return ContextBase(user_id=None, + tenant_id=None, + is_admin=True, + read_deleted=read_deleted) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/agents_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/agents_db.py new file mode 100644 index 00000000..bb805da3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/agents_db.py @@ -0,0 +1,219 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
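For orientation, a minimal usage sketch of the request context defined above. Assumptions: the patched Neutron tree is importable (the package __init__ installs the '_' gettext builtin), and 'fake-user'/'fake-tenant' are placeholder IDs, not values from this patch. It shows the to_dict() round trip that RequestContextSerializer in neutron/common/rpc.py relies on when a context crosses RPC:

    from neutron import context

    # load_admin_roles=False keeps the sketch from touching the policy engine;
    # overwrite=False leaves any existing greenthread-local context untouched.
    ctxt = context.Context(user_id='fake-user', tenant_id='fake-tenant',
                           is_admin=False, load_admin_roles=False,
                           overwrite=False)

    payload = ctxt.to_dict()        # what serialize_context() hands to oslo.messaging
    assert payload['tenant_id'] == payload['project_id']

    # from_dict() is the classmethod defined above; deserialize_context() in
    # common/rpc.py performs an equivalent rebuild on the server side.
    restored = context.Context.from_dict(payload)
    assert restored.user_id == 'fake-user'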
+ +from eventlet import greenthread + +from oslo.config import cfg +from oslo.db import exception as db_exc +import sqlalchemy as sa +from sqlalchemy.orm import exc + +from neutron.common import rpc as n_rpc +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import agent as ext_agent +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import timeutils + +LOG = logging.getLogger(__name__) +cfg.CONF.register_opt( + cfg.IntOpt('agent_down_time', default=75, + help=_("Seconds to regard the agent is down; should be at " + "least twice report_interval, to be sure the " + "agent is down for good."))) + + +class Agent(model_base.BASEV2, models_v2.HasId): + """Represents agents running in neutron deployments.""" + + __table_args__ = ( + sa.UniqueConstraint('agent_type', 'host', + name='uniq_agents0agent_type0host'), + ) + + # L3 agent, DHCP agent, OVS agent, LinuxBridge + agent_type = sa.Column(sa.String(255), nullable=False) + binary = sa.Column(sa.String(255), nullable=False) + # TOPIC is a fanout exchange topic + topic = sa.Column(sa.String(255), nullable=False) + # TOPIC.host is a target topic + host = sa.Column(sa.String(255), nullable=False) + admin_state_up = sa.Column(sa.Boolean, default=True, + nullable=False) + # the time when first report came from agents + created_at = sa.Column(sa.DateTime, nullable=False) + # the time when first report came after agents start + started_at = sa.Column(sa.DateTime, nullable=False) + # updated when agents report + heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False) + # description is note for admin user + description = sa.Column(sa.String(255)) + # configurations: a json dict string, I think 4095 is enough + configurations = sa.Column(sa.String(4095), nullable=False) + + @property + def is_active(self): + return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp) + + +class AgentDbMixin(ext_agent.AgentPluginBase): + """Mixin class to add agent extension to db_base_plugin_v2.""" + + def _get_agent(self, context, id): + try: + agent = self._get_by_id(context, Agent, id) + except exc.NoResultFound: + raise ext_agent.AgentNotFound(id=id) + return agent + + @classmethod + def is_agent_down(cls, heart_beat_time): + return timeutils.is_older_than(heart_beat_time, + cfg.CONF.agent_down_time) + + def get_configuration_dict(self, agent_db): + try: + conf = jsonutils.loads(agent_db.configurations) + except Exception: + msg = _('Configuration for agent %(agent_type)s on host %(host)s' + ' is invalid.') + LOG.warn(msg, {'agent_type': agent_db.agent_type, + 'host': agent_db.host}) + conf = {} + return conf + + def _make_agent_dict(self, agent, fields=None): + attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get( + ext_agent.RESOURCE_NAME + 's') + res = dict((k, agent[k]) for k in attr + if k not in ['alive', 'configurations']) + res['alive'] = not AgentDbMixin.is_agent_down( + res['heartbeat_timestamp']) + res['configurations'] = self.get_configuration_dict(agent) + return self._fields(res, fields) + + def delete_agent(self, context, id): + with context.session.begin(subtransactions=True): + agent = self._get_agent(context, id) + context.session.delete(agent) + + def update_agent(self, context, id, agent): + agent_data = agent['agent'] + with context.session.begin(subtransactions=True): + agent = self._get_agent(context, id) + agent.update(agent_data) + return 
self._make_agent_dict(agent) + + def get_agents_db(self, context, filters=None): + query = self._get_collection_query(context, Agent, filters=filters) + return query.all() + + def get_agents(self, context, filters=None, fields=None): + return self._get_collection(context, Agent, + self._make_agent_dict, + filters=filters, fields=fields) + + def _get_agent_by_type_and_host(self, context, agent_type, host): + query = self._model_query(context, Agent) + try: + agent_db = query.filter(Agent.agent_type == agent_type, + Agent.host == host).one() + return agent_db + except exc.NoResultFound: + raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type, + host=host) + except exc.MultipleResultsFound: + raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type, + host=host) + + def get_agent(self, context, id, fields=None): + agent = self._get_agent(context, id) + return self._make_agent_dict(agent, fields) + + def _create_or_update_agent(self, context, agent): + with context.session.begin(subtransactions=True): + res_keys = ['agent_type', 'binary', 'host', 'topic'] + res = dict((k, agent[k]) for k in res_keys) + + configurations_dict = agent.get('configurations', {}) + res['configurations'] = jsonutils.dumps(configurations_dict) + current_time = timeutils.utcnow() + try: + agent_db = self._get_agent_by_type_and_host( + context, agent['agent_type'], agent['host']) + res['heartbeat_timestamp'] = current_time + if agent.get('start_flag'): + res['started_at'] = current_time + greenthread.sleep(0) + agent_db.update(res) + except ext_agent.AgentNotFoundByTypeHost: + greenthread.sleep(0) + res['created_at'] = current_time + res['started_at'] = current_time + res['heartbeat_timestamp'] = current_time + res['admin_state_up'] = True + agent_db = Agent(**res) + greenthread.sleep(0) + context.session.add(agent_db) + greenthread.sleep(0) + + def create_or_update_agent(self, context, agent): + """Create or update agent according to report.""" + + try: + return self._create_or_update_agent(context, agent) + except db_exc.DBDuplicateEntry as e: + with excutils.save_and_reraise_exception() as ctxt: + if e.columns == ['agent_type', 'host']: + # It might happen that two or more concurrent transactions + # are trying to insert new rows having the same value of + # (agent_type, host) pair at the same time (if there has + # been no such entry in the table and multiple agent status + # updates are being processed at the moment). In this case + # having a unique constraint on (agent_type, host) columns + # guarantees that only one transaction will succeed and + # insert a new agent entry, others will fail and be rolled + # back. 
That means we must retry them one more time: no + # INSERTs will be issued, because + # _get_agent_by_type_and_host() will return the existing + # agent entry, which will be updated multiple times + ctxt.reraise = False + return self._create_or_update_agent(context, agent) + + +class AgentExtRpcCallback(n_rpc.RpcCallback): + """Processes the rpc report in plugin implementations.""" + + RPC_API_VERSION = '1.0' + START_TIME = timeutils.utcnow() + + def __init__(self, plugin=None): + super(AgentExtRpcCallback, self).__init__() + self.plugin = plugin + + def report_state(self, context, **kwargs): + """Report state from agent to server.""" + time = kwargs['time'] + time = timeutils.parse_strtime(time) + if self.START_TIME > time: + LOG.debug(_("Message with invalid timestamp received")) + return + agent_state = kwargs['agent_state']['agent_state'] + if not self.plugin: + self.plugin = manager.NeutronManager.get_plugin() + self.plugin.create_or_update_agent(context, agent_state) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/agentschedulers_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/agentschedulers_db.py new file mode 100644 index 00000000..2022dbe3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/agentschedulers_db.py @@ -0,0 +1,226 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.orm import joinedload + +from neutron.common import constants +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import model_base +from neutron.extensions import agent as ext_agent +from neutron.extensions import dhcpagentscheduler +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +AGENTS_SCHEDULER_OPTS = [ + cfg.StrOpt('network_scheduler_driver', + default='neutron.scheduler.' 
+ 'dhcp_agent_scheduler.ChanceScheduler', + help=_('Driver to use for scheduling network to DHCP agent')), + cfg.BoolOpt('network_auto_schedule', default=True, + help=_('Allow auto scheduling networks to DHCP agent.')), + cfg.IntOpt('dhcp_agents_per_network', default=1, + help=_('Number of DHCP agents scheduled to host a network.')), +] + +cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS) + + +class NetworkDhcpAgentBinding(model_base.BASEV2): + """Represents binding between neutron networks and DHCP agents.""" + + network_id = sa.Column(sa.String(36), + sa.ForeignKey("networks.id", ondelete='CASCADE'), + primary_key=True) + dhcp_agent = orm.relation(agents_db.Agent) + dhcp_agent_id = sa.Column(sa.String(36), + sa.ForeignKey("agents.id", + ondelete='CASCADE'), + primary_key=True) + + +class AgentSchedulerDbMixin(agents_db.AgentDbMixin): + """Common class for agent scheduler mixins.""" + + # agent notifiers to handle agent update operations; + # should be updated by plugins; + agent_notifiers = { + constants.AGENT_TYPE_DHCP: None, + constants.AGENT_TYPE_L3: None, + constants.AGENT_TYPE_LOADBALANCER: None, + } + + @staticmethod + def is_eligible_agent(active, agent): + if active is None: + # filtering by activeness is disabled, all agents are eligible + return True + else: + # note(rpodolyaka): original behaviour is saved here: if active + # filter is set, only agents which are 'up' + # (i.e. have a recent heartbeat timestamp) + # are eligible, even if active is False + return not agents_db.AgentDbMixin.is_agent_down( + agent['heartbeat_timestamp']) + + def update_agent(self, context, id, agent): + original_agent = self.get_agent(context, id) + result = super(AgentSchedulerDbMixin, self).update_agent( + context, id, agent) + agent_data = agent['agent'] + agent_notifier = self.agent_notifiers.get(original_agent['agent_type']) + if (agent_notifier and + 'admin_state_up' in agent_data and + original_agent['admin_state_up'] != agent_data['admin_state_up']): + agent_notifier.agent_updated(context, + agent_data['admin_state_up'], + original_agent['host']) + return result + + +class DhcpAgentSchedulerDbMixin(dhcpagentscheduler + .DhcpAgentSchedulerPluginBase, + AgentSchedulerDbMixin): + """Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2. 
+ """ + + network_scheduler = None + + def get_dhcp_agents_hosting_networks( + self, context, network_ids, active=None): + if not network_ids: + return [] + query = context.session.query(NetworkDhcpAgentBinding) + query = query.options(joinedload('dhcp_agent')) + if len(network_ids) == 1: + query = query.filter( + NetworkDhcpAgentBinding.network_id == network_ids[0]) + elif network_ids: + query = query.filter( + NetworkDhcpAgentBinding.network_id in network_ids) + if active is not None: + query = (query.filter(agents_db.Agent.admin_state_up == active)) + + return [binding.dhcp_agent + for binding in query + if AgentSchedulerDbMixin.is_eligible_agent(active, + binding.dhcp_agent)] + + def add_network_to_dhcp_agent(self, context, id, network_id): + self._get_network(context, network_id) + with context.session.begin(subtransactions=True): + agent_db = self._get_agent(context, id) + if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or + not agent_db['admin_state_up']): + raise dhcpagentscheduler.InvalidDHCPAgent(id=id) + dhcp_agents = self.get_dhcp_agents_hosting_networks( + context, [network_id]) + for dhcp_agent in dhcp_agents: + if id == dhcp_agent.id: + raise dhcpagentscheduler.NetworkHostedByDHCPAgent( + network_id=network_id, agent_id=id) + binding = NetworkDhcpAgentBinding() + binding.dhcp_agent_id = id + binding.network_id = network_id + context.session.add(binding) + dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) + if dhcp_notifier: + dhcp_notifier.network_added_to_agent( + context, network_id, agent_db.host) + + def remove_network_from_dhcp_agent(self, context, id, network_id): + agent = self._get_agent(context, id) + with context.session.begin(subtransactions=True): + try: + query = context.session.query(NetworkDhcpAgentBinding) + binding = query.filter( + NetworkDhcpAgentBinding.network_id == network_id, + NetworkDhcpAgentBinding.dhcp_agent_id == id).one() + except exc.NoResultFound: + raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent( + network_id=network_id, agent_id=id) + + # reserve the port, so the ip is reused on a subsequent add + device_id = utils.get_dhcp_agent_device_id(network_id, + agent['host']) + filters = dict(device_id=[device_id]) + ports = self.get_ports(context, filters=filters) + for port in ports: + port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT + self.update_port(context, port['id'], dict(port=port)) + + context.session.delete(binding) + dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) + if dhcp_notifier: + dhcp_notifier.network_removed_from_agent( + context, network_id, agent.host) + + def list_networks_on_dhcp_agent(self, context, id): + query = context.session.query(NetworkDhcpAgentBinding.network_id) + query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id) + + net_ids = [item[0] for item in query] + if net_ids: + return {'networks': + self.get_networks(context, filters={'id': net_ids})} + else: + return {'networks': []} + + def list_active_networks_on_active_dhcp_agent(self, context, host): + try: + agent = self._get_agent_by_type_and_host( + context, constants.AGENT_TYPE_DHCP, host) + except ext_agent.AgentNotFoundByTypeHost: + LOG.debug("DHCP Agent not found on host %s", host) + return [] + + if not agent.admin_state_up: + return [] + query = context.session.query(NetworkDhcpAgentBinding.network_id) + query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id) + + net_ids = [item[0] for item in query] + if net_ids: + return self.get_networks( + context, + filters={'id': 
net_ids, 'admin_state_up': [True]} + ) + else: + return [] + + def list_dhcp_agents_hosting_network(self, context, network_id): + dhcp_agents = self.get_dhcp_agents_hosting_networks( + context, [network_id]) + agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents] + if agent_ids: + return { + 'agents': self.get_agents(context, filters={'id': agent_ids})} + else: + return {'agents': []} + + def schedule_network(self, context, created_network): + if self.network_scheduler: + return self.network_scheduler.schedule( + self, context, created_network) + + def auto_schedule_networks(self, context, host): + if self.network_scheduler: + self.network_scheduler.auto_schedule_networks(self, context, host) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/allowedaddresspairs_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/allowedaddresspairs_db.py new file mode 100644 index 00000000..b648c8c4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/allowedaddresspairs_db.py @@ -0,0 +1,147 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.v2 import attributes as attr +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import allowedaddresspairs as addr_pair + + +class AllowedAddressPair(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True) + ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) + + port = orm.relationship( + models_v2.Port, + backref=orm.backref("allowed_address_pairs", + lazy="joined", cascade="delete")) + + +class AllowedAddressPairsMixin(object): + """Mixin class for allowed address pairs.""" + + def _process_create_allowed_address_pairs(self, context, port, + allowed_address_pairs): + if not attr.is_attr_set(allowed_address_pairs): + return [] + with context.session.begin(subtransactions=True): + for address_pair in allowed_address_pairs: + # use port.mac_address if no mac address in address pair + if 'mac_address' not in address_pair: + address_pair['mac_address'] = port['mac_address'] + db_pair = AllowedAddressPair( + port_id=port['id'], + mac_address=address_pair['mac_address'], + ip_address=address_pair['ip_address']) + context.session.add(db_pair) + + return allowed_address_pairs + + def get_allowed_address_pairs(self, context, port_id): + pairs = (context.session.query(AllowedAddressPair). + filter_by(port_id=port_id)) + return [self._make_allowed_address_pairs_dict(pair) + for pair in pairs] + + def _extend_port_dict_allowed_address_pairs(self, port_res, port_db): + # If port_db is provided, allowed address pairs will be accessed via + # sqlalchemy models. As they're loaded together with ports this + # will not cause an extra query. 
+ allowed_address_pairs = [ + self._make_allowed_address_pairs_dict(address_pair) for + address_pair in port_db.allowed_address_pairs] + port_res[addr_pair.ADDRESS_PAIRS] = allowed_address_pairs + return port_res + + # Register dict extend functions for ports + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attr.PORTS, ['_extend_port_dict_allowed_address_pairs']) + + def _delete_allowed_address_pairs(self, context, id): + query = self._model_query(context, AllowedAddressPair) + with context.session.begin(subtransactions=True): + query.filter(AllowedAddressPair.port_id == id).delete() + + def _make_allowed_address_pairs_dict(self, allowed_address_pairs, + fields=None): + res = {'mac_address': allowed_address_pairs['mac_address'], + 'ip_address': allowed_address_pairs['ip_address']} + return self._fields(res, fields) + + def _has_address_pairs(self, port): + return (attr.is_attr_set(port['port'][addr_pair.ADDRESS_PAIRS]) + and port['port'][addr_pair.ADDRESS_PAIRS] != []) + + def _check_update_has_allowed_address_pairs(self, port): + """Determine if request has an allowed address pair. + + Return True if the port parameter has a non-empty + 'allowed_address_pairs' attribute. Otherwise returns False. + """ + return (addr_pair.ADDRESS_PAIRS in port['port'] and + self._has_address_pairs(port)) + + def _check_update_deletes_allowed_address_pairs(self, port): + """Determine if request deletes address pair. + + Return True if port has as a allowed address pair and its value + is either [] or not is_attr_set, otherwise return False + """ + return (addr_pair.ADDRESS_PAIRS in port['port'] and + not self._has_address_pairs(port)) + + def is_address_pairs_attribute_updated(self, port, update_attrs): + """Check if the address pairs attribute is being updated. + + Returns True if there is an update. This can be used to decide + if a port update notification should be sent to agents or third + party controllers. + """ + + new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS) + if new_pairs is None: + return False + old_pairs = port.get(addr_pair.ADDRESS_PAIRS) + + # Missing or unchanged address pairs in attributes mean no update + return new_pairs != old_pairs + + def update_address_pairs_on_port(self, context, port_id, port, + original_port, updated_port): + """Update allowed address pairs on port. + + Returns True if an update notification is required. Notification + is not done here because other changes on the port may need + notification. This method is expected to be called within + a transaction. + """ + new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS) + + if self.is_address_pairs_attribute_updated(original_port, + port['port']): + updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs + self._delete_allowed_address_pairs(context, port_id) + self._process_create_allowed_address_pairs( + context, updated_port, new_pairs) + return True + + return False diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/api.py b/icehouse-patches/neutron/dvr-patch/neutron/db/api.py new file mode 100644 index 00000000..3749081f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/api.py @@ -0,0 +1,84 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +from oslo.db.sqlalchemy import session +import sqlalchemy as sql + +from neutron.db import model_base +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +BASE = model_base.BASEV2 + +_FACADE = None + + +def _create_facade_lazily(): + global _FACADE + + if _FACADE is None: + _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True) + + return _FACADE + + +def configure_db(): + """Configure database. + + Establish the database, create an engine if needed, and register + the models. + """ + register_models() + + +def clear_db(base=BASE): + unregister_models(base) + + +def get_engine(): + """Helper method to grab engine.""" + facade = _create_facade_lazily() + return facade.get_engine() + + +def get_session(autocommit=True, expire_on_commit=False): + """Helper method to grab session.""" + facade = _create_facade_lazily() + return facade.get_session(autocommit=autocommit, + expire_on_commit=expire_on_commit) + + +def register_models(base=BASE): + """Register Models and create properties.""" + try: + facade = _create_facade_lazily() + engine = facade.get_engine() + base.metadata.create_all(engine) + except sql.exc.OperationalError as e: + LOG.info(_("Database registration exception: %s"), e) + return False + return True + + +def unregister_models(base=BASE): + """Unregister Models, useful clearing out data before testing.""" + try: + facade = _create_facade_lazily() + engine = facade.get_engine() + base.metadata.drop_all(engine) + except Exception: + LOG.exception(_("Database exception")) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/db_base_plugin_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/db/db_base_plugin_v2.py new file mode 100644 index 00000000..4d804f55 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/db_base_plugin_v2.py @@ -0,0 +1,1625 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
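A minimal sketch of how the helpers in neutron/db/api.py above are typically exercised. Assumptions: the patched Neutron tree is importable and the [database] connection option in the loaded configuration points at a reachable database (for example the SQLite URL used in tests); the query target is just an illustration using a model from this patch:

    from neutron.db import api as db_api
    from neutron.db import models_v2

    db_api.configure_db()            # register_models(): metadata.create_all()
    session = db_api.get_session()   # autocommit=True, expire_on_commit=False

    # Any model declared on model_base.BASEV2 (e.g. models_v2.Network) can now
    # be queried through the session returned by the engine facade.
    print(session.query(models_v2.Network).count())

    db_api.clear_db()                # unregister_models(): metadata.drop_all()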
+ +import random +import weakref + +import netaddr +from oslo.config import cfg +from sqlalchemy import event +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy import sql + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import ipv6_utils +from neutron import context as ctx +from neutron.db import api as db +from neutron.db import models_v2 +from neutron.db import sqlalchemyutils +from neutron.extensions import l3 +from neutron import manager +from neutron import neutron_plugin_base_v2 +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as service_constants + + +LOG = logging.getLogger(__name__) + +# Ports with the following 'device_owner' values will not prevent +# network deletion. If delete_network() finds that all ports on a +# network have these owners, it will explicitly delete each port +# and allow network deletion to continue. Similarly, if delete_subnet() +# finds out that all existing IP Allocations are associated with ports +# with these owners, it will allow subnet deletion to proceed with the +# IP allocations being cleaned up by cascade. +AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP] + + +class CommonDbMixin(object): + """Common methods used in core and service plugins.""" + # Plugins, mixin classes implementing extension will register + # hooks into the dict below for "augmenting" the "core way" of + # building a query for retrieving objects from a model class. + # To this aim, the register_model_query_hook and unregister_query_hook + # from this class should be invoked + _model_query_hooks = {} + + # This dictionary will store methods for extending attributes of + # api resources. Mixins can use this dict for adding their own methods + # TODO(salvatore-orlando): Avoid using class-level variables + _dict_extend_functions = {} + + @classmethod + def register_model_query_hook(cls, model, name, query_hook, filter_hook, + result_filters=None): + """Register a hook to be invoked when a query is executed. + + Add the hooks to the _model_query_hooks dict. Models are the keys + of this dict, whereas the value is another dict mapping hook names to + callables performing the hook. + Each hook has a "query" component, used to build the query expression + and a "filter" component, which is used to build the filter expression. + + Query hooks take as input the query being built and return a + transformed query expression. + + Filter hooks take as input the filter expression being built and return + a transformed filter expression + """ + model_hooks = cls._model_query_hooks.get(model) + if not model_hooks: + # add key to dict + model_hooks = {} + cls._model_query_hooks[model] = model_hooks + model_hooks[name] = {'query': query_hook, 'filter': filter_hook, + 'result_filters': result_filters} + + @property + def safe_reference(self): + """Return a weakref to the instance. + + Minimize the potential for the instance persisting + unnecessarily in memory by returning a weakref proxy that + won't prevent deallocation. 
+ """ + return weakref.proxy(self) + + def _model_query(self, context, model): + query = context.session.query(model) + # define basic filter condition for model query + # NOTE(jkoelker) non-admin queries are scoped to their tenant_id + # NOTE(salvatore-orlando): unless the model allows for shared objects + query_filter = None + if not context.is_admin and hasattr(model, 'tenant_id'): + if hasattr(model, 'shared'): + query_filter = ((model.tenant_id == context.tenant_id) | + (model.shared == sql.true())) + else: + query_filter = (model.tenant_id == context.tenant_id) + # Execute query hooks registered from mixins and plugins + for _name, hooks in self._model_query_hooks.get(model, + {}).iteritems(): + query_hook = hooks.get('query') + if isinstance(query_hook, basestring): + query_hook = getattr(self, query_hook, None) + if query_hook: + query = query_hook(context, model, query) + + filter_hook = hooks.get('filter') + if isinstance(filter_hook, basestring): + filter_hook = getattr(self, filter_hook, None) + if filter_hook: + query_filter = filter_hook(context, model, query_filter) + + # NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the + # condition, raising an exception + if query_filter is not None: + query = query.filter(query_filter) + return query + + def _fields(self, resource, fields): + if fields: + return dict(((key, item) for key, item in resource.items() + if key in fields)) + return resource + + def _get_tenant_id_for_create(self, context, resource): + if context.is_admin and 'tenant_id' in resource: + tenant_id = resource['tenant_id'] + elif ('tenant_id' in resource and + resource['tenant_id'] != context.tenant_id): + reason = _('Cannot create resource for another tenant') + raise n_exc.AdminRequired(reason=reason) + else: + tenant_id = context.tenant_id + return tenant_id + + def _get_by_id(self, context, model, id): + query = self._model_query(context, model) + return query.filter(model.id == id).one() + + def _apply_filters_to_query(self, query, model, filters): + if filters: + for key, value in filters.iteritems(): + column = getattr(model, key, None) + if column: + query = query.filter(column.in_(value)) + for _name, hooks in self._model_query_hooks.get(model, + {}).iteritems(): + result_filter = hooks.get('result_filters', None) + if isinstance(result_filter, basestring): + result_filter = getattr(self, result_filter, None) + + if result_filter: + query = result_filter(query, filters) + return query + + def _apply_dict_extend_functions(self, resource_type, + response, db_object): + for func in self._dict_extend_functions.get( + resource_type, []): + args = (response, db_object) + if isinstance(func, basestring): + func = getattr(self, func, None) + else: + # must call unbound method - use self as 1st argument + args = (self,) + args + if func: + func(*args) + + def _get_collection_query(self, context, model, filters=None, + sorts=None, limit=None, marker_obj=None, + page_reverse=False): + collection = self._model_query(context, model) + collection = self._apply_filters_to_query(collection, model, filters) + if limit and page_reverse and sorts: + sorts = [(s[0], not s[1]) for s in sorts] + collection = sqlalchemyutils.paginate_query(collection, model, limit, + sorts, + marker_obj=marker_obj) + return collection + + def _get_collection(self, context, model, dict_func, filters=None, + fields=None, sorts=None, limit=None, marker_obj=None, + page_reverse=False): + query = self._get_collection_query(context, model, filters=filters, + sorts=sorts, + 
limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + items = [dict_func(c, fields) for c in query] + if limit and page_reverse: + items.reverse() + return items + + def _get_collection_count(self, context, model, filters=None): + return self._get_collection_query(context, model, filters).count() + + def _get_marker_obj(self, context, resource, limit, marker): + if limit and marker: + return getattr(self, '_get_%s' % resource)(context, marker) + return None + + def _filter_non_model_columns(self, data, model): + """Remove all the attributes from data which are not columns of + the model passed as second parameter. + """ + columns = [c.name for c in model.__table__.columns] + return dict((k, v) for (k, v) in + data.iteritems() if k in columns) + + +class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, + CommonDbMixin): + """V2 Neutron plugin interface implementation using SQLAlchemy models. + + Whenever a non-read call happens the plugin will call an event handler + class method (e.g., network_created()). The result is that this class + can be sub-classed by other classes that add custom behaviors on certain + events. + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + def __init__(self): + db.configure_db() + if cfg.CONF.notify_nova_on_port_status_changes: + from neutron.notifiers import nova + # NOTE(arosen) These event listeners are here to hook into when + # port status changes and notify nova about their change. + self.nova_notifier = nova.Notifier() + event.listen(models_v2.Port, 'after_insert', + self.nova_notifier.send_port_status) + event.listen(models_v2.Port, 'after_update', + self.nova_notifier.send_port_status) + event.listen(models_v2.Port.status, 'set', + self.nova_notifier.record_port_status_changed) + + @classmethod + def register_dict_extend_funcs(cls, resource, funcs): + cur_funcs = cls._dict_extend_functions.get(resource, []) + cur_funcs.extend(funcs) + cls._dict_extend_functions[resource] = cur_funcs + + def _get_network(self, context, id): + try: + network = self._get_by_id(context, models_v2.Network, id) + except exc.NoResultFound: + raise n_exc.NetworkNotFound(net_id=id) + return network + + def _get_subnet(self, context, id): + try: + subnet = self._get_by_id(context, models_v2.Subnet, id) + except exc.NoResultFound: + raise n_exc.SubnetNotFound(subnet_id=id) + return subnet + + def _get_port(self, context, id): + try: + port = self._get_by_id(context, models_v2.Port, id) + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=id) + return port + + def _get_dns_by_subnet(self, context, subnet_id): + dns_qry = context.session.query(models_v2.DNSNameServer) + return dns_qry.filter_by(subnet_id=subnet_id).all() + + def _get_route_by_subnet(self, context, subnet_id): + route_qry = context.session.query(models_v2.SubnetRoute) + return route_qry.filter_by(subnet_id=subnet_id).all() + + def _get_subnets_by_network(self, context, network_id): + subnet_qry = context.session.query(models_v2.Subnet) + return subnet_qry.filter_by(network_id=network_id).all() + + def _get_all_subnets(self, context): + # NOTE(salvatore-orlando): This query might end up putting + # a lot of stress on the db. 
Consider adding a cache layer + return context.session.query(models_v2.Subnet).all() + + @staticmethod + def _generate_mac(context, network_id): + base_mac = cfg.CONF.base_mac.split(':') + max_retries = cfg.CONF.mac_generation_retries + for i in range(max_retries): + mac = [int(base_mac[0], 16), int(base_mac[1], 16), + int(base_mac[2], 16), random.randint(0x00, 0xff), + random.randint(0x00, 0xff), random.randint(0x00, 0xff)] + if base_mac[3] != '00': + mac[3] = int(base_mac[3], 16) + mac_address = ':'.join(map(lambda x: "%02x" % x, mac)) + if NeutronDbPluginV2._check_unique_mac(context, network_id, + mac_address): + LOG.debug(_("Generated mac for network %(network_id)s " + "is %(mac_address)s"), + {'network_id': network_id, + 'mac_address': mac_address}) + return mac_address + else: + LOG.debug(_("Generated mac %(mac_address)s exists. Remaining " + "attempts %(max_retries)s."), + {'mac_address': mac_address, + 'max_retries': max_retries - (i + 1)}) + LOG.error(_("Unable to generate mac address after %s attempts"), + max_retries) + raise n_exc.MacAddressGenerationFailure(net_id=network_id) + + @staticmethod + def _check_unique_mac(context, network_id, mac_address): + mac_qry = context.session.query(models_v2.Port) + try: + mac_qry.filter_by(network_id=network_id, + mac_address=mac_address).one() + except exc.NoResultFound: + return True + return False + + @staticmethod + def _delete_ip_allocation(context, network_id, subnet_id, ip_address): + + # Delete the IP address from the IPAllocate table + LOG.debug(_("Delete allocated IP %(ip_address)s " + "(%(network_id)s/%(subnet_id)s)"), + {'ip_address': ip_address, + 'network_id': network_id, + 'subnet_id': subnet_id}) + context.session.query(models_v2.IPAllocation).filter_by( + network_id=network_id, + ip_address=ip_address, + subnet_id=subnet_id).delete() + + @staticmethod + def _check_if_subnet_uses_eui64(subnet): + """Check if ipv6 address will be calculated via EUI64.""" + return (subnet['ipv6_address_mode'] == constants.IPV6_SLAAC + or subnet['ipv6_address_mode'] == constants.DHCPV6_STATELESS) + + @staticmethod + def _generate_ip(context, subnets): + try: + return NeutronDbPluginV2._try_generate_ip(context, subnets) + except n_exc.IpAddressGenerationFailure: + NeutronDbPluginV2._rebuild_availability_ranges(context, subnets) + + return NeutronDbPluginV2._try_generate_ip(context, subnets) + + @staticmethod + def _try_generate_ip(context, subnets): + """Generate an IP address. + + The IP address will be generated from one of the subnets defined on + the network. + """ + range_qry = context.session.query( + models_v2.IPAvailabilityRange).join( + models_v2.IPAllocationPool).with_lockmode('update') + for subnet in subnets: + range = range_qry.filter_by(subnet_id=subnet['id']).first() + if not range: + LOG.debug(_("All IPs from subnet %(subnet_id)s (%(cidr)s) " + "allocated"), + {'subnet_id': subnet['id'], 'cidr': subnet['cidr']}) + continue + ip_address = range['first_ip'] + LOG.debug(_("Allocated IP - %(ip_address)s from %(first_ip)s " + "to %(last_ip)s"), + {'ip_address': ip_address, + 'first_ip': range['first_ip'], + 'last_ip': range['last_ip']}) + if range['first_ip'] == range['last_ip']: + # No more free indices on subnet => delete + LOG.debug(_("No more free IP's in slice. 
Deleting allocation " + "pool.")) + context.session.delete(range) + else: + # increment the first free + range['first_ip'] = str(netaddr.IPAddress(ip_address) + 1) + return {'ip_address': ip_address, 'subnet_id': subnet['id']} + raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id']) + + @staticmethod + def _rebuild_availability_ranges(context, subnets): + ip_qry = context.session.query( + models_v2.IPAllocation).with_lockmode('update') + # PostgreSQL does not support select...for update with an outer join. + # No join is needed here. + pool_qry = context.session.query( + models_v2.IPAllocationPool).options( + orm.noload('available_ranges')).with_lockmode('update') + for subnet in sorted(subnets): + LOG.debug(_("Rebuilding availability ranges for subnet %s") + % subnet) + + # Create a set of all currently allocated addresses + ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id']) + allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address']) + for i in ip_qry_results]) + + for pool in pool_qry.filter_by(subnet_id=subnet['id']): + # Create a set of all addresses in the pool + poolset = netaddr.IPSet(netaddr.iter_iprange(pool['first_ip'], + pool['last_ip'])) + + # Use set difference to find free addresses in the pool + available = poolset - allocations + + # Generator compacts an ip set into contiguous ranges + def ipset_to_ranges(ipset): + first, last = None, None + for cidr in ipset.iter_cidrs(): + if last and last + 1 != cidr.first: + yield netaddr.IPRange(first, last) + first = None + first, last = first if first else cidr.first, cidr.last + if first: + yield netaddr.IPRange(first, last) + + # Write the ranges to the db + for range in ipset_to_ranges(available): + available_range = models_v2.IPAvailabilityRange( + allocation_pool_id=pool['id'], + first_ip=str(netaddr.IPAddress(range.first)), + last_ip=str(netaddr.IPAddress(range.last))) + context.session.add(available_range) + + @staticmethod + def _allocate_specific_ip(context, subnet_id, ip_address): + """Allocate a specific IP address on the subnet.""" + ip = int(netaddr.IPAddress(ip_address)) + range_qry = context.session.query( + models_v2.IPAvailabilityRange).join( + models_v2.IPAllocationPool).with_lockmode('update') + results = range_qry.filter_by(subnet_id=subnet_id) + for range in results: + first = int(netaddr.IPAddress(range['first_ip'])) + last = int(netaddr.IPAddress(range['last_ip'])) + if first <= ip <= last: + if first == last: + context.session.delete(range) + return + elif first == ip: + range['first_ip'] = str(netaddr.IPAddress(ip_address) + 1) + return + elif last == ip: + range['last_ip'] = str(netaddr.IPAddress(ip_address) - 1) + return + else: + # Split into two ranges + new_first = str(netaddr.IPAddress(ip_address) + 1) + new_last = range['last_ip'] + range['last_ip'] = str(netaddr.IPAddress(ip_address) - 1) + ip_range = models_v2.IPAvailabilityRange( + allocation_pool_id=range['allocation_pool_id'], + first_ip=new_first, + last_ip=new_last) + context.session.add(ip_range) + return + + @staticmethod + def _check_unique_ip(context, network_id, subnet_id, ip_address): + """Validate that the IP address on the subnet is not in use.""" + ip_qry = context.session.query(models_v2.IPAllocation) + try: + ip_qry.filter_by(network_id=network_id, + subnet_id=subnet_id, + ip_address=ip_address).one() + except exc.NoResultFound: + return True + return False + + @staticmethod + def _check_subnet_ip(cidr, ip_address): + """Validate that the IP address is on the subnet.""" + ip = 
netaddr.IPAddress(ip_address) + net = netaddr.IPNetwork(cidr) + # Check that the IP is valid on subnet. This cannot be the + # network or the broadcast address + if (ip != net.network and + ip != net.broadcast and + net.netmask & ip == net.network): + return True + return False + + @staticmethod + def _check_ip_in_allocation_pool(context, subnet_id, gateway_ip, + ip_address): + """Validate IP in allocation pool. + + Validates that the IP address is either the default gateway or + in the allocation pools of the subnet. + """ + # Check if the IP is the gateway + if ip_address == gateway_ip: + # Gateway is not in allocation pool + return False + + # Check if the requested IP is in a defined allocation pool + pool_qry = context.session.query(models_v2.IPAllocationPool) + allocation_pools = pool_qry.filter_by(subnet_id=subnet_id) + ip = netaddr.IPAddress(ip_address) + for allocation_pool in allocation_pools: + allocation_pool_range = netaddr.IPRange( + allocation_pool['first_ip'], + allocation_pool['last_ip']) + if ip in allocation_pool_range: + return True + return False + + def _test_fixed_ips_for_port(self, context, network_id, fixed_ips): + """Test fixed IPs for port. + + Check that configured subnets are valid prior to allocating any + IPs. Include the subnet_id in the result if only an IP address is + configured. + + :raises: InvalidInput, IpAddressInUse + """ + fixed_ip_set = [] + for fixed in fixed_ips: + found = False + if 'subnet_id' not in fixed: + if 'ip_address' not in fixed: + msg = _('IP allocation requires subnet_id or ip_address') + raise n_exc.InvalidInput(error_message=msg) + + filter = {'network_id': [network_id]} + subnets = self.get_subnets(context, filters=filter) + for subnet in subnets: + if NeutronDbPluginV2._check_subnet_ip(subnet['cidr'], + fixed['ip_address']): + found = True + subnet_id = subnet['id'] + break + if not found: + msg = _('IP address %s is not a valid IP for the defined ' + 'network subnets') % fixed['ip_address'] + raise n_exc.InvalidInput(error_message=msg) + else: + subnet = self._get_subnet(context, fixed['subnet_id']) + if subnet['network_id'] != network_id: + msg = (_("Failed to create port on network %(network_id)s" + ", because fixed_ips included invalid subnet " + "%(subnet_id)s") % + {'network_id': network_id, + 'subnet_id': fixed['subnet_id']}) + raise n_exc.InvalidInput(error_message=msg) + subnet_id = subnet['id'] + + if 'ip_address' in fixed: + # Ensure that the IPs are unique + if not NeutronDbPluginV2._check_unique_ip(context, network_id, + subnet_id, + fixed['ip_address']): + raise n_exc.IpAddressInUse(net_id=network_id, + ip_address=fixed['ip_address']) + + # Ensure that the IP is valid on the subnet + if (not found and + not NeutronDbPluginV2._check_subnet_ip( + subnet['cidr'], fixed['ip_address'])): + msg = _('IP address %s is not a valid IP for the defined ' + 'subnet') % fixed['ip_address'] + raise n_exc.InvalidInput(error_message=msg) + + fixed_ip_set.append({'subnet_id': subnet_id, + 'ip_address': fixed['ip_address']}) + else: + fixed_ip_set.append({'subnet_id': subnet_id}) + if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port: + msg = _('Exceeded maximum amount of fixed ips per port') + raise n_exc.InvalidInput(error_message=msg) + return fixed_ip_set + + def _allocate_fixed_ips(self, context, network, fixed_ips): + """Allocate IP addresses according to the configured fixed_ips.""" + ips = [] + for fixed in fixed_ips: + if 'ip_address' in fixed: + # Remove the IP address from the allocation pool +
NeutronDbPluginV2._allocate_specific_ip( + context, fixed['subnet_id'], fixed['ip_address']) + ips.append({'ip_address': fixed['ip_address'], + 'subnet_id': fixed['subnet_id']}) + # Only subnet ID is specified => need to generate IP + # from subnet + else: + subnets = [self._get_subnet(context, fixed['subnet_id'])] + # IP address allocation + result = self._generate_ip(context, subnets) + ips.append({'ip_address': result['ip_address'], + 'subnet_id': result['subnet_id']}) + return ips + + def _update_ips_for_port(self, context, network_id, port_id, original_ips, + new_ips): + """Add or remove IPs from the port.""" + ips = [] + # These ips are still on the port and haven't been removed + prev_ips = [] + + # the new_ips contain all of the fixed_ips that are to be updated + if len(new_ips) > cfg.CONF.max_fixed_ips_per_port: + msg = _('Exceeded maximum amount of fixed ips per port') + raise n_exc.InvalidInput(error_message=msg) + + # Remove all of the intersecting elements + for original_ip in original_ips[:]: + for new_ip in new_ips[:]: + if ('ip_address' in new_ip and + original_ip['ip_address'] == new_ip['ip_address']): + original_ips.remove(original_ip) + new_ips.remove(new_ip) + prev_ips.append(original_ip) + + # Check if the IPs to add are OK + to_add = self._test_fixed_ips_for_port(context, network_id, new_ips) + for ip in original_ips: + LOG.debug(_("Port update. Hold %s"), ip) + NeutronDbPluginV2._delete_ip_allocation(context, + network_id, + ip['subnet_id'], + ip['ip_address']) + + if to_add: + LOG.debug(_("Port update. Adding %s"), to_add) + network = self._get_network(context, network_id) + ips = self._allocate_fixed_ips(context, network, to_add) + return ips, prev_ips + + def _allocate_ips_for_port(self, context, network, port): + """Allocate IP addresses for the port. + + If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP + addresses for the port. If port['fixed_ips'] contains an IP address or + a subnet_id then allocate an IP address accordingly. + """ + p = port['port'] + ips = [] + + fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED + if fixed_configured: + configured_ips = self._test_fixed_ips_for_port(context, + p["network_id"], + p['fixed_ips']) + ips = self._allocate_fixed_ips(context, network, configured_ips) + else: + filter = {'network_id': [p['network_id']]} + subnets = self.get_subnets(context, filters=filter) + # Split into v4 and v6 subnets + v4 = [] + v6 = [] + for subnet in subnets: + if subnet['ip_version'] == 4: + v4.append(subnet) + else: + v6.append(subnet) + for subnet in v6: + if self._check_if_subnet_uses_eui64(subnet): + # (dzyu) If true, calculate an IPv6 address + # by mac address and prefix, then remove this + # subnet from the array of subnets that will be passed + # to the _generate_ip() function call, since we just + # generated an IP. + mac = p['mac_address'] + prefix = subnet['cidr'] + ip_address = ipv6_utils.get_ipv6_addr_by_EUI64( + prefix, mac) + ips.append({'ip_address': ip_address.format(), + 'subnet_id': subnet['id']}) + v6.remove(subnet) + version_subnets = [v4, v6] + for subnets in version_subnets: + if subnets: + result = NeutronDbPluginV2._generate_ip(context, subnets) + ips.append({'ip_address': result['ip_address'], + 'subnet_id': result['subnet_id']}) + return ips + + def _validate_subnet_cidr(self, context, network, new_subnet_cidr): + """Validate the CIDR for a subnet.
+ + Verifies the specified CIDR does not overlap with the ones defined + for the other subnets specified for this network, or with any other + CIDR if overlapping IPs are disabled. + """ + new_subnet_ipset = netaddr.IPSet([new_subnet_cidr]) + if cfg.CONF.allow_overlapping_ips: + subnet_list = network.subnets + else: + subnet_list = self._get_all_subnets(context) + for subnet in subnet_list: + if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset): + # don't give out details of the overlapping subnet + err_msg = (_("Requested subnet with cidr: %(cidr)s for " + "network: %(network_id)s overlaps with another " + "subnet") % + {'cidr': new_subnet_cidr, + 'network_id': network.id}) + LOG.info(_("Validation for CIDR: %(new_cidr)s failed - " + "overlaps with subnet %(subnet_id)s " + "(CIDR: %(cidr)s)"), + {'new_cidr': new_subnet_cidr, + 'subnet_id': subnet.id, + 'cidr': subnet.cidr}) + raise n_exc.InvalidInput(error_message=err_msg) + + def _validate_allocation_pools(self, ip_pools, subnet_cidr): + """Validate IP allocation pools. + + Verify start and end address for each allocation pool are valid, + ie: constituted by valid and appropriately ordered IP addresses. + Also, verify pools do not overlap among themselves. + Finally, verify that each range fall within the subnet's CIDR. + """ + subnet = netaddr.IPNetwork(subnet_cidr) + subnet_first_ip = netaddr.IPAddress(subnet.first + 1) + subnet_last_ip = netaddr.IPAddress(subnet.last - 1) + + LOG.debug(_("Performing IP validity checks on allocation pools")) + ip_sets = [] + for ip_pool in ip_pools: + try: + start_ip = netaddr.IPAddress(ip_pool['start']) + end_ip = netaddr.IPAddress(ip_pool['end']) + except netaddr.AddrFormatError: + LOG.info(_("Found invalid IP address in pool: " + "%(start)s - %(end)s:"), + {'start': ip_pool['start'], + 'end': ip_pool['end']}) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if (start_ip.version != subnet.version or + end_ip.version != subnet.version): + LOG.info(_("Specified IP addresses do not match " + "the subnet IP version")) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if end_ip < start_ip: + LOG.info(_("Start IP (%(start)s) is greater than end IP " + "(%(end)s)"), + {'start': ip_pool['start'], 'end': ip_pool['end']}) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if start_ip < subnet_first_ip or end_ip > subnet_last_ip: + LOG.info(_("Found pool larger than subnet " + "CIDR:%(start)s - %(end)s"), + {'start': ip_pool['start'], + 'end': ip_pool['end']}) + raise n_exc.OutOfBoundsAllocationPool( + pool=ip_pool, + subnet_cidr=subnet_cidr) + # Valid allocation pool + # Create an IPSet for it for easily verifying overlaps + ip_sets.append(netaddr.IPSet(netaddr.IPRange( + ip_pool['start'], + ip_pool['end']).cidrs())) + + LOG.debug(_("Checking for overlaps among allocation pools " + "and gateway ip")) + ip_ranges = ip_pools[:] + + # Use integer cursors as an efficient way for implementing + # comparison and avoiding comparing the same pair twice + for l_cursor in range(len(ip_sets)): + for r_cursor in range(l_cursor + 1, len(ip_sets)): + if ip_sets[l_cursor] & ip_sets[r_cursor]: + l_range = ip_ranges[l_cursor] + r_range = ip_ranges[r_cursor] + LOG.info(_("Found overlapping ranges: %(l_range)s and " + "%(r_range)s"), + {'l_range': l_range, 'r_range': r_range}) + raise n_exc.OverlappingAllocationPools( + pool_1=l_range, + pool_2=r_range, + subnet_cidr=subnet_cidr) + + def _validate_host_route(self, route, ip_version): + try: + netaddr.IPNetwork(route['destination']) + netaddr.IPAddress(route['nexthop']) + 
except netaddr.core.AddrFormatError: + err_msg = _("Invalid route: %s") % route + raise n_exc.InvalidInput(error_message=err_msg) + except ValueError: + # netaddr.IPAddress would raise this + err_msg = _("Invalid route: %s") % route + raise n_exc.InvalidInput(error_message=err_msg) + self._validate_ip_version(ip_version, route['nexthop'], 'nexthop') + self._validate_ip_version(ip_version, route['destination'], + 'destination') + + def _allocate_pools_for_subnet(self, context, subnet): + """Create IP allocation pools for a given subnet + + Pools are defined by the 'allocation_pools' attribute, + a list of dict objects with 'start' and 'end' keys for + defining the pool range. + """ + pools = [] + # Auto allocate the pool around gateway_ip + net = netaddr.IPNetwork(subnet['cidr']) + first_ip = net.first + 1 + last_ip = net.last - 1 + gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last)) + # Use the gw_ip to find a point for splitting allocation pools + # for this subnet + split_ip = min(max(gw_ip, net.first), net.last) + if split_ip > first_ip: + pools.append({'start': str(netaddr.IPAddress(first_ip)), + 'end': str(netaddr.IPAddress(split_ip - 1))}) + if split_ip < last_ip: + pools.append({'start': str(netaddr.IPAddress(split_ip + 1)), + 'end': str(netaddr.IPAddress(last_ip))}) + # return auto-generated pools + # no need to check for their validity + return pools + + def _validate_shared_update(self, context, id, original, updated): + # The only case that needs to be validated is when 'shared' + # goes from True to False + if updated['shared'] == original.shared or updated['shared']: + return + ports = self._model_query( + context, models_v2.Port).filter( + models_v2.Port.network_id == id) + subnets = self._model_query( + context, models_v2.Subnet).filter( + models_v2.Subnet.network_id == id) + tenant_ids = set([port['tenant_id'] for port in ports] + + [subnet['tenant_id'] for subnet in subnets]) + # raise if multiple tenants found or if the only tenant found + # is not the owner of the network + if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and + tenant_ids.pop() != original.tenant_id): + raise n_exc.InvalidSharedSetting(network=original.name) + + def _validate_ipv6_attributes(self, subnet, cur_subnet): + ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode')) + address_mode_set = attributes.is_attr_set( + subnet.get('ipv6_address_mode')) + if cur_subnet: + ra_mode = (subnet['ipv6_ra_mode'] if ra_mode_set + else cur_subnet['ipv6_ra_mode']) + addr_mode = (subnet['ipv6_address_mode'] if address_mode_set + else cur_subnet['ipv6_address_mode']) + if ra_mode_set or address_mode_set: + # Check that updated subnet ipv6 attributes do not conflict + self._validate_ipv6_combination(ra_mode, addr_mode) + self._validate_ipv6_update_dhcp(subnet, cur_subnet) + else: + self._validate_ipv6_dhcp(ra_mode_set, address_mode_set, + subnet['enable_dhcp']) + if ra_mode_set and address_mode_set: + self._validate_ipv6_combination(subnet['ipv6_ra_mode'], + subnet['ipv6_address_mode']) + + def _validate_ipv6_combination(self, ra_mode, address_mode): + if ra_mode != address_mode: + msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode " + "set to '%(addr_mode)s' is not valid. 
" + "If both attributes are set, they must be the same value" + ) % {'ra_mode': ra_mode, 'addr_mode': address_mode} + raise n_exc.InvalidInput(error_message=msg) + + def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp): + if (ra_mode_set or address_mode_set) and not enable_dhcp: + msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when " + "enable_dhcp is set to False.") + raise n_exc.InvalidInput(error_message=msg) + + def _validate_ipv6_update_dhcp(self, subnet, cur_subnet): + if ('enable_dhcp' in subnet and not subnet['enable_dhcp']): + msg = _("Cannot disable enable_dhcp with " + "ipv6 attributes set") + + ra_mode_set = attributes.is_attr_set(subnet.get('ipv6_ra_mode')) + address_mode_set = attributes.is_attr_set( + subnet.get('ipv6_address_mode')) + + if ra_mode_set or address_mode_set: + raise n_exc.InvalidInput(error_message=msg) + + old_ra_mode_set = attributes.is_attr_set( + cur_subnet.get('ipv6_ra_mode')) + old_address_mode_set = attributes.is_attr_set( + cur_subnet.get('ipv6_address_mode')) + + if old_ra_mode_set or old_address_mode_set: + raise n_exc.InvalidInput(error_message=msg) + + def _make_network_dict(self, network, fields=None, + process_extensions=True): + res = {'id': network['id'], + 'name': network['name'], + 'tenant_id': network['tenant_id'], + 'admin_state_up': network['admin_state_up'], + 'status': network['status'], + 'shared': network['shared'], + 'subnets': [subnet['id'] + for subnet in network['subnets']]} + # Call auxiliary extend functions, if any + if process_extensions: + self._apply_dict_extend_functions( + attributes.NETWORKS, res, network) + return self._fields(res, fields) + + def _make_subnet_dict(self, subnet, fields=None): + res = {'id': subnet['id'], + 'name': subnet['name'], + 'tenant_id': subnet['tenant_id'], + 'network_id': subnet['network_id'], + 'ip_version': subnet['ip_version'], + 'cidr': subnet['cidr'], + 'allocation_pools': [{'start': pool['first_ip'], + 'end': pool['last_ip']} + for pool in subnet['allocation_pools']], + 'gateway_ip': subnet['gateway_ip'], + 'enable_dhcp': subnet['enable_dhcp'], + 'ipv6_ra_mode': subnet['ipv6_ra_mode'], + 'ipv6_address_mode': subnet['ipv6_address_mode'], + 'dns_nameservers': [dns['address'] + for dns in subnet['dns_nameservers']], + 'host_routes': [{'destination': route['destination'], + 'nexthop': route['nexthop']} + for route in subnet['routes']], + 'shared': subnet['shared'] + } + return self._fields(res, fields) + + def _make_port_dict(self, port, fields=None, + process_extensions=True): + res = {"id": port["id"], + 'name': port['name'], + "network_id": port["network_id"], + 'tenant_id': port['tenant_id'], + "mac_address": port["mac_address"], + "admin_state_up": port["admin_state_up"], + "status": port["status"], + "fixed_ips": [{'subnet_id': ip["subnet_id"], + 'ip_address': ip["ip_address"]} + for ip in port["fixed_ips"]], + "device_id": port["device_id"], + "device_owner": port["device_owner"]} + # Call auxiliary extend functions, if any + if process_extensions: + self._apply_dict_extend_functions( + attributes.PORTS, res, port) + return self._fields(res, fields) + + def _create_bulk(self, resource, context, request_items): + objects = [] + collection = "%ss" % resource + items = request_items[collection] + context.session.begin(subtransactions=True) + try: + for item in items: + obj_creator = getattr(self, 'create_%s' % resource) + objects.append(obj_creator(context, item)) + context.session.commit() + except Exception: + context.session.rollback() + with 
excutils.save_and_reraise_exception(): + LOG.error(_("An exception occurred while creating " + "the %(resource)s:%(item)s"), + {'resource': resource, 'item': item}) + return objects + + def create_network_bulk(self, context, networks): + return self._create_bulk('network', context, networks) + + def create_network(self, context, network): + """Handle creation of a single network.""" + # single request processing + n = network['network'] + # NOTE(jkoelker) Get the tenant_id outside of the session to avoid + # unneeded db action if the operation raises + tenant_id = self._get_tenant_id_for_create(context, n) + with context.session.begin(subtransactions=True): + args = {'tenant_id': tenant_id, + 'id': n.get('id') or uuidutils.generate_uuid(), + 'name': n['name'], + 'admin_state_up': n['admin_state_up'], + 'shared': n['shared'], + 'status': n.get('status', constants.NET_STATUS_ACTIVE)} + network = models_v2.Network(**args) + context.session.add(network) + return self._make_network_dict(network, process_extensions=False) + + def update_network(self, context, id, network): + n = network['network'] + with context.session.begin(subtransactions=True): + network = self._get_network(context, id) + # validate 'shared' parameter + if 'shared' in n: + self._validate_shared_update(context, id, network, n) + network.update(n) + # also update shared in all the subnets for this network + subnets = self._get_subnets_by_network(context, id) + for subnet in subnets: + subnet['shared'] = network['shared'] + return self._make_network_dict(network) + + def delete_network(self, context, id): + with context.session.begin(subtransactions=True): + network = self._get_network(context, id) + + filters = {'network_id': [id]} + # NOTE(armando-migliaccio): stick with base plugin + query = context.session.query( + models_v2.Port).enable_eagerloads(False) + ports = self._apply_filters_to_query( + query, models_v2.Port, filters).with_lockmode('update') + + # check if there are any tenant owned ports in-use + only_auto_del = all(p['device_owner'] in AUTO_DELETE_PORT_OWNERS + for p in ports) + + if not only_auto_del: + raise n_exc.NetworkInUse(net_id=id) + + # clean up network owned ports + for port in ports: + self._delete_port(context, port['id']) + + # clean up subnets + subnets_qry = context.session.query(models_v2.Subnet) + subnets_qry.filter_by(network_id=id).delete() + context.session.delete(network) + + def get_network(self, context, id, fields=None): + network = self._get_network(context, id) + return self._make_network_dict(network, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'network', limit, marker) + return self._get_collection(context, models_v2.Network, + self._make_network_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_networks_count(self, context, filters=None): + return self._get_collection_count(context, models_v2.Network, + filters=filters) + + def create_subnet_bulk(self, context, subnets): + return self._create_bulk('subnet', context, subnets) + + def _validate_ip_version(self, ip_version, addr, name): + """Check IP field of a subnet match specified ip version.""" + ip = netaddr.IPNetwork(addr) + if ip.version != ip_version: + data = {'name': name, + 'addr': addr, + 'ip_version': ip_version} + msg = _("%(name)s '%(addr)s' does not match " + "the ip_version '%(ip_version)s'") % data + 
raise n_exc.InvalidInput(error_message=msg) + + def _validate_subnet(self, context, s, cur_subnet=None): + """Validate a subnet spec.""" + + # This method will validate attributes which may change during + # create_subnet() and update_subnet(). + # The method requires the subnet spec 's' has 'ip_version' field. + # If 's' dict does not have 'ip_version' field in an API call + # (e.g., update_subnet()), you need to set 'ip_version' field + # before calling this method. + + ip_ver = s['ip_version'] + + if 'cidr' in s: + self._validate_ip_version(ip_ver, s['cidr'], 'cidr') + + if attributes.is_attr_set(s.get('gateway_ip')): + self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip') + if (cfg.CONF.force_gateway_on_subnet and + not NeutronDbPluginV2._check_subnet_ip(s['cidr'], + s['gateway_ip'])): + error_message = _("Gateway is not valid on subnet") + raise n_exc.InvalidInput(error_message=error_message) + # Ensure the gateway IP is not assigned to any port + # skip this check in case of create (s parameter won't have id) + # NOTE(salv-orlando): There is slight chance of a race, when + # a subnet-update and a router-interface-add operation are + # executed concurrently + if cur_subnet: + alloc_qry = context.session.query(models_v2.IPAllocation) + allocated = alloc_qry.filter_by( + ip_address=cur_subnet['gateway_ip'], + subnet_id=cur_subnet['id']).first() + if allocated and allocated['port_id']: + raise n_exc.GatewayIpInUse( + ip_address=cur_subnet['gateway_ip'], + port_id=allocated['port_id']) + + if attributes.is_attr_set(s.get('dns_nameservers')): + if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers: + raise n_exc.DNSNameServersExhausted( + subnet_id=s.get('id', _('new subnet')), + quota=cfg.CONF.max_dns_nameservers) + for dns in s['dns_nameservers']: + try: + netaddr.IPAddress(dns) + except Exception: + raise n_exc.InvalidInput( + error_message=(_("Error parsing dns address %s") % + dns)) + self._validate_ip_version(ip_ver, dns, 'dns_nameserver') + + if attributes.is_attr_set(s.get('host_routes')): + if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes: + raise n_exc.HostRoutesExhausted( + subnet_id=s.get('id', _('new subnet')), + quota=cfg.CONF.max_subnet_host_routes) + # check if the routes are all valid + for rt in s['host_routes']: + self._validate_host_route(rt, ip_ver) + + if ip_ver == 4: + if attributes.is_attr_set(s.get('ipv6_ra_mode')): + raise n_exc.InvalidInput( + error_message=(_("ipv6_ra_mode is not valid when " + "ip_version is 4"))) + if attributes.is_attr_set(s.get('ipv6_address_mode')): + raise n_exc.InvalidInput( + error_message=(_("ipv6_address_mode is not valid when " + "ip_version is 4"))) + if ip_ver == 6: + self._validate_ipv6_attributes(s, cur_subnet) + + def _validate_gw_out_of_pools(self, gateway_ip, pools): + for allocation_pool in pools: + pool_range = netaddr.IPRange( + allocation_pool['start'], + allocation_pool['end']) + if netaddr.IPAddress(gateway_ip) in pool_range: + raise n_exc.GatewayConflictWithAllocationPools( + pool=pool_range, + ip_address=gateway_ip) + + def create_subnet(self, context, subnet): + + net = netaddr.IPNetwork(subnet['subnet']['cidr']) + # turn the CIDR into a proper subnet + subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen) + + s = subnet['subnet'] + + if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED: + s['gateway_ip'] = str(netaddr.IPAddress(net.first + 1)) + + if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED: + s['allocation_pools'] = self._allocate_pools_for_subnet(context, s) + 
else: + self._validate_allocation_pools(s['allocation_pools'], s['cidr']) + if s['gateway_ip'] is not None: + self._validate_gw_out_of_pools(s['gateway_ip'], + s['allocation_pools']) + + self._validate_subnet(context, s) + + tenant_id = self._get_tenant_id_for_create(context, s) + with context.session.begin(subtransactions=True): + network = self._get_network(context, s["network_id"]) + self._validate_subnet_cidr(context, network, s['cidr']) + # The 'shared' attribute for subnets is for internal plugin + # use only. It is not exposed through the API + args = {'tenant_id': tenant_id, + 'id': s.get('id') or uuidutils.generate_uuid(), + 'name': s['name'], + 'network_id': s['network_id'], + 'ip_version': s['ip_version'], + 'cidr': s['cidr'], + 'enable_dhcp': s['enable_dhcp'], + 'gateway_ip': s['gateway_ip'], + 'shared': network.shared} + if s['ip_version'] == 6 and s['enable_dhcp']: + if attributes.is_attr_set(s['ipv6_ra_mode']): + args['ipv6_ra_mode'] = s['ipv6_ra_mode'] + if attributes.is_attr_set(s['ipv6_address_mode']): + args['ipv6_address_mode'] = s['ipv6_address_mode'] + subnet = models_v2.Subnet(**args) + + context.session.add(subnet) + if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED: + for addr in s['dns_nameservers']: + ns = models_v2.DNSNameServer(address=addr, + subnet_id=subnet.id) + context.session.add(ns) + + if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED: + for rt in s['host_routes']: + route = models_v2.SubnetRoute( + subnet_id=subnet.id, + destination=rt['destination'], + nexthop=rt['nexthop']) + context.session.add(route) + + for pool in s['allocation_pools']: + ip_pool = models_v2.IPAllocationPool(subnet=subnet, + first_ip=pool['start'], + last_ip=pool['end']) + context.session.add(ip_pool) + ip_range = models_v2.IPAvailabilityRange( + ipallocationpool=ip_pool, + first_ip=pool['start'], + last_ip=pool['end']) + context.session.add(ip_range) + + return self._make_subnet_dict(subnet) + + def _update_subnet_dns_nameservers(self, context, id, s): + old_dns_list = self._get_dns_by_subnet(context, id) + new_dns_addr_set = set(s["dns_nameservers"]) + old_dns_addr_set = set([dns['address'] + for dns in old_dns_list]) + + new_dns = list(new_dns_addr_set) + for dns_addr in old_dns_addr_set - new_dns_addr_set: + for dns in old_dns_list: + if dns['address'] == dns_addr: + context.session.delete(dns) + for dns_addr in new_dns_addr_set - old_dns_addr_set: + dns = models_v2.DNSNameServer( + address=dns_addr, + subnet_id=id) + context.session.add(dns) + del s["dns_nameservers"] + return new_dns + + def _update_subnet_host_routes(self, context, id, s): + + def _combine(ht): + return ht['destination'] + "_" + ht['nexthop'] + + old_route_list = self._get_route_by_subnet(context, id) + + new_route_set = set([_combine(route) + for route in s['host_routes']]) + + old_route_set = set([_combine(route) + for route in old_route_list]) + + for route_str in old_route_set - new_route_set: + for route in old_route_list: + if _combine(route) == route_str: + context.session.delete(route) + for route_str in new_route_set - old_route_set: + route = models_v2.SubnetRoute( + destination=route_str.partition("_")[0], + nexthop=route_str.partition("_")[2], + subnet_id=id) + context.session.add(route) + + # Gather host routes for result + new_routes = [] + for route_str in new_route_set: + new_routes.append( + {'destination': route_str.partition("_")[0], + 'nexthop': route_str.partition("_")[2]}) + del s["host_routes"] + return new_routes + + def _update_subnet_allocation_pools(self, 
context, id, s): + context.session.query(models_v2.IPAllocationPool).filter_by( + subnet_id=id).delete() + new_pools = [models_v2.IPAllocationPool( + first_ip=p['start'], last_ip=p['end'], + subnet_id=id) for p in s['allocation_pools']] + context.session.add_all(new_pools) + NeutronDbPluginV2._rebuild_availability_ranges(context, [s]) + #Gather new pools for result: + result_pools = [{'start': pool['start'], + 'end': pool['end']} + for pool in s['allocation_pools']] + del s['allocation_pools'] + return result_pools + + def update_subnet(self, context, id, subnet): + """Update the subnet with new info. + + The change however will not be realized until the client renew the + dns lease or we support gratuitous DHCP offers + """ + s = subnet['subnet'] + changed_host_routes = False + changed_dns = False + changed_allocation_pools = False + db_subnet = self._get_subnet(context, id) + # Fill 'ip_version' and 'allocation_pools' fields with the current + # value since _validate_subnet() expects subnet spec has 'ip_version' + # and 'allocation_pools' fields. + s['ip_version'] = db_subnet.ip_version + s['cidr'] = db_subnet.cidr + s['id'] = db_subnet.id + self._validate_subnet(context, s, cur_subnet=db_subnet) + + if 'gateway_ip' in s and s['gateway_ip'] is not None: + allocation_pools = [{'start': p['first_ip'], 'end': p['last_ip']} + for p in db_subnet.allocation_pools] + self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools) + + with context.session.begin(subtransactions=True): + if "dns_nameservers" in s: + changed_dns = True + new_dns = self._update_subnet_dns_nameservers(context, id, s) + + if "host_routes" in s: + changed_host_routes = True + new_routes = self._update_subnet_host_routes(context, id, s) + + if "allocation_pools" in s: + self._validate_allocation_pools(s['allocation_pools'], + s['cidr']) + changed_allocation_pools = True + new_pools = self._update_subnet_allocation_pools(context, + id, s) + subnet = self._get_subnet(context, id) + subnet.update(s) + result = self._make_subnet_dict(subnet) + # Keep up with fields that changed + if changed_dns: + result['dns_nameservers'] = new_dns + if changed_host_routes: + result['host_routes'] = new_routes + if changed_allocation_pools: + result['allocation_pools'] = new_pools + return result + + def delete_subnet(self, context, id): + with context.session.begin(subtransactions=True): + subnet = self._get_subnet(context, id) + # Check if any tenant owned ports are using this subnet + allocated = (context.session.query(models_v2.IPAllocation). + filter_by(subnet_id=subnet['id']). + join(models_v2.Port). + filter_by(network_id=subnet['network_id']). 
+ with_lockmode('update')) + + # remove network owned ports + for a in allocated: + if a.ports.device_owner in AUTO_DELETE_PORT_OWNERS: + NeutronDbPluginV2._delete_ip_allocation( + context, subnet.network_id, id, a.ip_address) + else: + raise n_exc.SubnetInUse(subnet_id=id) + + context.session.delete(subnet) + + def get_subnet(self, context, id, fields=None): + subnet = self._get_subnet(context, id) + return self._make_subnet_dict(subnet, fields) + + def get_subnets(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'subnet', limit, marker) + return self._get_collection(context, models_v2.Subnet, + self._make_subnet_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_subnets_count(self, context, filters=None): + return self._get_collection_count(context, models_v2.Subnet, + filters=filters) + + def create_port_bulk(self, context, ports): + return self._create_bulk('port', context, ports) + + def create_port(self, context, port): + p = port['port'] + port_id = p.get('id') or uuidutils.generate_uuid() + network_id = p['network_id'] + # NOTE(jkoelker) Get the tenant_id outside of the session to avoid + # unneeded db action if the operation raises + tenant_id = self._get_tenant_id_for_create(context, p) + if p.get('device_owner') == constants.DEVICE_OWNER_ROUTER_INTF: + self._enforce_device_owner_not_router_intf_or_device_id(context, p, + tenant_id) + + with context.session.begin(subtransactions=True): + network = self._get_network(context, network_id) + + # Ensure that a MAC address is defined and it is unique on the + # network + if p['mac_address'] is attributes.ATTR_NOT_SPECIFIED: + #Note(scollins) Add the generated mac_address to the port, + #since _allocate_ips_for_port will need the mac when + #calculating an EUI-64 address for a v6 subnet + p['mac_address'] = NeutronDbPluginV2._generate_mac(context, + network_id) + else: + # Ensure that the mac on the network is unique + if not NeutronDbPluginV2._check_unique_mac(context, + network_id, + p['mac_address']): + raise n_exc.MacAddressInUse(net_id=network_id, + mac=p['mac_address']) + + # Returns the IP's for the port + ips = self._allocate_ips_for_port(context, network, port) + + if 'status' not in p: + status = constants.PORT_STATUS_ACTIVE + else: + status = p['status'] + + port = models_v2.Port(tenant_id=tenant_id, + name=p['name'], + id=port_id, + network_id=network_id, + mac_address=p['mac_address'], + admin_state_up=p['admin_state_up'], + status=status, + device_id=p['device_id'], + device_owner=p['device_owner']) + context.session.add(port) + + # Update the allocated IP's + if ips: + for ip in ips: + ip_address = ip['ip_address'] + subnet_id = ip['subnet_id'] + LOG.debug(_("Allocated IP %(ip_address)s " + "(%(network_id)s/%(subnet_id)s/%(port_id)s)"), + {'ip_address': ip_address, + 'network_id': network_id, + 'subnet_id': subnet_id, + 'port_id': port_id}) + allocated = models_v2.IPAllocation( + network_id=network_id, + port_id=port_id, + ip_address=ip_address, + subnet_id=subnet_id, + ) + context.session.add(allocated) + + return self._make_port_dict(port, process_extensions=False) + + def update_port(self, context, id, port): + p = port['port'] + + changed_ips = False + with context.session.begin(subtransactions=True): + port = self._get_port(context, id) + if 'device_owner' in p: + current_device_owner = p['device_owner'] + changed_device_owner = True + 
else: + current_device_owner = port['device_owner'] + changed_device_owner = False + if p.get('device_id') != port['device_id']: + changed_device_id = True + else: + changed_device_id = False + + # if the current device_owner is ROUTER_INTF and the device_id or + # device_owner changed, check that device_id is not another tenant's + # router + if ((current_device_owner == constants.DEVICE_OWNER_ROUTER_INTF) + and (changed_device_id or changed_device_owner)): + self._enforce_device_owner_not_router_intf_or_device_id( + context, p, port['tenant_id'], port) + + # Check if the IPs need to be updated + if 'fixed_ips' in p: + changed_ips = True + original = self._make_port_dict(port, process_extensions=False) + added_ips, prev_ips = self._update_ips_for_port( + context, port["network_id"], id, original["fixed_ips"], + p['fixed_ips']) + + # Update ips if necessary + for ip in added_ips: + allocated = models_v2.IPAllocation( + network_id=port['network_id'], port_id=port.id, + ip_address=ip['ip_address'], subnet_id=ip['subnet_id']) + context.session.add(allocated) + # Remove all attributes in p which are not in the port DB model + # and then update the port + port.update(self._filter_non_model_columns(p, models_v2.Port)) + + result = self._make_port_dict(port) + # Keep up with fields that changed + if changed_ips: + result['fixed_ips'] = prev_ips + added_ips + return result + + def delete_port(self, context, id): + with context.session.begin(subtransactions=True): + self._delete_port(context, id) + + def delete_ports_by_device_id(self, context, device_id, network_id=None): + query = (context.session.query(models_v2.Port.id) + .enable_eagerloads(False) + .filter(models_v2.Port.device_id == device_id)) + if network_id: + query = query.filter(models_v2.Port.network_id == network_id) + port_ids = [p[0] for p in query] + for port_id in port_ids: + try: + self.delete_port(context, port_id) + except n_exc.PortNotFound: + # Don't raise if something else concurrently deleted the port + LOG.debug(_("Ignoring PortNotFound when deleting port '%s'. " + "The port has already been deleted."), + port_id) + + def _delete_port(self, context, id): + query = (context.session.query(models_v2.Port).
+ enable_eagerloads(False).filter_by(id=id)) + if not context.is_admin: + query = query.filter_by(tenant_id=context.tenant_id) + query.delete() + + def get_port(self, context, id, fields=None): + port = self._get_port(context, id) + return self._make_port_dict(port, fields) + + def _get_ports_query(self, context, filters=None, sorts=None, limit=None, + marker_obj=None, page_reverse=False): + Port = models_v2.Port + IPAllocation = models_v2.IPAllocation + + if not filters: + filters = {} + + query = self._model_query(context, Port) + + fixed_ips = filters.pop('fixed_ips', {}) + ip_addresses = fixed_ips.get('ip_address') + subnet_ids = fixed_ips.get('subnet_id') + if ip_addresses or subnet_ids: + query = query.join(Port.fixed_ips) + if ip_addresses: + query = query.filter(IPAllocation.ip_address.in_(ip_addresses)) + if subnet_ids: + query = query.filter(IPAllocation.subnet_id.in_(subnet_ids)) + + query = self._apply_filters_to_query(query, Port, filters) + if limit and page_reverse and sorts: + sorts = [(s[0], not s[1]) for s in sorts] + query = sqlalchemyutils.paginate_query(query, Port, limit, + sorts, marker_obj) + return query + + def get_ports(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'port', limit, marker) + query = self._get_ports_query(context, filters=filters, + sorts=sorts, limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + items = [self._make_port_dict(c, fields) for c in query] + if limit and page_reverse: + items.reverse() + return items + + def get_ports_count(self, context, filters=None): + return self._get_ports_query(context, filters).count() + + def _enforce_device_owner_not_router_intf_or_device_id(self, context, + port_request, + tenant_id, + db_port=None): + if not context.is_admin: + # find the device_id. If the call was update_port and the + # device_id was not passed in we use the device_id from the + # db. + device_id = port_request.get('device_id') + if not device_id and db_port: + device_id = db_port.get('device_id') + # check to make sure device_id does not match another tenants + # router. + if device_id: + if hasattr(self, 'get_router'): + try: + ctx_admin = ctx.get_admin_context() + router = self.get_router(ctx_admin, device_id) + except l3.RouterNotFound: + return + else: + l3plugin = ( + manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT)) + if l3plugin: + try: + ctx_admin = ctx.get_admin_context() + router = l3plugin.get_router(ctx_admin, + device_id) + except l3.RouterNotFound: + return + else: + # raise as extension doesn't support L3 anyways. + raise n_exc.DeviceIDNotOwnedByTenant( + device_id=device_id) + if tenant_id != router['tenant_id']: + raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/dhcp_rpc_base.py b/icehouse-patches/neutron/dvr-patch/neutron/db/dhcp_rpc_base.py new file mode 100644 index 00000000..bb64df6d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/dhcp_rpc_base.py @@ -0,0 +1,287 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg +from oslo.db import exception as db_exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class DhcpRpcCallbackMixin(object): + """A mix-in that enable DHCP agent support in plugin implementations.""" + + def _get_active_networks(self, context, **kwargs): + """Retrieve and return a list of the active networks.""" + host = kwargs.get('host') + plugin = manager.NeutronManager.get_plugin() + if utils.is_extension_supported( + plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS): + if cfg.CONF.network_auto_schedule: + plugin.auto_schedule_networks(context, host) + nets = plugin.list_active_networks_on_active_dhcp_agent( + context, host) + else: + filters = dict(admin_state_up=[True]) + nets = plugin.get_networks(context, filters=filters) + return nets + + def _port_action(self, plugin, context, port, action): + """Perform port operations taking care of concurrency issues.""" + try: + if action == 'create_port': + return plugin.create_port(context, port) + elif action == 'update_port': + return plugin.update_port(context, port['id'], port['port']) + else: + msg = _('Unrecognized action') + raise n_exc.Invalid(message=msg) + except (db_exc.DBError, n_exc.NetworkNotFound, + n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e: + with excutils.save_and_reraise_exception(reraise=False) as ctxt: + if isinstance(e, n_exc.IpAddressGenerationFailure): + # Check if the subnet still exists and if it does not, + # this is the reason why the ip address generation failed. + # In any other unlikely event re-raise + try: + subnet_id = port['port']['fixed_ips'][0]['subnet_id'] + plugin.get_subnet(context, subnet_id) + except n_exc.SubnetNotFound: + pass + else: + ctxt.reraise = True + net_id = port['port']['network_id'] + LOG.warn(_("Action %(action)s for network %(net_id)s " + "could not complete successfully: %(reason)s") + % {"action": action, "net_id": net_id, 'reason': e}) + + def get_active_networks(self, context, **kwargs): + """Retrieve and return a list of the active network ids.""" + # NOTE(arosen): This method is no longer used by the DHCP agent but is + # left so that neutron-dhcp-agents will still continue to work if + # neutron-server is upgraded and not the agent. 
+ host = kwargs.get('host') + LOG.debug(_('get_active_networks requested from %s'), host) + nets = self._get_active_networks(context, **kwargs) + return [net['id'] for net in nets] + + def get_active_networks_info(self, context, **kwargs): + """Returns all the networks/subnets/ports in the system.""" + host = kwargs.get('host') + LOG.debug(_('get_active_networks_info from %s'), host) + networks = self._get_active_networks(context, **kwargs) + plugin = manager.NeutronManager.get_plugin() + filters = {'network_id': [network['id'] for network in networks]} + ports = plugin.get_ports(context, filters=filters) + filters['enable_dhcp'] = [True] + subnets = plugin.get_subnets(context, filters=filters) + + for network in networks: + network['subnets'] = [subnet for subnet in subnets + if subnet['network_id'] == network['id']] + network['ports'] = [port for port in ports + if port['network_id'] == network['id']] + + return networks + + def get_network_info(self, context, **kwargs): + """Retrieve and return extended information about a network.""" + network_id = kwargs.get('network_id') + host = kwargs.get('host') + LOG.debug(_('Network %(network_id)s requested from ' + '%(host)s'), {'network_id': network_id, + 'host': host}) + plugin = manager.NeutronManager.get_plugin() + try: + network = plugin.get_network(context, network_id) + except n_exc.NetworkNotFound: + LOG.warn(_("Network %s could not be found, it might have " + "been deleted concurrently."), network_id) + return + filters = dict(network_id=[network_id]) + network['subnets'] = plugin.get_subnets(context, filters=filters) + network['ports'] = plugin.get_ports(context, filters=filters) + return network + + def get_dhcp_port(self, context, **kwargs): + """Allocate a DHCP port for the host and return port information. + + This method will re-use an existing port if one already exists. When a + port is re-used, the fixed_ip allocation will be updated to the current + network state. If an expected failure occurs, a None port is returned. + + """ + host = kwargs.get('host') + network_id = kwargs.get('network_id') + device_id = kwargs.get('device_id') + # There could be more than one dhcp server per network, so create + # a device id that combines host and network ids + + LOG.debug(_('Port %(device_id)s for %(network_id)s requested from ' + '%(host)s'), {'device_id': device_id, + 'network_id': network_id, + 'host': host}) + plugin = manager.NeutronManager.get_plugin() + retval = None + + filters = dict(network_id=[network_id]) + subnets = dict([(s['id'], s) for s in + plugin.get_subnets(context, filters=filters)]) + + dhcp_enabled_subnet_ids = [s['id'] for s in + subnets.values() if s['enable_dhcp']] + + try: + filters = dict(network_id=[network_id], device_id=[device_id]) + ports = plugin.get_ports(context, filters=filters) + if ports: + # Ensure that fixed_ips cover all dhcp_enabled subnets. + port = ports[0] + for fixed_ip in port['fixed_ips']: + if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids: + dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id']) + port['fixed_ips'].extend( + [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + + retval = plugin.update_port(context, port['id'], + dict(port=port)) + + except n_exc.NotFound as e: + LOG.warning(e) + + if retval is None: + # No previous port exists, so create a new one.
+ LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s ' + 'does not exist on %(host)s'), + {'device_id': device_id, + 'network_id': network_id, + 'host': host}) + try: + network = plugin.get_network(context, network_id) + except n_exc.NetworkNotFound: + LOG.warn(_("Network %s could not be found, it might have " + "been deleted concurrently."), network_id) + return + + port_dict = dict( + admin_state_up=True, + device_id=device_id, + network_id=network_id, + tenant_id=network['tenant_id'], + mac_address=attributes.ATTR_NOT_SPECIFIED, + name='', + device_owner=constants.DEVICE_OWNER_DHCP, + fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + + retval = self._port_action(plugin, context, {'port': port_dict}, + 'create_port') + if not retval: + return + + # Convert subnet_id to subnet dict + for fixed_ip in retval['fixed_ips']: + subnet_id = fixed_ip.pop('subnet_id') + fixed_ip['subnet'] = subnets[subnet_id] + + return retval + + def release_dhcp_port(self, context, **kwargs): + """Release the port currently being used by a DHCP agent.""" + host = kwargs.get('host') + network_id = kwargs.get('network_id') + device_id = kwargs.get('device_id') + + LOG.debug(_('DHCP port deletion for %(network_id)s request from ' + '%(host)s'), + {'network_id': network_id, 'host': host}) + plugin = manager.NeutronManager.get_plugin() + plugin.delete_ports_by_device_id(context, device_id, network_id) + + def release_port_fixed_ip(self, context, **kwargs): + """Release the fixed_ip associated the subnet on a port.""" + host = kwargs.get('host') + network_id = kwargs.get('network_id') + device_id = kwargs.get('device_id') + subnet_id = kwargs.get('subnet_id') + + LOG.debug(_('DHCP port remove fixed_ip for %(subnet_id)s request ' + 'from %(host)s'), + {'subnet_id': subnet_id, 'host': host}) + plugin = manager.NeutronManager.get_plugin() + filters = dict(network_id=[network_id], device_id=[device_id]) + ports = plugin.get_ports(context, filters=filters) + + if ports: + port = ports[0] + + fixed_ips = port.get('fixed_ips', []) + for i in range(len(fixed_ips)): + if fixed_ips[i]['subnet_id'] == subnet_id: + del fixed_ips[i] + break + plugin.update_port(context, port['id'], dict(port=port)) + + def update_lease_expiration(self, context, **kwargs): + """Release the fixed_ip associated the subnet on a port.""" + # NOTE(arosen): This method is no longer used by the DHCP agent but is + # left so that neutron-dhcp-agents will still continue to work if + # neutron-server is upgraded and not the agent. + host = kwargs.get('host') + + LOG.warning(_('Updating lease expiration is now deprecated. Issued ' + 'from host %s.'), host) + + def create_dhcp_port(self, context, **kwargs): + """Create and return dhcp port information. + + If an expected failure occurs, a None port is returned. 
+ + """ + host = kwargs.get('host') + port = kwargs.get('port') + LOG.debug(_('Create dhcp port %(port)s ' + 'from %(host)s.'), + {'port': port, + 'host': host}) + + port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP + port['port'][portbindings.HOST_ID] = host + if 'mac_address' not in port['port']: + port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED + plugin = manager.NeutronManager.get_plugin() + return self._port_action(plugin, context, port, 'create_port') + + def update_dhcp_port(self, context, **kwargs): + """Update the dhcp port.""" + host = kwargs.get('host') + port_id = kwargs.get('port_id') + port = kwargs.get('port') + LOG.debug(_('Update dhcp port %(port)s ' + 'from %(host)s.'), + {'port': port, + 'host': host}) + plugin = manager.NeutronManager.get_plugin() + return self._port_action(plugin, context, + {'id': port_id, 'port': port}, + 'update_port') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/dvr_mac_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/dvr_mac_db.py new file mode 100644 index 00000000..02fdb8ef --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/dvr_mac_db.py @@ -0,0 +1,156 @@ +# Copyright 2014 Hewlett Packard, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.db import exception as db_exc + +import sqlalchemy as sa + +from neutron.common import exceptions as q_exc +from neutron.common import log +from neutron.common import utils +from neutron.db import model_base +from neutron.extensions import dvr as ext_dvr +from neutron import manager +from neutron.openstack.common import log as logging +from oslo.config import cfg +from sqlalchemy.orm import exc + +LOG = logging.getLogger(__name__) + +dvr_mac_address_opts = [ + cfg.StrOpt('dvr_base_mac', + default="fa:16:3f:00:00:00", + help=_('The base mac address used for unique ' + 'DVR instances by Neutron')), +] +cfg.CONF.register_opts(dvr_mac_address_opts) + + +class DistributedVirtualRouterMacAddress(model_base.BASEV2): + """Represents a v2 neutron distributed virtual router mac address.""" + + __tablename__ = 'dvr_host_macs' + + host = sa.Column(sa.String(255), primary_key=True, nullable=False) + mac_address = sa.Column(sa.String(32), nullable=False, unique=True) + + +class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): + """Mixin class to add dvr mac address to db_plugin_base_v2.""" + + @property + def plugin(self): + try: + if self._plugin is not None: + return self._plugin + except AttributeError: + pass + self._plugin = manager.NeutronManager.get_plugin() + return self._plugin + + def _get_dvr_mac_address_by_host(self, context, host): + try: + query = context.session.query(DistributedVirtualRouterMacAddress) + dvrma = query.filter( + DistributedVirtualRouterMacAddress.host == host).one() + except exc.NoResultFound: + raise ext_dvr.DVRMacAddressNotFound(host=host) + return dvrma + + def _create_dvr_mac_address(self, context, host): + """Create dvr mac address for a given host.""" + base_mac = cfg.CONF.dvr_base_mac.split(':') + max_retries = cfg.CONF.mac_generation_retries + for attempt in reversed(range(max_retries)): + try: + with context.session.begin(subtransactions=True): + mac_address = utils.get_random_mac(base_mac) + dvr_mac_binding = DistributedVirtualRouterMacAddress( + host=host, mac_address=mac_address) + context.session.add(dvr_mac_binding) + LOG.debug("Generated dvr mac for host %(host)s " + "is %(mac_address)s", + {'host': host, 'mac_address': mac_address}) + dvr_macs = self.get_dvr_mac_address_list(context) + self.notifier.dvr_mac_address_update(context, dvr_macs) + return self._make_dvr_mac_address_dict(dvr_mac_binding) + except db_exc.DBDuplicateEntry: + LOG.debug("Generated dvr mac %(mac)s exists." 
+ " Remaining attempts %(attempts_left)s.", + {'mac': mac_address, 'attempts_left': attempt}) + LOG.error(_("MAC generation error after %s attempts"), max_retries) + raise ext_dvr.MacAddressGenerationFailure(host=host) + + def delete_dvr_mac_address(self, context, host): + query = context.session.query(DistributedVirtualRouterMacAddress) + query.filter(DistributedVirtualRouterMacAddress.host == host).delete() + + def get_dvr_mac_address_list(self, context): + with context.session.begin(subtransactions=True): + query = context.session.query(DistributedVirtualRouterMacAddress) + dvrmacs = query.all() + return dvrmacs + + def get_dvr_mac_address_by_host(self, context, host): + if not host: + LOG.warn(_("get_dvr_mac_address_by_host, host input is empty")) + return + + try: + return self._get_dvr_mac_address_by_host(context, host) + except ext_dvr.DVRMacAddressNotFound: + return self._create_dvr_mac_address(context, host) + + def _make_dvr_mac_address_dict(self, dvr_mac_entry, fields=None): + return {'host': dvr_mac_entry['host'], + 'mac_address': dvr_mac_entry['mac_address']} + + @log.log + def get_compute_ports_on_host_by_subnet(self, context, host, subnet): + #FIXME(vivek): need to optimize this code to do away two-step filtering + vm_ports_by_host = [] + filter = {'fixed_ips': {'subnet_id': [subnet]}} + ports = self.plugin.get_ports(context, filters=filter) + LOG.debug("List of Ports on subnet %(subnet) received as %(ports)", + {'subnet': subnet, 'ports': ports}) + for port in ports: + if 'compute:' in port['device_owner']: + if port['binding:host_id'] == host: + port_dict = self.plugin._make_port_dict( + port, process_extensions=False) + vm_ports_by_host.append(port_dict) + LOG.debug("Returning list of VM Ports on host %(host) for subnet " + " %(subnet) ports %(ports)", + {'host': host, 'subnet': subnet, 'ports': vm_ports_by_host}) + return vm_ports_by_host + + @log.log + def get_subnet_for_dvr(self, context, subnet): + try: + subnet_info = self.plugin.get_subnet(context, subnet) + except q_exc.SubnetNotFound: + return {} + else: + # retrieve the gateway port on this subnet + filter = {'fixed_ips': {'subnet_id': [subnet], + 'ip_address': [subnet_info['gateway_ip']]}} + internal_gateway_ports = self.plugin.get_ports( + context, filters=filter) + if not internal_gateway_ports: + LOG.error(_("Could not retrieve gateway port " + "for subnet %s"), subnet_info) + return {} + internal_port = internal_gateway_ports[0] + subnet_info['gateway_mac'] = internal_port['mac_address'] + return subnet_info diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/external_net_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/external_net_db.py new file mode 100644 index 00000000..53f38953 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/external_net_db.py @@ -0,0 +1,163 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.sql import expression as expr + +from neutron.api.v2 import attributes +from neutron.common import constants as l3_constants +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import external_net +from neutron import manager +from neutron.plugins.common import constants as service_constants + + +DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW + + +class ExternalNetwork(model_base.BASEV2): + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + + # Add a relationship to the Network model in order to instruct + # SQLAlchemy to eagerly load this association + network = orm.relationship( + models_v2.Network, + backref=orm.backref("external", lazy='joined', + uselist=False, cascade='delete')) + + +class External_net_db_mixin(object): + """Mixin class to add external network methods to db_base_plugin_v2.""" + + def _network_model_hook(self, context, original_model, query): + query = query.outerjoin(ExternalNetwork, + (original_model.id == + ExternalNetwork.network_id)) + return query + + def _network_filter_hook(self, context, original_model, conditions): + if conditions is not None and not hasattr(conditions, '__iter__'): + conditions = (conditions, ) + # Apply the external network filter only in non-admin context + if not context.is_admin and hasattr(original_model, 'tenant_id'): + conditions = expr.or_(ExternalNetwork.network_id != expr.null(), + *conditions) + return conditions + + def _network_result_filter_hook(self, query, filters): + vals = filters and filters.get(external_net.EXTERNAL, []) + if not vals: + return query + if vals[0]: + return query.filter((ExternalNetwork.network_id != expr.null())) + return query.filter((ExternalNetwork.network_id == expr.null())) + + # TODO(salvatore-orlando): Perform this operation without explicitly + # referring to db_base_plugin_v2, as plugins that do not extend from it + # might exist in the future + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Network, + "external_net", + '_network_model_hook', + '_network_filter_hook', + '_network_result_filter_hook') + + def _network_is_external(self, context, net_id): + try: + context.session.query(ExternalNetwork).filter_by( + network_id=net_id).one() + return True + except exc.NoResultFound: + return False + + def _extend_network_dict_l3(self, network_res, network_db): + # Comparing with None for converting uuid into bool + network_res[external_net.EXTERNAL] = network_db.external is not None + return network_res + + # Register dict extend functions for networks + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.NETWORKS, ['_extend_network_dict_l3']) + + def _process_l3_create(self, context, net_data, req_data): + external = req_data.get(external_net.EXTERNAL) + external_set = attributes.is_attr_set(external) + + if not external_set: + return + + if external: + # expects to be called within a plugin's session + context.session.add(ExternalNetwork(network_id=net_data['id'])) + net_data[external_net.EXTERNAL] = external + + def _process_l3_update(self, context, net_data, req_data): + + new_value = req_data.get(external_net.EXTERNAL) + net_id = net_data['id'] + if not attributes.is_attr_set(new_value): + return + + if net_data.get(external_net.EXTERNAL) 
== new_value: + return + + if new_value: + context.session.add(ExternalNetwork(network_id=net_id)) + net_data[external_net.EXTERNAL] = True + else: + # must make sure we do not have any external gateway ports + # (and thus, possible floating IPs) on this network before + # allow it to be update to external=False + port = context.session.query(models_v2.Port).filter_by( + device_owner=DEVICE_OWNER_ROUTER_GW, + network_id=net_data['id']).first() + if port: + raise external_net.ExternalNetworkInUse(net_id=net_id) + + context.session.query(ExternalNetwork).filter_by( + network_id=net_id).delete() + net_data[external_net.EXTERNAL] = False + + def _process_l3_delete(self, context, network_id): + l3plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if l3plugin: + l3plugin.delete_disassociated_floatingips(context, network_id) + + def _filter_nets_l3(self, context, nets, filters): + vals = filters and filters.get(external_net.EXTERNAL, []) + if not vals: + return nets + + ext_nets = set(en['network_id'] + for en in context.session.query(ExternalNetwork)) + if vals[0]: + return [n for n in nets if n['id'] in ext_nets] + else: + return [n for n in nets if n['id'] not in ext_nets] + + def get_external_network_id(self, context): + nets = self.get_networks(context, {external_net.EXTERNAL: [True]}) + if len(nets) > 1: + raise n_exc.TooManyExternalNetworks() + else: + return nets[0]['id'] if nets else None diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/extraroute_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/extraroute_db.py new file mode 100644 index 00000000..c4d2ada8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/extraroute_db.py @@ -0,0 +1,185 @@ +# Copyright 2013, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
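Reviewer note: the extra-route validation in extraroute_db.py below hinges on netaddr.all_matching_cidrs; a static route's nexthop is accepted only if it falls inside one of the CIDRs already attached to the router and is not itself one of the router port IPs. A standalone sketch of that check, with made-up sample addresses:

```python
import netaddr


def validate_nexthop(cidrs, port_ips, nexthop):
    # Nexthop must be reachable through one of the router's subnets ...
    if not netaddr.all_matching_cidrs(nexthop, cidrs):
        raise ValueError("nexthop %s is not connected with the router" % nexthop)
    # ... and must not be one of the router's own fixed IPs.
    if nexthop in port_ips:
        raise ValueError("nexthop %s is used by the router" % nexthop)


validate_nexthop(["10.0.0.0/24", "192.168.1.0/24"], ["10.0.0.1"], "10.0.0.254")
```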
+ +import netaddr +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.common import utils +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import extraroute +from neutron.extensions import l3 +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +extra_route_opts = [ + #TODO(nati): use quota framework when it support quota for attributes + cfg.IntOpt('max_routes', default=30, + help=_("Maximum number of routes")), +] + +cfg.CONF.register_opts(extra_route_opts) + + +class RouterRoute(model_base.BASEV2, models_v2.Route): + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', + ondelete="CASCADE"), + primary_key=True) + + router = orm.relationship(l3_db.Router, + backref=orm.backref("route_list", + lazy='joined', + cascade='delete')) + + +class ExtraRoute_db_mixin(l3_db.L3_NAT_db_mixin): + """Mixin class to support extra route configuration on router.""" + + def _extend_router_dict_extraroute(self, router_res, router_db): + router_res['routes'] = (ExtraRoute_db_mixin. + _make_extra_route_list( + router_db['route_list'] + )) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, ['_extend_router_dict_extraroute']) + + def update_router(self, context, id, router): + r = router['router'] + with context.session.begin(subtransactions=True): + #check if route exists and have permission to access + router_db = self._get_router(context, id) + if 'routes' in r: + self._update_extra_routes(context, router_db, r['routes']) + routes = self._get_extra_routes_by_router_id(context, id) + router_updated = super(ExtraRoute_db_mixin, self).update_router( + context, id, router) + router_updated['routes'] = routes + + return router_updated + + def _get_subnets_by_cidr(self, context, cidr): + query_subnets = context.session.query(models_v2.Subnet) + return query_subnets.filter_by(cidr=cidr).all() + + def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop): + #Note(nati): Nexthop should be connected, + # so we need to check + # nexthop belongs to one of cidrs of the router ports + if not netaddr.all_matching_cidrs(nexthop, cidrs): + raise extraroute.InvalidRoutes( + routes=routes, + reason=_('the nexthop is not connected with router')) + #Note(nati) nexthop should not be same as fixed_ips + if nexthop in ips: + raise extraroute.InvalidRoutes( + routes=routes, + reason=_('the nexthop is used by router')) + + def _validate_routes(self, context, + router_id, routes): + if len(routes) > cfg.CONF.max_routes: + raise extraroute.RoutesExhausted( + router_id=router_id, + quota=cfg.CONF.max_routes) + + filters = {'device_id': [router_id]} + ports = self._core_plugin.get_ports(context, filters) + cidrs = [] + ips = [] + for port in ports: + for ip in port['fixed_ips']: + cidrs.append(self._core_plugin._get_subnet( + context, ip['subnet_id'])['cidr']) + ips.append(ip['ip_address']) + for route in routes: + self._validate_routes_nexthop( + cidrs, ips, routes, route['nexthop']) + + def _update_extra_routes(self, context, router, routes): + self._validate_routes(context, router['id'], + routes) + old_routes, routes_dict = self._get_extra_routes_dict_by_router_id( + context, router['id']) + added, removed = utils.diff_list_of_dict(old_routes, + routes) + LOG.debug(_('Added routes are %s'), added) + for route in added: + router_routes = RouterRoute( + router_id=router['id'], + 
destination=route['destination'], + nexthop=route['nexthop']) + context.session.add(router_routes) + + LOG.debug(_('Removed routes are %s'), removed) + for route in removed: + context.session.delete( + routes_dict[(route['destination'], route['nexthop'])]) + + @staticmethod + def _make_extra_route_list(extra_routes): + return [{'destination': route['destination'], + 'nexthop': route['nexthop']} + for route in extra_routes] + + def _get_extra_routes_by_router_id(self, context, id): + query = context.session.query(RouterRoute) + query = query.filter_by(router_id=id) + return self._make_extra_route_list(query) + + def _get_extra_routes_dict_by_router_id(self, context, id): + query = context.session.query(RouterRoute) + query = query.filter_by(router_id=id) + routes = [] + routes_dict = {} + for route in query: + routes.append({'destination': route['destination'], + 'nexthop': route['nexthop']}) + routes_dict[(route['destination'], route['nexthop'])] = route + return routes, routes_dict + + def get_router(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + router = super(ExtraRoute_db_mixin, self).get_router( + context, id, fields) + return router + + def get_routers(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + with context.session.begin(subtransactions=True): + routers = super(ExtraRoute_db_mixin, self).get_routers( + context, filters, fields, sorts=sorts, limit=limit, + marker=marker, page_reverse=page_reverse) + return routers + + def _confirm_router_interface_not_in_use(self, context, router_id, + subnet_id): + super(ExtraRoute_db_mixin, self)._confirm_router_interface_not_in_use( + context, router_id, subnet_id) + subnet_db = self._core_plugin._get_subnet(context, subnet_id) + subnet_cidr = netaddr.IPNetwork(subnet_db['cidr']) + extra_routes = self._get_extra_routes_by_router_id(context, router_id) + for route in extra_routes: + if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]): + raise extraroute.RouterInterfaceInUseByRoute( + router_id=router_id, subnet_id=subnet_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/firewall/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/db/firewall/__init__.py new file mode 100644 index 00000000..f3d0cdef --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/firewall/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/firewall/firewall_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/firewall/firewall_db.py new file mode 100644 index 00000000..58b930d9 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/firewall/firewall_db.py @@ -0,0 +1,479 @@ +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. + +import sqlalchemy as sa +from sqlalchemy.ext.orderinglist import ordering_list +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import firewall +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as const + + +LOG = logging.getLogger(__name__) + + +class FirewallRule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a Firewall rule.""" + __tablename__ = 'firewall_rules' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + firewall_policy_id = sa.Column(sa.String(36), + sa.ForeignKey('firewall_policies.id'), + nullable=True) + shared = sa.Column(sa.Boolean) + protocol = sa.Column(sa.String(40)) + ip_version = sa.Column(sa.Integer, nullable=False) + source_ip_address = sa.Column(sa.String(46)) + destination_ip_address = sa.Column(sa.String(46)) + source_port_range_min = sa.Column(sa.Integer) + source_port_range_max = sa.Column(sa.Integer) + destination_port_range_min = sa.Column(sa.Integer) + destination_port_range_max = sa.Column(sa.Integer) + action = sa.Column(sa.Enum('allow', 'deny', name='firewallrules_action')) + enabled = sa.Column(sa.Boolean) + position = sa.Column(sa.Integer) + + +class Firewall(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a Firewall resource.""" + __tablename__ = 'firewalls' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + shared = sa.Column(sa.Boolean) + admin_state_up = sa.Column(sa.Boolean) + status = sa.Column(sa.String(16)) + firewall_policy_id = sa.Column(sa.String(36), + sa.ForeignKey('firewall_policies.id'), + nullable=True) + + +class FirewallPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a Firewall Policy resource.""" + __tablename__ = 'firewall_policies' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + shared = sa.Column(sa.Boolean) + firewall_rules = orm.relationship( + FirewallRule, + backref=orm.backref('firewall_policies', cascade='all, delete'), + order_by='FirewallRule.position', + collection_class=ordering_list('position', count_from=1)) + audited = sa.Column(sa.Boolean) + firewalls = orm.relationship(Firewall, backref='firewall_policies') + + +class Firewall_db_mixin(firewall.FirewallPluginBase, base_db.CommonDbMixin): + """Mixin class for Firewall DB implementation.""" + + @property + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + + def _get_firewall(self, context, id): + try: + return self._get_by_id(context, Firewall, id) + except exc.NoResultFound: + raise firewall.FirewallNotFound(firewall_id=id) + + def _get_firewall_policy(self, context, id): + try: + return self._get_by_id(context, FirewallPolicy, id) + except exc.NoResultFound: + raise 
firewall.FirewallPolicyNotFound(firewall_policy_id=id) + + def _get_firewall_rule(self, context, id): + try: + return self._get_by_id(context, FirewallRule, id) + except exc.NoResultFound: + raise firewall.FirewallRuleNotFound(firewall_rule_id=id) + + def _make_firewall_dict(self, fw, fields=None): + res = {'id': fw['id'], + 'tenant_id': fw['tenant_id'], + 'name': fw['name'], + 'description': fw['description'], + 'shared': fw['shared'], + 'admin_state_up': fw['admin_state_up'], + 'status': fw['status'], + 'firewall_policy_id': fw['firewall_policy_id']} + return self._fields(res, fields) + + def _make_firewall_policy_dict(self, firewall_policy, fields=None): + fw_rules = [rule['id'] for rule in firewall_policy['firewall_rules']] + firewalls = [fw['id'] for fw in firewall_policy['firewalls']] + res = {'id': firewall_policy['id'], + 'tenant_id': firewall_policy['tenant_id'], + 'name': firewall_policy['name'], + 'description': firewall_policy['description'], + 'shared': firewall_policy['shared'], + 'audited': firewall_policy['audited'], + 'firewall_rules': fw_rules, + 'firewall_list': firewalls} + return self._fields(res, fields) + + def _make_firewall_rule_dict(self, firewall_rule, fields=None): + position = None + # We return the position only if the firewall_rule is bound to a + # firewall_policy. + if firewall_rule['firewall_policy_id']: + position = firewall_rule['position'] + src_port_range = self._get_port_range_from_min_max_ports( + firewall_rule['source_port_range_min'], + firewall_rule['source_port_range_max']) + dst_port_range = self._get_port_range_from_min_max_ports( + firewall_rule['destination_port_range_min'], + firewall_rule['destination_port_range_max']) + res = {'id': firewall_rule['id'], + 'tenant_id': firewall_rule['tenant_id'], + 'name': firewall_rule['name'], + 'description': firewall_rule['description'], + 'firewall_policy_id': firewall_rule['firewall_policy_id'], + 'shared': firewall_rule['shared'], + 'protocol': firewall_rule['protocol'], + 'ip_version': firewall_rule['ip_version'], + 'source_ip_address': firewall_rule['source_ip_address'], + 'destination_ip_address': + firewall_rule['destination_ip_address'], + 'source_port': src_port_range, + 'destination_port': dst_port_range, + 'action': firewall_rule['action'], + 'position': position, + 'enabled': firewall_rule['enabled']} + return self._fields(res, fields) + + def _set_rules_for_policy(self, context, firewall_policy_db, rule_id_list): + fwp_db = firewall_policy_db + with context.session.begin(subtransactions=True): + if not rule_id_list: + fwp_db.firewall_rules = [] + fwp_db.audited = False + return + # We will first check if the new list of rules is valid + filters = {'id': [r_id for r_id in rule_id_list]} + rules_in_db = self._get_collection_query(context, FirewallRule, + filters=filters) + rules_dict = dict((fwr_db['id'], fwr_db) for fwr_db in rules_in_db) + for fwrule_id in rule_id_list: + if fwrule_id not in rules_dict: + # If we find an invalid rule in the list we + # do not perform the update since this breaks + # the integrity of this list. + raise firewall.FirewallRuleNotFound(firewall_rule_id= + fwrule_id) + elif rules_dict[fwrule_id]['firewall_policy_id']: + if (rules_dict[fwrule_id]['firewall_policy_id'] != + fwp_db['id']): + raise firewall.FirewallRuleInUse( + firewall_rule_id=fwrule_id) + # New list of rules is valid so we will first reset the existing + # list and then add each rule in order. + # Note that the list could be empty in which case we interpret + # it as clearing existing rules. 
+ fwp_db.firewall_rules = [] + for fwrule_id in rule_id_list: + fwp_db.firewall_rules.append(rules_dict[fwrule_id]) + fwp_db.firewall_rules.reorder() + fwp_db.audited = False + + def _process_rule_for_policy(self, context, firewall_policy_id, + firewall_rule_db, position): + with context.session.begin(subtransactions=True): + fwp_query = context.session.query( + FirewallPolicy).with_lockmode('update') + fwp_db = fwp_query.filter_by(id=firewall_policy_id).one() + if position: + # Note that although position numbering starts at 1, + # internal ordering of the list starts at 0, so we compensate. + fwp_db.firewall_rules.insert(position - 1, firewall_rule_db) + else: + fwp_db.firewall_rules.remove(firewall_rule_db) + fwp_db.firewall_rules.reorder() + fwp_db.audited = False + return self._make_firewall_policy_dict(fwp_db) + + def _get_min_max_ports_from_range(self, port_range): + if not port_range: + return [None, None] + min_port, sep, max_port = port_range.partition(":") + if not max_port: + max_port = min_port + return [int(min_port), int(max_port)] + + def _get_port_range_from_min_max_ports(self, min_port, max_port): + if not min_port: + return None + if min_port == max_port: + return str(min_port) + else: + return '%d:%d' % (min_port, max_port) + + def _validate_fwr_protocol_parameters(self, fwr): + protocol = fwr['protocol'] + if protocol not in (const.TCP, const.UDP): + if fwr['source_port'] or fwr['destination_port']: + raise firewall.FirewallRuleInvalidICMPParameter( + param="Source, destination port") + + def create_firewall(self, context, firewall): + LOG.debug(_("create_firewall() called")) + fw = firewall['firewall'] + tenant_id = self._get_tenant_id_for_create(context, fw) + with context.session.begin(subtransactions=True): + firewall_db = Firewall(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=fw['name'], + description=fw['description'], + firewall_policy_id= + fw['firewall_policy_id'], + admin_state_up=fw['admin_state_up'], + status=const.PENDING_CREATE) + context.session.add(firewall_db) + return self._make_firewall_dict(firewall_db) + + def update_firewall(self, context, id, firewall): + LOG.debug(_("update_firewall() called")) + fw = firewall['firewall'] + with context.session.begin(subtransactions=True): + fw_query = context.session.query( + Firewall).with_lockmode('update') + firewall_db = fw_query.filter_by(id=id).one() + firewall_db.update(fw) + return self._make_firewall_dict(firewall_db) + + def delete_firewall(self, context, id): + LOG.debug(_("delete_firewall() called")) + with context.session.begin(subtransactions=True): + fw_query = context.session.query( + Firewall).with_lockmode('update') + firewall_db = fw_query.filter_by(id=id).one() + # Note: Plugin should ensure that it's okay to delete if the + # firewall is active + context.session.delete(firewall_db) + + def get_firewall(self, context, id, fields=None): + LOG.debug(_("get_firewall() called")) + fw = self._get_firewall(context, id) + return self._make_firewall_dict(fw, fields) + + def get_firewalls(self, context, filters=None, fields=None): + LOG.debug(_("get_firewalls() called")) + return self._get_collection(context, Firewall, + self._make_firewall_dict, + filters=filters, fields=fields) + + def get_firewalls_count(self, context, filters=None): + LOG.debug(_("get_firewalls_count() called")) + return self._get_collection_count(context, Firewall, + filters=filters) + + def create_firewall_policy(self, context, firewall_policy): + LOG.debug(_("create_firewall_policy() called")) + fwp = 
firewall_policy['firewall_policy'] + tenant_id = self._get_tenant_id_for_create(context, fwp) + with context.session.begin(subtransactions=True): + fwp_db = FirewallPolicy(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=fwp['name'], + description=fwp['description'], + shared=fwp['shared']) + context.session.add(fwp_db) + self._set_rules_for_policy(context, fwp_db, + fwp['firewall_rules']) + fwp_db.audited = fwp['audited'] + return self._make_firewall_policy_dict(fwp_db) + + def update_firewall_policy(self, context, id, firewall_policy): + LOG.debug(_("update_firewall_policy() called")) + fwp = firewall_policy['firewall_policy'] + with context.session.begin(subtransactions=True): + fwp_db = self._get_firewall_policy(context, id) + if 'firewall_rules' in fwp: + self._set_rules_for_policy(context, fwp_db, + fwp['firewall_rules']) + del fwp['firewall_rules'] + fwp_db.update(fwp) + return self._make_firewall_policy_dict(fwp_db) + + def delete_firewall_policy(self, context, id): + LOG.debug(_("delete_firewall_policy() called")) + with context.session.begin(subtransactions=True): + fwp = self._get_firewall_policy(context, id) + # Ensure that the firewall_policy is not + # being used + qry = context.session.query(Firewall) + if qry.filter_by(firewall_policy_id=id).first(): + raise firewall.FirewallPolicyInUse(firewall_policy_id=id) + else: + context.session.delete(fwp) + + def get_firewall_policy(self, context, id, fields=None): + LOG.debug(_("get_firewall_policy() called")) + fwp = self._get_firewall_policy(context, id) + return self._make_firewall_policy_dict(fwp, fields) + + def get_firewall_policies(self, context, filters=None, fields=None): + LOG.debug(_("get_firewall_policies() called")) + return self._get_collection(context, FirewallPolicy, + self._make_firewall_policy_dict, + filters=filters, fields=fields) + + def get_firewalls_policies_count(self, context, filters=None): + LOG.debug(_("get_firewall_policies_count() called")) + return self._get_collection_count(context, FirewallPolicy, + filters=filters) + + def create_firewall_rule(self, context, firewall_rule): + LOG.debug(_("create_firewall_rule() called")) + fwr = firewall_rule['firewall_rule'] + self._validate_fwr_protocol_parameters(fwr) + tenant_id = self._get_tenant_id_for_create(context, fwr) + src_port_min, src_port_max = self._get_min_max_ports_from_range( + fwr['source_port']) + dst_port_min, dst_port_max = self._get_min_max_ports_from_range( + fwr['destination_port']) + with context.session.begin(subtransactions=True): + fwr_db = FirewallRule(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=fwr['name'], + description=fwr['description'], + shared=fwr['shared'], + protocol=fwr['protocol'], + ip_version=fwr['ip_version'], + source_ip_address=fwr['source_ip_address'], + destination_ip_address= + fwr['destination_ip_address'], + source_port_range_min=src_port_min, + source_port_range_max=src_port_max, + destination_port_range_min=dst_port_min, + destination_port_range_max=dst_port_max, + action=fwr['action'], + enabled=fwr['enabled']) + context.session.add(fwr_db) + return self._make_firewall_rule_dict(fwr_db) + + def update_firewall_rule(self, context, id, firewall_rule): + LOG.debug(_("update_firewall_rule() called")) + fwr = firewall_rule['firewall_rule'] + if 'source_port' in fwr: + src_port_min, src_port_max = self._get_min_max_ports_from_range( + fwr['source_port']) + fwr['source_port_range_min'] = src_port_min + fwr['source_port_range_max'] = src_port_max + del fwr['source_port'] + if 
'destination_port' in fwr: + dst_port_min, dst_port_max = self._get_min_max_ports_from_range( + fwr['destination_port']) + fwr['destination_port_range_min'] = dst_port_min + fwr['destination_port_range_max'] = dst_port_max + del fwr['destination_port'] + with context.session.begin(subtransactions=True): + fwr_db = self._get_firewall_rule(context, id) + fwr_db.update(fwr) + if fwr_db.firewall_policy_id: + fwp_db = self._get_firewall_policy(context, + fwr_db.firewall_policy_id) + fwp_db.audited = False + return self._make_firewall_rule_dict(fwr_db) + + def delete_firewall_rule(self, context, id): + LOG.debug(_("delete_firewall_rule() called")) + with context.session.begin(subtransactions=True): + fwr = self._get_firewall_rule(context, id) + if fwr.firewall_policy_id: + raise firewall.FirewallRuleInUse(firewall_rule_id=id) + context.session.delete(fwr) + + def get_firewall_rule(self, context, id, fields=None): + LOG.debug(_("get_firewall_rule() called")) + fwr = self._get_firewall_rule(context, id) + return self._make_firewall_rule_dict(fwr, fields) + + def get_firewall_rules(self, context, filters=None, fields=None): + LOG.debug(_("get_firewall_rules() called")) + return self._get_collection(context, FirewallRule, + self._make_firewall_rule_dict, + filters=filters, fields=fields) + + def get_firewalls_rules_count(self, context, filters=None): + LOG.debug(_("get_firewall_rules_count() called")) + return self._get_collection_count(context, FirewallRule, + filters=filters) + + def _validate_insert_remove_rule_request(self, id, rule_info): + if not rule_info or 'firewall_rule_id' not in rule_info: + raise firewall.FirewallRuleInfoMissing() + + def insert_rule(self, context, id, rule_info): + LOG.debug(_("insert_rule() called")) + self._validate_insert_remove_rule_request(id, rule_info) + firewall_rule_id = rule_info['firewall_rule_id'] + insert_before = True + ref_firewall_rule_id = None + if not firewall_rule_id: + raise firewall.FirewallRuleNotFound(firewall_rule_id=None) + if 'insert_before' in rule_info: + ref_firewall_rule_id = rule_info['insert_before'] + if not ref_firewall_rule_id and 'insert_after' in rule_info: + # If insert_before is set, we will ignore insert_after. + ref_firewall_rule_id = rule_info['insert_after'] + insert_before = False + with context.session.begin(subtransactions=True): + fwr_db = self._get_firewall_rule(context, firewall_rule_id) + if fwr_db.firewall_policy_id: + raise firewall.FirewallRuleInUse(firewall_rule_id=fwr_db['id']) + if ref_firewall_rule_id: + # If reference_firewall_rule_id is set, the new rule + # is inserted depending on the value of insert_before. + # If insert_before is set, the new rule is inserted before + # reference_firewall_rule_id, and if it is not set the new + # rule is inserted after reference_firewall_rule_id. + ref_fwr_db = self._get_firewall_rule( + context, ref_firewall_rule_id) + if insert_before: + position = ref_fwr_db.position + else: + position = ref_fwr_db.position + 1 + else: + # If reference_firewall_rule_id is not set, it is assumed + # that the new rule needs to be inserted at the top. + # insert_before field is ignored. + # So default insertion is always at the top. + # Also note that position numbering starts at 1. 
+ position = 1 + return self._process_rule_for_policy(context, id, fwr_db, + position) + + def remove_rule(self, context, id, rule_info): + LOG.debug(_("remove_rule() called")) + self._validate_insert_remove_rule_request(id, rule_info) + firewall_rule_id = rule_info['firewall_rule_id'] + if not firewall_rule_id: + raise firewall.FirewallRuleNotFound(firewall_rule_id=None) + with context.session.begin(subtransactions=True): + fwr_db = self._get_firewall_rule(context, firewall_rule_id) + if fwr_db.firewall_policy_id != id: + raise firewall.FirewallRuleNotAssociatedWithPolicy( + firewall_rule_id=fwr_db['id'], + firewall_policy_id=id) + return self._process_rule_for_policy(context, id, fwr_db, None) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/l3_agentschedulers_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_agentschedulers_db.py new file mode 100644 index 00000000..18b3efe8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_agentschedulers_db.py @@ -0,0 +1,398 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import func +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.orm import joinedload + +from neutron.common import constants +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import l3agentscheduler +from neutron import manager + +L3_AGENTS_SCHEDULER_OPTS = [ + cfg.StrOpt('router_scheduler_driver', + default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler', + help=_('Driver to use for scheduling ' + 'router to a default L3 agent')), + cfg.BoolOpt('router_auto_schedule', default=True, + help=_('Allow auto scheduling of routers to L3 agent.')), +] + +cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS) + + +class RouterL3AgentBinding(model_base.BASEV2, models_v2.HasId): + """Represents binding between neutron routers and L3 agents.""" + + router_id = sa.Column(sa.String(36), + sa.ForeignKey("routers.id", ondelete='CASCADE')) + l3_agent = orm.relation(agents_db.Agent) + l3_agent_id = sa.Column(sa.String(36), + sa.ForeignKey("agents.id", + ondelete='CASCADE')) + + +class CentralizedSnatL3AgentBinding(model_base.BASEV2, models_v2.HasId): + """Represents binding between neutron Centralized SNAT and L3 agents.""" + + router_id = sa.Column(sa.String(36), + sa.ForeignKey("routers.id", ondelete='CASCADE'), + primary_key=True) + csnat_gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) + host_id = sa.Column(sa.String(255)) + l3_agent = orm.relation(agents_db.Agent) + l3_agent_id = sa.Column(sa.String(36), + sa.ForeignKey("agents.id", + ondelete='CASCADE')) + csnat_gw_port = orm.relationship(models_v2.Port) + + +class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, + agentschedulers_db.AgentSchedulerDbMixin): + """Mixin class to add l3 agent 
scheduler extension to plugins + using the l3 agent for routing. + """ + + router_scheduler = None + + def add_router_to_l3_agent(self, context, agent_id, router_id): + """Add a l3 agent to host a router.""" + router = self.get_router(context, router_id) + with context.session.begin(subtransactions=True): + agent_db = self._get_agent(context, agent_id) + agent_conf = self.get_configuration_dict(agent_db) + distributed_router_enable = agent_conf.get( + 'distributed_agent', False) + distributed = router.get('distributed', False) + if (distributed and not distributed_router_enable): + raise l3agentscheduler.DistributedRouterNotHostedByL3Agent( + router_id=router_id, agent_id=agent_id) + if (not distributed and distributed_router_enable): + raise l3agentscheduler.RouterNotHostedByDistributedL3Agent( + router_id=router_id, agent_id=agent_id) + if (agent_db['agent_type'] != constants.AGENT_TYPE_L3 or + not agent_db['admin_state_up'] or + not self.get_l3_agent_candidates(context, + router, + [agent_db])): + raise l3agentscheduler.InvalidL3Agent(id=agent_id) + query = context.session.query(RouterL3AgentBinding) + if distributed: + binding = query.filter_by(router_id=router_id, + l3_agent_id=agent_id).first() + if binding: + raise l3agentscheduler.RouterHostedByL3Agent( + router_id=router_id, + agent_id=binding.l3_agent_id) + else: + try: + binding = query.filter_by(router_id=router_id).one() + + raise l3agentscheduler.RouterHostedByL3Agent( + router_id=router_id, + agent_id=binding.l3_agent_id) + except exc.NoResultFound: + pass + + result = self.auto_schedule_routers(context, + agent_db.host, + [router_id]) + if not result: + raise l3agentscheduler.RouterSchedulingFailed( + router_id=router_id, agent_id=agent_id) + + l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) + if l3_notifier: + l3_notifier.router_added_to_agent( + context, [router_id], agent_db.host) + + def remove_router_from_l3_agent(self, context, agent_id, router_id): + """Remove the router from l3 agent. + + After removal, the router will be non-hosted until there is update + which leads to re-schedule or be added to another agent manually. 
+ """ + agent = self._get_agent(context, agent_id) + floating_ips = self._get_sync_floating_ips(context, [router_id]) + if floating_ips: + raise l3agentscheduler.RemoveFloatingIPforRouter( + router_id=router_id, agent_id=agent_id) + self._unbind_router(context, router_id, agent_id) + l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) + if l3_notifier: + l3_notifier.router_removed_from_agent( + context, router_id, agent.host) + + def _unbind_router(self, context, router_id, agent_id): + with context.session.begin(subtransactions=True): + query = context.session.query(RouterL3AgentBinding) + query = query.filter( + RouterL3AgentBinding.router_id == router_id, + RouterL3AgentBinding.l3_agent_id == agent_id) + try: + binding = query.one() + except exc.NoResultFound: + raise l3agentscheduler.RouterNotHostedByL3Agent( + router_id=router_id, agent_id=agent_id) + context.session.delete(binding) + + def reschedule_router(self, context, router_id, candidates=None): + """Reschedule router to a new l3 agent + + Remove the router from the agent(s) currently hosting it and + schedule it again + """ + cur_agents = self.list_l3_agents_hosting_router( + context, router_id)['agents'] + with context.session.begin(subtransactions=True): + for agent in cur_agents: + self._unbind_router(context, router_id, agent['id']) + + new_agent = self.schedule_router(context, router_id, + candidates=candidates) + if not new_agent: + raise l3agentscheduler.RouterReschedulingFailed( + router_id=router_id) + + l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) + if l3_notifier: + for agent in cur_agents: + l3_notifier.router_removed_from_agent( + context, router_id, agent['host']) + l3_notifier.router_added_to_agent( + context, [router_id], new_agent.host) + + def list_routers_on_l3_agent(self, context, agent_id): + query = context.session.query(RouterL3AgentBinding.router_id) + query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id) + + router_ids = [item[0] for item in query] + if router_ids: + return {'routers': + self.get_routers(context, filters={'id': router_ids})} + else: + return {'routers': []} + + def list_active_sync_routers_on_active_l3_agent( + self, context, host, router_ids): + agent = self._get_agent_by_type_and_host( + context, constants.AGENT_TYPE_L3, host) + if not agent.admin_state_up: + return [] + query = context.session.query(RouterL3AgentBinding.router_id) + query = query.filter( + RouterL3AgentBinding.l3_agent_id == agent.id) + + if not router_ids: + pass + else: + query = query.filter( + RouterL3AgentBinding.router_id.in_(router_ids)) + router_ids = [item[0] for item in query] + if router_ids: + return self.get_sync_data(context, router_ids=router_ids, + active=True) + else: + return [] + + def get_l3_agents_hosting_routers(self, context, router_ids, + admin_state_up=None, + active=None): + if not router_ids: + return [] + query = context.session.query(RouterL3AgentBinding) + if len(router_ids) > 1: + query = query.options(joinedload('l3_agent')).filter( + RouterL3AgentBinding.router_id.in_(router_ids)) + else: + query = query.options(joinedload('l3_agent')).filter( + RouterL3AgentBinding.router_id == router_ids[0]) + if admin_state_up is not None: + query = (query.filter(agents_db.Agent.admin_state_up == + admin_state_up)) + l3_agents = [binding.l3_agent for binding in query] + if active is not None: + l3_agents = [l3_agent for l3_agent in + l3_agents if not + agents_db.AgentDbMixin.is_agent_down( + l3_agent['heartbeat_timestamp'])] + return l3_agents + + def 
_get_l3_bindings_hosting_routers(self, context, router_ids): + if not router_ids: + return [] + query = context.session.query(RouterL3AgentBinding) + if len(router_ids) > 1: + query = query.options(joinedload('l3_agent')).filter( + RouterL3AgentBinding.router_id.in_(router_ids)) + else: + query = query.options(joinedload('l3_agent')).filter( + RouterL3AgentBinding.router_id == router_ids[0]) + return query.all() + + def list_l3_agents_hosting_router(self, context, router_id): + with context.session.begin(subtransactions=True): + bindings = self._get_l3_bindings_hosting_routers( + context, [router_id]) + results = [] + for binding in bindings: + l3_agent_dict = self._make_agent_dict(binding.l3_agent) + results.append(l3_agent_dict) + if results: + return {'agents': results} + else: + return {'agents': []} + + def get_l3_agents(self, context, active=None, filters=None): + query = context.session.query(agents_db.Agent) + query = query.filter( + agents_db.Agent.agent_type == constants.AGENT_TYPE_L3) + if active is not None: + query = (query.filter(agents_db.Agent.admin_state_up == active)) + if filters: + for key, value in filters.iteritems(): + column = getattr(agents_db.Agent, key, None) + if column: + query = query.filter(column.in_(value)) + + return [l3_agent + for l3_agent in query + if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent( + active, l3_agent)] + + def list_interfaces_onrouter(self, context, router_id): + if router_id: + return (context.session.query(models_v2.Port). + filter_by(device_id=router_id).all()) + + def check_vmexists_on_l3agent(self, context, l3_agent, router_id, + subnet_id): + core_plugin = manager.NeutronManager.get_plugin() + if not subnet_id: + return True + filter = {'fixed_ips': {'subnet_id': [subnet_id]}} + ports = core_plugin.get_ports(context, filters=filter) + for port in ports: + if "compute:" in port['device_owner']: + if l3_agent['host'] == port['binding:host_id']: + return True + return False + + def get_snat_candidates(self, sync_router, l3_agents): + """ Get the valid snat enabled l3 agents for the distributed router""" + candidates = [] + if not sync_router.get('distributed', False): + return [] + for l3_agent in l3_agents: + if not l3_agent.admin_state_up: + continue + agent_conf = self.get_configuration_dict(l3_agent) + router_id = agent_conf.get('router_id', None) + use_namespaces = agent_conf.get('use_namespaces', True) + handle_internal_only_routers = agent_conf.get( + 'handle_internal_only_routers', True) + gateway_external_network_id = agent_conf.get( + 'gateway_external_network_id', None) + distributed_router_enable = agent_conf.get( + 'distributed_agent', False) + centralized_snat_enable = agent_conf.get( + 'centralized_snat', False) + if (not distributed_router_enable or + not centralized_snat_enable): + continue + if not use_namespaces and router_id != sync_router['id']: + continue + ex_net_id = (sync_router['external_gateway_info'] or {}).get( + 'network_id') + if ((not ex_net_id and not handle_internal_only_routers) or + (ex_net_id and gateway_external_network_id and + ex_net_id != gateway_external_network_id)): + continue + if (sync_router.get('distributed', False) and + distributed_router_enable and + centralized_snat_enable): + candidates.append(l3_agent) + return candidates + + def get_l3_agent_candidates(self, context, sync_router, l3_agents, + subnet_id=None): + """Get the valid l3 agents for the router from a list of l3_agents.""" + candidates = [] + for l3_agent in l3_agents: + if not l3_agent.admin_state_up: + 
continue + agent_conf = self.get_configuration_dict(l3_agent) + router_id = agent_conf.get('router_id', None) + use_namespaces = agent_conf.get('use_namespaces', True) + handle_internal_only_routers = agent_conf.get( + 'handle_internal_only_routers', True) + gateway_external_network_id = agent_conf.get( + 'gateway_external_network_id', None) + distributed_router_enable = agent_conf.get( + 'distributed_agent', False) + centralized = agent_conf.get( + 'centralized_router', True) + if not use_namespaces and router_id != sync_router['id']: + continue + ex_net_id = (sync_router['external_gateway_info'] or {}).get( + 'network_id') + if ((not ex_net_id and not handle_internal_only_routers) or + (ex_net_id and gateway_external_network_id and + ex_net_id != gateway_external_network_id)): + continue + if not sync_router.get('distributed', False): + if (not distributed_router_enable) or centralized: + candidates.append(l3_agent) + else: + if (distributed_router_enable and + self.check_vmexists_on_l3agent(context, + l3_agent, + sync_router['id'], + subnet_id)): + candidates.append(l3_agent) + return candidates + + def auto_schedule_routers(self, context, host, router_ids): + if self.router_scheduler: + return self.router_scheduler.auto_schedule_routers( + self, context, host, router_ids) + + def schedule_router(self, context, router, candidates=None, hints=None): + if self.router_scheduler: + return self.router_scheduler.schedule( + self, context, router, candidates=candidates, hints=hints) + + def schedule_routers(self, context, routers, hints=None): + """Schedule the routers to l3 agents.""" + for router in routers: + self.schedule_router(context, router, candidates=None, hints=hints) + + def get_l3_agent_with_min_routers(self, context, agent_ids): + """Return l3 agent with the least number of routers.""" + query = context.session.query( + agents_db.Agent, + func.count( + RouterL3AgentBinding.router_id + ).label('count')).outerjoin(RouterL3AgentBinding).group_by( + RouterL3AgentBinding.l3_agent_id).order_by('count') + res = query.filter(agents_db.Agent.id.in_(agent_ids)).first() + return res[0] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/l3_attrs_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_attrs_db.py new file mode 100644 index 00000000..662e4fde --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_attrs_db.py @@ -0,0 +1,76 @@ +# Copyright (c) 2014 OpenStack Foundation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.db import model_base +from neutron.extensions import l3 + + +class RouterExtraAttributes(model_base.BASEV2): + """Additional attributes for a Virtual Router.""" + + # NOTE(armando-migliaccio): this model can be a good place to + # add extension attributes to a Router model. 
Each case needs + # to be individually examined, however 'distributed' and other + # simple ones fit the pattern well. + __tablename__ = "router_extra_attributes" + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', ondelete="CASCADE"), + primary_key=True) + # Whether the router is a legacy (centralized) or a distributed one + distributed = sa.Column(sa.Boolean, default=False, nullable=False) + router = orm.relationship( + l3_db.Router, + backref=orm.backref("extra_attributes", lazy='joined', + uselist=False, cascade='delete')) + + +class ExtraAttributesMixin(object): + """Mixin class to enable router's extra attributes.""" + + extra_attributes = [] + + def _extend_extra_router_dict(self, router_res, router_db): + extra_attrs = router_db['extra_attributes'] + for attr in self.extra_attributes: + name = attr['name'] + default = attr['default'] + router_res[name] = ( + extra_attrs and extra_attrs[name] or default) + + def _get_extra_attributes(self, router, extra_attributes): + return (dict((attr['name'], + router.get(attr['name'], attr['default'])) + for attr in extra_attributes)) + + def _process_extra_attr_router_create( + self, context, router_db, router_req): + kwargs = self._get_extra_attributes(router_req, self.extra_attributes) + # extra_attributes reference is populated via backref + if not router_db['extra_attributes']: + attributes_db = RouterExtraAttributes( + router_id=router_db['id'], **kwargs) + context.session.add(attributes_db) + router_db['extra_attributes'] = attributes_db + else: + # The record will exist if RouterExtraAttributes model's + # attributes are added with db migrations over time + router_db['extra_attributes'].update(kwargs) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, ['_extend_extra_router_dict']) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/l3_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_db.py new file mode 100644 index 00000000..e8df6b6f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_db.py @@ -0,0 +1,1057 @@ +# Copyright 2012 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
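Reviewer note on ExtraAttributesMixin in l3_attrs_db.py above: each declared extra attribute (for example 'distributed') is read from the router's extra_attributes record when one exists and otherwise falls back to its declared default. A dict-based sketch of that merge, using an illustrative attribute list:

```python
EXTRA_ATTRIBUTES = [{'name': 'distributed', 'default': False}]


def extend_extra_router_dict(router_res, extra_attrs):
    # Mirrors _extend_extra_router_dict: take the value from the record when
    # present, else the default. Note: like the original expression, a stored
    # falsy value falls back to the default (harmless when the default is False).
    for attr in EXTRA_ATTRIBUTES:
        name, default = attr['name'], attr['default']
        router_res[name] = (extra_attrs and extra_attrs.get(name)) or default
    return router_res


print(extend_extra_router_dict({'id': 'router-1'}, {'distributed': True}))
# -> {'id': 'router-1', 'distributed': True}
print(extend_extra_router_dict({'id': 'router-2'}, None))
# -> {'id': 'router-2', 'distributed': False}
```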
+ +import netaddr +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as l3_constants +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import utils +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import external_net +from neutron.extensions import l3 +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants + +LOG = logging.getLogger(__name__) + + +DEVICE_OWNER_ROUTER_INTF = l3_constants.DEVICE_OWNER_ROUTER_INTF +DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW +DEVICE_OWNER_FLOATINGIP = l3_constants.DEVICE_OWNER_FLOATINGIP +EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO + +# Maps API field to DB column +# API parameter name and Database column names may differ. +# Useful to keep the filtering between API and Database. +API_TO_DB_COLUMN_MAP = {'port_id': 'fixed_port_id'} +CORE_ROUTER_ATTRS = ('id', 'name', 'tenant_id', 'admin_state_up', 'status') + + +class Router(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 neutron router.""" + + name = sa.Column(sa.String(255)) + status = sa.Column(sa.String(16)) + admin_state_up = sa.Column(sa.Boolean) + gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) + gw_port = orm.relationship(models_v2.Port, lazy='joined') + + +class FloatingIP(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a floating IP address. + + This IP address may or may not be allocated to a tenant, and may or + may not be associated with an internal port/ip address/router. 
+ """ + + floating_ip_address = sa.Column(sa.String(64), nullable=False) + floating_network_id = sa.Column(sa.String(36), nullable=False) + floating_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'), + nullable=False) + fixed_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) + fixed_ip_address = sa.Column(sa.String(64)) + router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id')) + # Additional attribute for keeping track of the router where the floating + # ip was associated in order to be able to ensure consistency even if an + # aysnchronous backend is unavailable when the floating IP is disassociated + last_known_router_id = sa.Column(sa.String(36)) + status = sa.Column(sa.String(16)) + + +class L3_NAT_db_mixin(l3.RouterPluginBase): + """Mixin class to add L3/NAT router methods to db_base_plugin_v2.""" + + router_device_owners = ( + DEVICE_OWNER_ROUTER_INTF, + DEVICE_OWNER_ROUTER_GW, + DEVICE_OWNER_FLOATINGIP + ) + + @property + def l3_rpc_notifier(self): + if not hasattr(self, '_l3_rpc_notifier'): + self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() + return self._l3_rpc_notifier + + @l3_rpc_notifier.setter + def l3_rpc_notifier(self, value): + self._l3_rpc_notifier = value + + @property + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + + def _get_router(self, context, router_id): + try: + router = self._get_by_id(context, Router, router_id) + except exc.NoResultFound: + raise l3.RouterNotFound(router_id=router_id) + return router + + def _make_router_dict(self, router, fields=None, process_extensions=True): + res = dict((key, router[key]) for key in CORE_ROUTER_ATTRS) + if router['gw_port_id']: + ext_gw_info = {'network_id': router.gw_port['network_id']} + else: + ext_gw_info = None + res.update({ + EXTERNAL_GW_INFO: ext_gw_info, + 'gw_port_id': router['gw_port_id'], + }) + # NOTE(salv-orlando): The following assumes this mixin is used in a + # class inheriting from CommonDbMixin, which is true for all existing + # plugins. 
+ if process_extensions: + self._apply_dict_extend_functions(l3.ROUTERS, res, router) + return self._fields(res, fields) + + def _create_router_db(self, context, router, tenant_id): + """Create the DB object.""" + with context.session.begin(subtransactions=True): + # pre-generate id so it will be available when + # configuring external gw port + router_db = Router(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=router['name'], + admin_state_up=router['admin_state_up'], + status="ACTIVE") + context.session.add(router_db) + return router_db + + def create_router(self, context, router): + r = router['router'] + gw_info = r.pop(EXTERNAL_GW_INFO, None) + tenant_id = self._get_tenant_id_for_create(context, r) + with context.session.begin(subtransactions=True): + router_db = self._create_router_db(context, r, tenant_id) + if gw_info: + self._update_router_gw_info(context, router_db['id'], + gw_info, router=router_db) + return self._make_router_dict(router_db) + + def _update_router_db(self, context, router_id, data, gw_info): + """Update the DB object and related gw info, if available.""" + with context.session.begin(subtransactions=True): + if gw_info != attributes.ATTR_NOT_SPECIFIED: + self._update_router_gw_info(context, router_id, gw_info) + router_db = self._get_router(context, router_id) + if data: + router_db.update(data) + return router_db + + def update_router(self, context, id, router): + r = router['router'] + has_gw_info = False + payload = {} + if EXTERNAL_GW_INFO in r: + has_gw_info = True + gw_info = r.pop(EXTERNAL_GW_INFO, attributes.ATTR_NOT_SPECIFIED) + # check whether router needs and can be rescheduled to the proper + # l3 agent (associated with given external network); + # do check before update in DB as an exception will be raised + # in case no proper l3 agent found + candidates = None + if gw_info != attributes.ATTR_NOT_SPECIFIED: + candidates = self._check_router_needs_rescheduling( + context, id, gw_info) + router_db = self._update_router_db(context, id, r, gw_info) + if candidates: + l3_plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + l3_plugin.reschedule_router(context, id, candidates) + if has_gw_info: + if gw_info: + payload = {'gw_exists': True} + else: + payload = {'gw_exists': False} + self.l3_rpc_notifier.routers_updated(context, [router_db['id']], + None, payload) + return self._make_router_dict(router_db) + + def _check_router_needs_rescheduling(self, context, router_id, gw_info): + """Checks whether router's l3 agent can handle the given network + + When external_network_bridge is set, each L3 agent can be associated + with at most one external network. If router's new external gateway + is on other network then the router needs to be rescheduled to the + proper l3 agent. 
+ If external_network_bridge is not set then the agent + can support multiple external networks and rescheduling is not needed + + :return: list of candidate agents if rescheduling needed, + None otherwise; raises exception if there is no eligible l3 agent + associated with target external network + """ + # TODO(obondarev): rethink placement of this func as l3 db manager is + # not really a proper place for agent scheduling stuff + network_id = gw_info.get('network_id') if gw_info else None + if not network_id: + return + + nets = self._core_plugin.get_networks( + context, {external_net.EXTERNAL: [True]}) + # nothing to do if there is only one external network + if len(nets) <= 1: + return + + # first get plugin supporting l3 agent scheduling + # (either l3 service plugin or core_plugin) + l3_plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + if (not utils.is_extension_supported( + l3_plugin, + l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or + l3_plugin.router_scheduler is None): + # that might mean that we are dealing with non-agent-based + # implementation of l3 services + return + + cur_agents = l3_plugin.list_l3_agents_hosting_router( + context, router_id)['agents'] + for agent in cur_agents: + ext_net_id = agent['configurations'].get( + 'gateway_external_network_id') + ext_bridge = agent['configurations'].get( + 'external_network_bridge', 'br-ex') + if (ext_net_id == network_id or + (not ext_net_id and not ext_bridge)): + return + + # otherwise find l3 agent with matching gateway_external_network_id + active_agents = l3_plugin.get_l3_agents(context, active=True) + router = { + 'id': router_id, + 'external_gateway_info': {'network_id': network_id} + } + candidates = l3_plugin.get_l3_agent_candidates(context, + router, + active_agents) + if not candidates: + msg = (_('No eligible l3 agent associated with external network ' + '%s found') % network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + + return candidates + + def _create_router_gw_port(self, context, router, network_id): + # Port has no 'tenant-id', as it is hidden from user + gw_port = self._core_plugin.create_port(context.elevated(), { + 'port': {'tenant_id': '', # intentionally not set + 'network_id': network_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'device_id': router['id'], + 'device_owner': DEVICE_OWNER_ROUTER_GW, + 'admin_state_up': True, + 'name': ''}}) + + if not gw_port['fixed_ips']: + self._core_plugin.delete_port(context.elevated(), gw_port['id'], + l3_port_check=False) + msg = (_('No IPs available for external network %s') % + network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + + with context.session.begin(subtransactions=True): + router.gw_port = self._core_plugin._get_port(context.elevated(), + gw_port['id']) + context.session.add(router) + + def _validate_gw_info(self, context, gw_port, info): + network_id = info['network_id'] if info else None + if network_id: + network_db = self._core_plugin._get_network(context, network_id) + if not network_db.external: + msg = _("Network %s is not an external network") % network_id + raise n_exc.BadRequest(resource='router', msg=msg) + return network_id + + def _delete_current_gw_port(self, context, router_id, router, new_network): + """Delete gw port, if it is attached to an old network.""" + is_gw_port_attached_to_existing_network = ( + router.gw_port and router.gw_port['network_id'] != new_network) + admin_ctx = context.elevated() + if 
is_gw_port_attached_to_existing_network: + if self.get_floatingips_count( + admin_ctx, {'router_id': [router_id]}): + raise l3.RouterExternalGatewayInUseByFloatingIp( + router_id=router_id, net_id=router.gw_port['network_id']) + with context.session.begin(subtransactions=True): + gw_port_id = router.gw_port['id'] + router.gw_port = None + context.session.add(router) + self._core_plugin.delete_port( + admin_ctx, gw_port_id, l3_port_check=False) + + def _create_gw_port(self, context, router_id, router, new_network): + new_valid_gw_port_attachment = ( + new_network and (not router.gw_port or + router.gw_port['network_id'] != new_network)) + if new_valid_gw_port_attachment: + subnets = self._core_plugin._get_subnets_by_network(context, + new_network) + for subnet in subnets: + self._check_for_dup_router_subnet(context, router_id, + new_network, subnet['id'], + subnet['cidr']) + self._create_router_gw_port(context, router, new_network) + + def _update_router_gw_info(self, context, router_id, info, router=None): + # TODO(salvatore-orlando): guarantee atomic behavior also across + # operations that span beyond the model classes handled by this + # class (e.g.: delete_port) + router = router or self._get_router(context, router_id) + gw_port = router.gw_port + network_id = self._validate_gw_info(context, gw_port, info) + self._delete_current_gw_port(context, router_id, router, network_id) + self._create_gw_port(context, router_id, router, network_id) + + def _ensure_router_not_in_use(self, context, router_id): + admin_ctx = context.elevated() + router = self._get_router(context, router_id) + if self.get_floatingips_count( + admin_ctx, filters={'router_id': [router_id]}): + raise l3.RouterInUse(router_id=router_id) + device_owner = self._get_device_owner(context, router) + device_filter = {'device_id': [router_id], + 'device_owner': [device_owner]} + port_count = self._core_plugin.get_ports_count( + admin_ctx, filters=device_filter) + if port_count: + raise l3.RouterInUse(router_id=router_id) + return router + + def delete_router(self, context, id): + with context.session.begin(subtransactions=True): + router = self._ensure_router_not_in_use(context, id) + + #TODO(nati) Refactor here when we have router insertion model + vpnservice = manager.NeutronManager.get_service_plugins().get( + constants.VPN) + if vpnservice: + vpnservice.check_router_in_use(context, id) + + context.session.delete(router) + + # Delete the gw port after the router has been removed to + # avoid a constraint violation. 
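The gateway update path above (`_validate_gw_info`, `_delete_current_gw_port`, `_create_gw_port`, tied together by `_update_router_gw_info`) reduces to a validate / drop-stale-port / attach-new-port ordering. A condensed, standalone sketch of that ordering, with hypothetical callables standing in for the plugin and DB session calls, might look like:

```python
# Sketch only: validate_external, delete_port and create_port are
# hypothetical stand-ins for the core-plugin calls; only the ordering
# mirrored from _update_router_gw_info is the point here.

def update_gateway(router, new_network_id, validate_external,
                   delete_port, create_port):
    # 1. Reject networks that are not marked external.
    if new_network_id is not None:
        validate_external(new_network_id)
    # 2. Drop the old gateway port only if it points at a different network.
    old_port = router.get('gw_port')
    if old_port and old_port['network_id'] != new_network_id:
        delete_port(old_port['id'])
        router['gw_port'] = None
    # 3. Attach a fresh gateway port when a new external network was given.
    if new_network_id and not router.get('gw_port'):
        router['gw_port'] = create_port(new_network_id)
    return router
```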
+ device_filter = {'device_id': [id], + 'device_owner': [DEVICE_OWNER_ROUTER_GW]} + ports = self._core_plugin.get_ports(context.elevated(), + filters=device_filter) + if ports: + self._core_plugin._delete_port(context.elevated(), + ports[0]['id']) + + self.l3_rpc_notifier.router_deleted(context, id) + + def get_router(self, context, id, fields=None): + router = self._get_router(context, id) + return self._make_router_dict(router, fields) + + def get_routers(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'router', limit, marker) + return self._get_collection(context, Router, + self._make_router_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_routers_count(self, context, filters=None): + return self._get_collection_count(context, Router, + filters=filters) + + def _check_for_dup_router_subnet(self, context, router_id, + network_id, subnet_id, subnet_cidr): + try: + rport_qry = context.session.query(models_v2.Port) + rports = rport_qry.filter_by(device_id=router_id) + # It's possible these ports are on the same network, but + # different subnets. + new_ipnet = netaddr.IPNetwork(subnet_cidr) + for p in rports: + for ip in p['fixed_ips']: + if ip['subnet_id'] == subnet_id: + msg = (_("Router already has a port on subnet %s") + % subnet_id) + raise n_exc.BadRequest(resource='router', msg=msg) + sub_id = ip['subnet_id'] + cidr = self._core_plugin._get_subnet(context.elevated(), + sub_id)['cidr'] + ipnet = netaddr.IPNetwork(cidr) + match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr]) + match2 = netaddr.all_matching_cidrs(ipnet, [subnet_cidr]) + if match1 or match2: + data = {'subnet_cidr': subnet_cidr, + 'subnet_id': subnet_id, + 'cidr': cidr, + 'sub_id': sub_id} + msg = (_("Cidr %(subnet_cidr)s of subnet " + "%(subnet_id)s overlaps with cidr %(cidr)s " + "of subnet %(sub_id)s") % data) + raise n_exc.BadRequest(resource='router', msg=msg) + except exc.NoResultFound: + pass + + def _get_device_owner(self, context, router=None): + """Get device_owner for the specified router.""" + # NOTE(armando-migliaccio): in the base case this is invariant + return DEVICE_OWNER_ROUTER_INTF + + def _validate_interface_info(self, interface_info): + if not interface_info: + msg = _("Either subnet_id or port_id must be specified") + raise n_exc.BadRequest(resource='router', msg=msg) + port_id_specified = 'port_id' in interface_info + subnet_id_specified = 'subnet_id' in interface_info + if port_id_specified and subnet_id_specified: + msg = _("Cannot specify both subnet-id and port-id") + raise n_exc.BadRequest(resource='router', msg=msg) + return port_id_specified, subnet_id_specified + + def _add_interface_by_port(self, context, router_id, port_id, owner): + with context.session.begin(subtransactions=True): + port = self._core_plugin._get_port(context, port_id) + if port['device_id']: + raise n_exc.PortInUse(net_id=port['network_id'], + port_id=port['id'], + device_id=port['device_id']) + fixed_ips = [ip for ip in port['fixed_ips']] + if len(fixed_ips) != 1: + msg = _('Router port must have exactly one fixed IP') + raise n_exc.BadRequest(resource='router', msg=msg) + subnet_id = fixed_ips[0]['subnet_id'] + subnet = self._core_plugin._get_subnet(context, subnet_id) + self._check_for_dup_router_subnet(context, router_id, + port['network_id'], + subnet['id'], + subnet['cidr']) + port.update({'device_id': router_id, 'device_owner': 
owner}) + return (port, subnet) + + def _add_interface_by_subnet(self, context, router_id, subnet_id, owner): + subnet = self._core_plugin._get_subnet(context, subnet_id) + if not subnet['gateway_ip']: + msg = _('Subnet for router interface must have a gateway IP') + raise n_exc.BadRequest(resource='router', msg=msg) + self._check_for_dup_router_subnet(context, router_id, + subnet['network_id'], + subnet_id, + subnet['cidr']) + fixed_ip = {'ip_address': subnet['gateway_ip'], + 'subnet_id': subnet['id']} + return self._core_plugin.create_port(context, { + 'port': + {'tenant_id': subnet['tenant_id'], + 'network_id': subnet['network_id'], + 'fixed_ips': [fixed_ip], + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': True, + 'device_id': router_id, + 'device_owner': owner, + 'name': ''}}) + + def add_router_interface(self, context, router_id, interface_info): + add_by_port, add_by_sub = self._validate_interface_info(interface_info) + device_owner = self._get_device_owner(context, router_id) + payload = {} + + if add_by_port: + port, subnet = self._add_interface_by_port( + context, router_id, interface_info['port_id'], device_owner) + subnet_id = subnet['id'] + elif add_by_sub: + port = self._add_interface_by_subnet( + context, router_id, interface_info['subnet_id'], device_owner) + subnet_id = interface_info['subnet_id'] + + payload = {'subnet_id': subnet_id} + self.l3_rpc_notifier.routers_updated( + context, [router_id], 'add_router_interface', payload) + info = {'id': router_id, + 'tenant_id': port['tenant_id'], + 'port_id': port['id'], + 'subnet_id': port['fixed_ips'][0]['subnet_id']} + notifier = n_rpc.get_notifier('network') + notifier.info( + context, 'router.interface.create', {'router_interface': info}) + return info + + def _confirm_router_interface_not_in_use(self, context, router_id, + subnet_id): + subnet_db = self._core_plugin._get_subnet(context, subnet_id) + subnet_cidr = netaddr.IPNetwork(subnet_db['cidr']) + fip_qry = context.session.query(FloatingIP) + for fip_db in fip_qry.filter_by(router_id=router_id): + if netaddr.IPAddress(fip_db['fixed_ip_address']) in subnet_cidr: + raise l3.RouterInterfaceInUseByFloatingIP( + router_id=router_id, subnet_id=subnet_id) + + def _remove_interface_by_port(self, context, router_id, + port_id, subnet_id, owner): + port_db = self._core_plugin._get_port(context, port_id) + if not (port_db['device_owner'] == owner and + port_db['device_id'] == router_id): + raise l3.RouterInterfaceNotFound(router_id=router_id, + port_id=port_id) + port_subnet_id = port_db['fixed_ips'][0]['subnet_id'] + if subnet_id and port_subnet_id != subnet_id: + raise n_exc.SubnetMismatchForPort( + port_id=port_id, subnet_id=subnet_id) + subnet = self._core_plugin._get_subnet(context, port_subnet_id) + self._confirm_router_interface_not_in_use( + context, router_id, port_subnet_id) + self._core_plugin.delete_port(context, port_db['id'], + l3_port_check=False) + return (port_db, subnet) + + def _remove_interface_by_subnet(self, context, + router_id, subnet_id, owner): + self._confirm_router_interface_not_in_use( + context, router_id, subnet_id) + subnet = self._core_plugin._get_subnet(context, subnet_id) + + try: + rport_qry = context.session.query(models_v2.Port) + ports = rport_qry.filter_by( + device_id=router_id, + device_owner=owner, + network_id=subnet['network_id']) + + for p in ports: + if p['fixed_ips'][0]['subnet_id'] == subnet_id: + self._core_plugin.delete_port(context, p['id'], + l3_port_check=False) + return (p, subnet) + except 
exc.NoResultFound: + pass + raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id, + subnet_id=subnet_id) + + def remove_router_interface(self, context, router_id, interface_info): + if not interface_info: + msg = _("Either subnet_id or port_id must be specified") + raise n_exc.BadRequest(resource='router', msg=msg) + port_id = interface_info.get('port_id') + subnet_id = interface_info.get('subnet_id') + device_owner = self._get_device_owner(context, router_id) + if port_id: + port, subnet = self._remove_interface_by_port(context, router_id, + port_id, subnet_id, + device_owner) + subnet_id = subnet['id'] + elif subnet_id: + port, subnet = self._remove_interface_by_subnet( + context, router_id, subnet_id, device_owner) + + payload = {'subnet_id': subnet_id} + self.l3_rpc_notifier.routers_updated( + context, [router_id], 'remove_router_interface', payload) + info = {'id': router_id, + 'tenant_id': port['tenant_id'], + 'port_id': port['id'], + 'subnet_id': subnet['id']} + notifier = n_rpc.get_notifier('network') + notifier.info( + context, 'router.interface.delete', {'router_interface': info}) + return info + + def _get_floatingip(self, context, id): + try: + floatingip = self._get_by_id(context, FloatingIP, id) + except exc.NoResultFound: + raise l3.FloatingIPNotFound(floatingip_id=id) + return floatingip + + def _make_floatingip_dict(self, floatingip, fields=None): + res = {'id': floatingip['id'], + 'tenant_id': floatingip['tenant_id'], + 'floating_ip_address': floatingip['floating_ip_address'], + 'floating_network_id': floatingip['floating_network_id'], + 'router_id': floatingip['router_id'], + 'port_id': floatingip['fixed_port_id'], + 'fixed_ip_address': floatingip['fixed_ip_address'], + 'status': floatingip['status']} + return self._fields(res, fields) + + def _get_interface_ports_for_network(self, context, network_id): + router_intf_qry = context.session.query(models_v2.Port) + return router_intf_qry.filter_by( + network_id=network_id, + device_owner=DEVICE_OWNER_ROUTER_INTF) + + def _get_router_for_floatingip(self, context, internal_port, + internal_subnet_id, + external_network_id): + subnet_db = self._core_plugin._get_subnet(context, + internal_subnet_id) + if not subnet_db['gateway_ip']: + msg = (_('Cannot add floating IP to port on subnet %s ' + 'which has no gateway_ip') % internal_subnet_id) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + + router_intf_ports = self._get_interface_ports_for_network( + context, internal_port['network_id']) + + for intf_p in router_intf_ports: + if intf_p['fixed_ips'][0]['subnet_id'] == internal_subnet_id: + router_id = intf_p['device_id'] + router_gw_qry = context.session.query(models_v2.Port) + has_gw_port = router_gw_qry.filter_by( + network_id=external_network_id, + device_id=router_id, + device_owner=DEVICE_OWNER_ROUTER_GW).count() + if has_gw_port: + return router_id + + raise l3.ExternalGatewayForFloatingIPNotFound( + subnet_id=internal_subnet_id, + external_network_id=external_network_id, + port_id=internal_port['id']) + + def _internal_fip_assoc_data(self, context, fip): + """Retrieve internal port data for floating IP. + + Retrieve information concerning the internal port where + the floating IP should be associated to. 
+ """ + internal_port = self._core_plugin._get_port(context, fip['port_id']) + if not internal_port['tenant_id'] == fip['tenant_id']: + port_id = fip['port_id'] + if 'id' in fip: + floatingip_id = fip['id'] + data = {'port_id': port_id, + 'floatingip_id': floatingip_id} + msg = (_('Port %(port_id)s is associated with a different ' + 'tenant than Floating IP %(floatingip_id)s and ' + 'therefore cannot be bound.') % data) + else: + msg = (_('Cannot create floating IP and bind it to ' + 'Port %s, since that port is owned by a ' + 'different tenant.') % port_id) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + + internal_subnet_id = None + if 'fixed_ip_address' in fip and fip['fixed_ip_address']: + internal_ip_address = fip['fixed_ip_address'] + for ip in internal_port['fixed_ips']: + if ip['ip_address'] == internal_ip_address: + internal_subnet_id = ip['subnet_id'] + if not internal_subnet_id: + msg = (_('Port %(id)s does not have fixed ip %(address)s') % + {'id': internal_port['id'], + 'address': internal_ip_address}) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + else: + ips = [ip['ip_address'] for ip in internal_port['fixed_ips']] + if not ips: + msg = (_('Cannot add floating IP to port %s that has' + 'no fixed IP addresses') % internal_port['id']) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + if len(ips) > 1: + msg = (_('Port %s has multiple fixed IPs. Must provide' + ' a specific IP when assigning a floating IP') % + internal_port['id']) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + internal_ip_address = internal_port['fixed_ips'][0]['ip_address'] + internal_subnet_id = internal_port['fixed_ips'][0]['subnet_id'] + return internal_port, internal_subnet_id, internal_ip_address + + def get_assoc_data(self, context, fip, floating_network_id): + """Determine/extract data associated with the internal port. + + When a floating IP is associated with an internal port, + we need to extract/determine some data associated with the + internal port, including the internal_ip_address, and router_id. + We also need to confirm that this internal port is owned by the + tenant who owns the floating IP. 
+ """ + (internal_port, internal_subnet_id, + internal_ip_address) = self._internal_fip_assoc_data(context, fip) + router_id = self._get_router_for_floatingip(context, + internal_port, + internal_subnet_id, + floating_network_id) + # confirm that this router has a floating + # ip enabled gateway with support for this floating IP network + try: + port_qry = context.elevated().session.query(models_v2.Port) + port_qry.filter_by( + network_id=floating_network_id, + device_id=router_id, + device_owner=DEVICE_OWNER_ROUTER_GW).one() + except exc.NoResultFound: + raise l3.ExternalGatewayForFloatingIPNotFound( + subnet_id=internal_subnet_id, + port_id=internal_port['id']) + + return (fip['port_id'], internal_ip_address, router_id) + + def _update_fip_assoc(self, context, fip, floatingip_db, external_port): + previous_router_id = floatingip_db.router_id + port_id = internal_ip_address = router_id = None + if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and + not ('port_id' in fip and fip['port_id'])): + msg = _("fixed_ip_address cannot be specified without a port_id") + raise n_exc.BadRequest(resource='floatingip', msg=msg) + if 'port_id' in fip and fip['port_id']: + port_id, internal_ip_address, router_id = self.get_assoc_data( + context, + fip, + floatingip_db['floating_network_id']) + fip_qry = context.session.query(FloatingIP) + try: + fip_qry.filter_by( + fixed_port_id=fip['port_id'], + floating_network_id=floatingip_db['floating_network_id'], + fixed_ip_address=internal_ip_address).one() + raise l3.FloatingIPPortAlreadyAssociated( + port_id=fip['port_id'], + fip_id=floatingip_db['id'], + floating_ip_address=floatingip_db['floating_ip_address'], + fixed_ip=internal_ip_address, + net_id=floatingip_db['floating_network_id']) + except exc.NoResultFound: + pass + floatingip_db.update({'fixed_ip_address': internal_ip_address, + 'fixed_port_id': port_id, + 'router_id': router_id, + 'last_known_router_id': previous_router_id}) + + def create_floatingip( + self, context, floatingip, + initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): + fip = floatingip['floatingip'] + tenant_id = self._get_tenant_id_for_create(context, fip) + fip_id = uuidutils.generate_uuid() + + f_net_id = fip['floating_network_id'] + if not self._core_plugin._network_is_external(context, f_net_id): + msg = _("Network %s is not a valid external network") % f_net_id + raise n_exc.BadRequest(resource='floatingip', msg=msg) + + with context.session.begin(subtransactions=True): + # This external port is never exposed to the tenant. + # it is used purely for internal system and admin use when + # managing floating IPs. 
+ external_port = self._core_plugin.create_port(context.elevated(), { + 'port': + {'tenant_id': '', # tenant intentionally not set + 'network_id': f_net_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': True, + 'device_id': fip_id, + 'device_owner': DEVICE_OWNER_FLOATINGIP, + 'name': ''}}) + # Ensure IP addresses are allocated on external port + if not external_port['fixed_ips']: + raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id) + + floating_fixed_ip = external_port['fixed_ips'][0] + floating_ip_address = floating_fixed_ip['ip_address'] + floatingip_db = FloatingIP( + id=fip_id, + tenant_id=tenant_id, + status=initial_status, + floating_network_id=fip['floating_network_id'], + floating_ip_address=floating_ip_address, + floating_port_id=external_port['id']) + fip['tenant_id'] = tenant_id + # Update association with internal port + # and define external IP address + self._update_fip_assoc(context, fip, + floatingip_db, external_port) + context.session.add(floatingip_db) + + router_id = floatingip_db['router_id'] + if router_id: + self.l3_rpc_notifier.routers_updated( + context, [router_id], + 'create_floatingip', {}) + return self._make_floatingip_dict(floatingip_db) + + def update_floatingip(self, context, id, floatingip): + fip = floatingip['floatingip'] + with context.session.begin(subtransactions=True): + floatingip_db = self._get_floatingip(context, id) + fip['tenant_id'] = floatingip_db['tenant_id'] + fip['id'] = id + fip_port_id = floatingip_db['floating_port_id'] + before_router_id = floatingip_db['router_id'] + self._update_fip_assoc(context, fip, floatingip_db, + self._core_plugin.get_port( + context.elevated(), fip_port_id)) + router_ids = [] + if before_router_id: + router_ids.append(before_router_id) + router_id = floatingip_db['router_id'] + if router_id and router_id != before_router_id: + router_ids.append(router_id) + if router_ids: + self.l3_rpc_notifier.routers_updated( + context, router_ids, 'update_floatingip', {}) + return self._make_floatingip_dict(floatingip_db) + + def update_floatingip_status(self, context, floatingip_id, status): + """Update operational status for floating IP in neutron DB.""" + fip_query = self._model_query(context, FloatingIP).filter( + FloatingIP.id == floatingip_id) + fip_query.update({'status': status}, synchronize_session=False) + + def delete_floatingip(self, context, id): + floatingip = self._get_floatingip(context, id) + router_id = floatingip['router_id'] + with context.session.begin(subtransactions=True): + context.session.delete(floatingip) + self._core_plugin.delete_port(context.elevated(), + floatingip['floating_port_id'], + l3_port_check=False) + if router_id: + self.l3_rpc_notifier.routers_updated( + context, [router_id], + 'delete_floatingip', {}) + + def get_floatingip(self, context, id, fields=None): + floatingip = self._get_floatingip(context, id) + return self._make_floatingip_dict(floatingip, fields) + + def get_floatingips(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'floatingip', limit, + marker) + if filters is not None: + for key, val in API_TO_DB_COLUMN_MAP.iteritems(): + if key in filters: + filters[val] = filters.pop(key) + + return self._get_collection(context, FloatingIP, + self._make_floatingip_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def 
delete_disassociated_floatingips(self, context, network_id): + query = self._model_query(context, FloatingIP) + query = query.filter_by(floating_network_id=network_id, + fixed_port_id=None, + router_id=None) + for fip in query: + self.delete_floatingip(context, fip.id) + + def get_floatingips_count(self, context, filters=None): + return self._get_collection_count(context, FloatingIP, + filters=filters) + + def prevent_l3_port_deletion(self, context, port_id): + """Checks to make sure a port is allowed to be deleted. + + Raises an exception if this is not the case. This should be called by + any plugin when the API requests the deletion of a port, since some + ports for L3 are not intended to be deleted directly via a DELETE + to /ports, but rather via other API calls that perform the proper + deletion checks. + """ + port_db = self._core_plugin._get_port(context, port_id) + if port_db['device_owner'] in self.router_device_owners: + # Raise port in use only if the port has IP addresses + # Otherwise it's a stale port that can be removed + fixed_ips = port_db['fixed_ips'] + if fixed_ips: + raise l3.L3PortInUse(port_id=port_id, + device_owner=port_db['device_owner']) + else: + LOG.debug(_("Port %(port_id)s has owner %(port_owner)s, but " + "no IP address, so it can be deleted"), + {'port_id': port_db['id'], + 'port_owner': port_db['device_owner']}) + + def disassociate_floatingips(self, context, port_id): + router_ids = set() + + with context.session.begin(subtransactions=True): + fip_qry = context.session.query(FloatingIP) + floating_ips = fip_qry.filter_by(fixed_port_id=port_id) + for floating_ip in floating_ips: + router_ids.add(floating_ip['router_id']) + floating_ip.update({'fixed_port_id': None, + 'fixed_ip_address': None, + 'router_id': None}) + + if router_ids: + self.l3_rpc_notifier.routers_updated( + context, list(router_ids), + 'disassociate_floatingips', {}) + + def _build_routers_list(self, context, routers, gw_ports): + gw_port_id_gw_port_dict = dict((gw_port['id'], gw_port) + for gw_port in gw_ports) + for router in routers: + gw_port_id = router['gw_port_id'] + if gw_port_id: + router['gw_port'] = gw_port_id_gw_port_dict[gw_port_id] + return routers + + def _get_sync_routers(self, context, router_ids=None, active=None): + """Query routers and their gw ports for l3 agent. + + Query routers with the router_ids. The gateway ports, if any, + will be queried too. + l3 agent has an option to deal with only one router id. In addition, + when we need to notify the agent the data about only one router + (when modification of router, its interfaces, gw_port and floatingips), + we will have router_ids. + @param router_ids: the list of router ids which we want to query. + if it is None, all of routers will be queried. 
+ @return: a list of dicted routers with dicted gw_port populated if any + """ + filters = {'id': router_ids} if router_ids else {} + if active is not None: + filters['admin_state_up'] = [active] + router_dicts = self.get_routers(context, filters=filters) + gw_port_ids = [] + if not router_dicts: + return [] + for router_dict in router_dicts: + gw_port_id = router_dict['gw_port_id'] + if gw_port_id: + gw_port_ids.append(gw_port_id) + gw_ports = [] + if gw_port_ids: + gw_ports = self.get_sync_gw_ports(context, gw_port_ids) + return self._build_routers_list(context, router_dicts, gw_ports) + + def _get_sync_floating_ips(self, context, router_ids): + """Query floating_ips that relate to list of router_ids.""" + if not router_ids: + return [] + return self.get_floatingips(context, {'router_id': router_ids}) + + def get_sync_gw_ports(self, context, gw_port_ids): + if not gw_port_ids: + return [] + filters = {'id': gw_port_ids} + gw_ports = self._core_plugin.get_ports(context, filters) + if gw_ports: + self._populate_subnet_for_ports(context, gw_ports) + return gw_ports + + def get_sync_interfaces(self, context, router_ids, device_owners=None): + """Query router interfaces that relate to list of router_ids.""" + device_owners = device_owners or [DEVICE_OWNER_ROUTER_INTF] + if not router_ids: + return [] + filters = {'device_id': router_ids, + 'device_owner': device_owners} + interfaces = self._core_plugin.get_ports(context, filters) + if interfaces: + self._populate_subnet_for_ports(context, interfaces) + return interfaces + + def _populate_subnet_for_ports(self, context, ports): + """Populate ports with subnet. + + These ports already have fixed_ips populated. + """ + if not ports: + return + + def each_port_with_ip(): + for port in ports: + fixed_ips = port.get('fixed_ips', []) + if len(fixed_ips) > 1: + LOG.info(_("Ignoring multiple IPs on router port %s"), + port['id']) + continue + elif not fixed_ips: + # Skip ports without IPs, which can occur if a subnet + # attached to a router is deleted + LOG.info(_("Skipping port %s as no IP is configure on it"), + port['id']) + continue + yield (port, fixed_ips[0]) + + network_ids = set(p['network_id'] for p, _ in each_port_with_ip()) + filters = {'network_id': [id for id in network_ids]} + fields = ['id', 'cidr', 'gateway_ip', 'network_id'] + + subnets_by_network = dict((id, []) for id in network_ids) + for subnet in self._core_plugin.get_subnets(context, filters, fields): + subnets_by_network[subnet['network_id']].append(subnet) + + for port, fixed_ip in each_port_with_ip(): + port['extra_subnets'] = [] + for subnet in subnets_by_network[port['network_id']]: + subnet_info = {'id': subnet['id'], + 'cidr': subnet['cidr'], + 'gateway_ip': subnet['gateway_ip']} + + if subnet['id'] == fixed_ip['subnet_id']: + port['subnet'] = subnet_info + else: + port['extra_subnets'].append(subnet_info) + + def _process_floating_ips(self, context, routers_dict, floating_ips): + for floating_ip in floating_ips: + router = routers_dict.get(floating_ip['router_id']) + if router: + router_floatingips = router.get(l3_constants.FLOATINGIP_KEY, + []) + router_floatingips.append(floating_ip) + router[l3_constants.FLOATINGIP_KEY] = router_floatingips + + def _process_interfaces(self, routers_dict, interfaces): + for interface in interfaces: + router = routers_dict.get(interface['device_id']) + if router: + router_interfaces = router.get(l3_constants.INTERFACE_KEY, []) + router_interfaces.append(interface) + router[l3_constants.INTERFACE_KEY] = router_interfaces + + def 
_get_router_info_list(self, context, router_ids=None, active=None, + device_owners=None): + """Query routers and their related floating_ips, interfaces.""" + with context.session.begin(subtransactions=True): + routers = self._get_sync_routers(context, + router_ids=router_ids, + active=active) + router_ids = [router['id'] for router in routers] + interfaces = self.get_sync_interfaces( + context, router_ids, device_owners) + floating_ips = self._get_sync_floating_ips(context, router_ids) + return (routers, interfaces, floating_ips) + + def get_sync_data(self, context, router_ids=None, active=None): + routers, interfaces, floating_ips = self._get_router_info_list( + context, router_ids=router_ids, active=active) + routers_dict = dict((router['id'], router) for router in routers) + self._process_floating_ips(context, routers_dict, floating_ips) + self._process_interfaces(routers_dict, interfaces) + return routers_dict.values() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/l3_dvr_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_dvr_db.py new file mode 100644 index 00000000..374a8e80 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_dvr_db.py @@ -0,0 +1,434 @@ +# Copyright (c) 2014 OpenStack Foundation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.common import constants as l3_const +from neutron.common import exceptions as n_exc +from neutron.db import l3_attrs_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.extensions import portbindings +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +DEVICE_OWNER_DVR_INTERFACE = l3_const.DEVICE_OWNER_DVR_INTERFACE +DEVICE_OWNER_DVR_SNAT = l3_const.DEVICE_OWNER_ROUTER_SNAT +FLOATINGIP_AGENT_INTF_KEY = l3_const.FLOATINGIP_AGENT_INTF_KEY +DEVICE_OWNER_AGENT_GW = l3_const.DEVICE_OWNER_AGENT_GW +SNAT_ROUTER_INTF_KEY = l3_const.SNAT_ROUTER_INTF_KEY + + +router_distributed_opts = [ + cfg.BoolOpt('router_distributed', + default=False, + help=_("System-wide flag to determine the type of router " + "that tenants can create. Only admin can override.")), +] +cfg.CONF.register_opts(router_distributed_opts) + + +class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, + l3_attrs_db.ExtraAttributesMixin): + """Mixin class to enable DVR support.""" + + router_device_owners = ( + l3_db.L3_NAT_db_mixin. 
+ router_device_owners + (DEVICE_OWNER_DVR_INTERFACE,)) + + extra_attributes = ( + l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{ + 'name': "distributed", + 'default': cfg.CONF.router_distributed + }]) + + def _create_router_db(self, context, router, tenant_id): + """Create a router db object with dvr additions.""" + router['distributed'] = _is_distributed_router(router) + with context.session.begin(subtransactions=True): + router_db = super( + L3_NAT_with_dvr_db_mixin, self)._create_router_db( + context, router, tenant_id) + self._process_extra_attr_router_create(context, router_db, router) + return router_db + + def _validate_router_migration(self, router_db, router_res): + """Allow centralized -> distributed state transition only.""" + if (router_db.extra_attributes.distributed and + not router_res.get('distributed')): + # NOTE(armando-migliaccio): tell the client that there is no + # going back for a distributed router, at least for now. + raise NotImplementedError() + + def _update_distributed_attr( + self, context, router_id, router_db, data, gw_info): + """Update the model to support the dvr case of a router.""" + if not gw_info and 'distributed' in data and data['distributed']: + admin_ctx = context.elevated() + filters = {'device_id': [router_id], + 'device_owner': [l3_const.DEVICE_OWNER_ROUTER_INTF]} + ports = self._core_plugin.get_ports(admin_ctx, filters=filters) + for p in ports: + port_db = self._core_plugin._get_port(admin_ctx, p['id']) + port_db.update({'device_owner': DEVICE_OWNER_DVR_INTERFACE}) + + def _update_router_db(self, context, router_id, data, gw_info): + with context.session.begin(subtransactions=True): + router_db = super( + L3_NAT_with_dvr_db_mixin, self)._update_router_db( + context, router_id, data, gw_info) + self._validate_router_migration(router_db, data) + router_db.extra_attributes.update(data) + self._update_distributed_attr( + context, router_id, router_db, data, gw_info) + return router_db + + def _delete_current_gw_port(self, context, router_id, router, new_network): + super(L3_NAT_with_dvr_db_mixin, + self)._delete_current_gw_port(context, router_id, + router, new_network) + if router.extra_attributes.distributed: + self.delete_csnat_router_interface_ports(context, router) + + def _create_gw_port(self, context, router_id, router, new_network): + super(L3_NAT_with_dvr_db_mixin, + self)._create_gw_port(context, router_id, + router, new_network) + if router.extra_attributes.distributed: + snat_p_list = self.create_snat_intf_ports_if_not_exists( + context.elevated(), router['id']) + if not snat_p_list: + LOG.debug(_("SNAT interface ports not created: %s"), + snat_p_list) + + def _get_device_owner(self, context, router=None): + """Get device_owner for the specified router.""" + router_is_uuid = isinstance(router, basestring) + if router_is_uuid: + router = self._get_router(context, router) + if _is_distributed_router(router): + return DEVICE_OWNER_DVR_INTERFACE + return super(L3_NAT_with_dvr_db_mixin, + self)._get_device_owner(context, router) + + def _get_interface_ports_for_network(self, context, network_id): + router_intf_qry = (context.session.query(models_v2.Port). + filter_by(network_id=network_id)) + return (router_intf_qry. 
+ filter(models_v2.Port.device_owner.in_( + [l3_const.DEVICE_OWNER_ROUTER_INTF, + DEVICE_OWNER_DVR_INTERFACE]))) + + def get_snat_sync_interfaces(self, context, router_ids): + """Query router interfaces that relate to list of router_ids.""" + if not router_ids: + return [] + filters = {'device_id': router_ids, + 'device_owner': [DEVICE_OWNER_DVR_SNAT]} + interfaces = self._core_plugin.get_ports(context, filters) + LOG.debug("Return the SNAT ports: %s ", interfaces) + if interfaces: + self._populate_subnet_for_ports(context, interfaces) + return interfaces + + def _process_routers(self, context, routers): + routers_dict = {} + for router in routers: + routers_dict[router['id']] = router + router_ids = [router['id']] + if router['gw_port_id']: + snat_router_intfs = self.get_snat_sync_interfaces(context, + router_ids) + LOG.info(_("SNAT ports returned : %s "), snat_router_intfs) + router[SNAT_ROUTER_INTF_KEY] = snat_router_intfs + return routers_dict + + def _process_floating_ips(self, context, routers_dict, floating_ips): + for floating_ip in floating_ips: + router = routers_dict.get(floating_ip['router_id']) + if router: + router_floatingips = router.get(l3_const.FLOATINGIP_KEY, []) + floatingip_agent_intfs = [] + if router['distributed']: + floating_ip['host'] = self.get_vm_port_hostid( + context, floating_ip['port_id']) + LOG.debug("Floating IP host: %s", floating_ip['host']) + fip_agent = self._get_agent_by_type_and_host( + context, l3_const.AGENT_TYPE_L3, + floating_ip['host']) + LOG.debug("FIP Agent : %s ", fip_agent['id']) + floatingip_agent_intfs = self.get_fip_sync_interfaces( + context, fip_agent['id']) + LOG.debug("FIP Agent ports: %s", floatingip_agent_intfs) + router_floatingips.append(floating_ip) + #router_floatingip_agent_intfs.append(floatingip_agent_intfs) + router[l3_const.FLOATINGIP_KEY] = router_floatingips + router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = ( + floatingip_agent_intfs) + + def get_sync_data(self, context, router_ids=None, active=None): + routers, interfaces, floating_ips = self._get_router_info_list( + context, router_ids=router_ids, active=active, + device_owners=[l3_const.DEVICE_OWNER_ROUTER_INTF, + DEVICE_OWNER_DVR_INTERFACE]) + # Add the port binding host to the floatingip dictionary + for fip in floating_ips: + fip['host'] = self.get_vm_port_hostid(context, fip['port_id']) + routers_dict = self._process_routers(context, routers) + self._process_floating_ips(context, routers_dict, floating_ips) + self._process_interfaces(routers_dict, interfaces) + return routers_dict.values() + + def get_vm_port_hostid(self, context, port_id, port=None): + """Return the portbinding host_id.""" + vm_port_db = port or self._core_plugin.get_port(context, port_id) + if vm_port_db and ( + "compute:" in vm_port_db['device_owner'] or + DEVICE_OWNER_AGENT_GW in vm_port_db['device_owner'] or + "neutron:LOADBALANCER" in vm_port_db['device_owner']): + return vm_port_db[portbindings.HOST_ID] + + def get_agent_gw_ports_exist_for_network( + self, context, network_id, host, agent_id): + """Return agent gw port if exist, or None otherwise.""" + if not network_id: + LOG.debug("No Network id exists") + return + filters = {'network_id': network_id, + 'device_owner': DEVICE_OWNER_AGENT_GW} + ports = self._core_plugin.get_ports(context.elevated(), filters) + for p in ports: + if(self.get_vm_port_hostid(context, p['id'], p) == host and + p['device_id'] == agent_id): + return p + + def check_for_floatingip_and_return_with_hostid(self, context, router_id): + """Helper function to check for FIP 
and return Host id.""" + # FIXME(swami): what is the "break" condition for fp_host to be + # returned to the caller? + fp = self._get_sync_floating_ips(context, [router_id]) + fp_host = None + for fid in fp: + port_db = self._core_plugin._get_port(context, fid['port_id']) + fp_host = self.get_vm_port_hostid(context, port_db['id'], port_db) + return fp_host + + def check_fips_availability_on_host(self, context, fip_id, host_id): + """Query all floating_ips and filter by particular host.""" + fip_count_on_host = 0 + admin_ctx = context.elevated() + with context.session.begin(subtransactions=True): + routers = self._get_sync_routers(admin_ctx, router_ids=None) + router_ids = [router['id'] for router in routers] + floating_ips = self._get_sync_floating_ips(admin_ctx, router_ids) + # Check for the active floatingip in the host + for fip in floating_ips: + f_host = self.get_vm_port_hostid(admin_ctx, fip['port_id']) + if f_host == host_id: + fip_count_on_host += 1 + # If fip_count greater than 1 or equal to zero no action taken + # if the fip_count is equal to 1, then this would be last active + # fip in the host, so the agent gateway port can be deleted. + if fip_count_on_host == 1: + return True + return False + + def delete_floatingip_agent_gateway_port(self, context, host_id): + """Function to delete the FIP agent gateway port on host.""" + # delete any fip agent gw port + device_filter = {'device_owner': [DEVICE_OWNER_AGENT_GW]} + ports = self._core_plugin.get_ports(context.elevated(), + filters=device_filter) + for p in ports: + if self.get_vm_port_hostid(context, p['id'], p) == host_id: + self._core_plugin._delete_port(context.elevated(), + p['id']) + return + + def create_fip_agent_gw_port_if_not_exists( + self, context, network_id, host): + """Function to return the FIP Agent GW port. + + This function will create a FIP Agent GW port + if required. If the port already exists, it + will return the existing port and will not + create a new one. + """ + l3_agent_db = self._get_agent_by_type_and_host( + context, l3_const.AGENT_TYPE_L3, host) + if l3_agent_db: + LOG.info(_("Agent ID exists: %s"), l3_agent_db['id']) + # TODO(Swami): is this call still valid for external agent gw port? 
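`check_fips_availability_on_host` above boils down to counting the floating IPs whose associated port is bound to the given host; the per-host agent gateway port is only removable when exactly one such FIP remains. A small sketch of that test, with `host_of` as a hypothetical stand-in for `get_vm_port_hostid`:

```python
# Sketch of the "last floating IP on this host" test.

def last_fip_on_host(floating_ips, host_id, host_of):
    count = sum(1 for fip in floating_ips
                if host_of(fip['port_id']) == host_id)
    # Exactly one match means the FIP being removed is the last one
    # served from this host, so its agent gateway port can be deleted.
    return count == 1
```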
+ f_port = self.get_agent_gw_ports_exist_for_network( + context, network_id, host, l3_agent_db['id']) + if not f_port: + LOG.info(_('Agent Gateway port does not exist,' + ' so create one: %s'), f_port) + agent_port = self._core_plugin.create_port( + context.elevated(), + {'port': {'tenant_id': '', + 'network_id': network_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, + 'device_id': l3_agent_db['id'], + 'device_owner': DEVICE_OWNER_AGENT_GW, + 'admin_state_up': True, + 'name': ''}}) + if agent_port: + self._populate_subnet_for_ports(context, [agent_port]) + return agent_port + msg = _("Unable to create the Agent Gateway Port") + raise n_exc.BadRequest(resource='router', msg=msg) + else: + self._populate_subnet_for_ports(context, [f_port]) + return f_port + + def get_snat_interface_ports_for_router(self, context, router_id): + """Return all existing snat_router_interface ports.""" + filters = {'device_id': [router_id], + 'device_owner': [DEVICE_OWNER_DVR_SNAT]} + return self._core_plugin.get_ports(context.elevated(), filters) + + def add_csnat_router_interface_port( + self, context, router_id, network_id, subnet_payload): + """Function to create SNAT router interface ports.""" + snat_port = self._core_plugin.create_port( + context.elevated(), + {'port': {'tenant_id': '', + 'network_id': network_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': [subnet_payload], + 'device_id': router_id, + 'device_owner': DEVICE_OWNER_DVR_SNAT, + 'admin_state_up': True, + 'name': ''}}) + if snat_port: + return self._populate_subnet_for_ports(context, [snat_port]) + msg = _("Unable to create the SNAT Interface Port") + raise n_exc.BadRequest(resource='router', msg=msg) + + def create_snat_intf_ports_if_not_exists( + self, context, router_id): + """Function to return the snat interface port list. + + This function will return the snat interface port list + if it exists. If the port does not exist it will create + new ports and then return the list. + """ + port_list = self.get_snat_interface_ports_for_router( + context, router_id) + if port_list: + self._populate_subnet_for_ports(context, port_list) + return port_list + admin_ctx = context.elevated() + port_list = [] + filters = { + 'device_id': [router_id], + 'device_owner': [DEVICE_OWNER_DVR_INTERFACE]} + int_ports = self._core_plugin.get_ports(admin_ctx, filters) + LOG.info(_('SNAT interface port list does not exist,' + ' so create one: %s'), port_list) + for intf in int_ports: + if 'fixed_ips' in intf and intf['fixed_ips']: + # Passing the subnet for the port to make sure the IP's + # are assigned on the right subnet if multiple subnet + # exists + intf_subnet = intf['fixed_ips'][0]['subnet_id'] + port_data = { + 'tenant_id': '', + 'network_id': intf['network_id'], + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'fixed_ips': [{'subnet_id': intf_subnet}], + 'device_id': router_id, + 'device_owner': DEVICE_OWNER_DVR_SNAT, + 'admin_state_up': True, + 'name': '' + } + snat_port = self._core_plugin.create_port( + admin_ctx, {'port': port_data}) + if not snat_port: + msg = _("Unable to create the SNAT Interface Port") + raise n_exc.BadRequest(resource='router', msg=msg) + port_list.append(snat_port) + if port_list: + self._populate_subnet_for_ports(context, port_list) + return port_list + + def l3_agent_notify_for_vmarp_table(self, context, port_id, action): + """Function provides notification to L3 agent. 
+ + Function provides the details of the VM ARP to the + L3 agent when a Nova instance gets created or deleted. + """ + port_dict = self._core_plugin._get_port(context, port_id) + if "compute:" not in port_dict['device_owner']: + return + ip_address = port_dict['fixed_ips'][0]['ip_address'] + subnet = port_dict['fixed_ips'][0]['subnet_id'] + filters = {'fixed_ips': {'subnet_id': [subnet]}} + ports = self._core_plugin.get_ports(context, filters=filters) + for port in ports: + if port['device_owner'] == DEVICE_OWNER_DVR_INTERFACE: + router_id = port['device_id'] + router_dict = self._get_router(context, router_id) + if router_dict.extra_attributes.distributed: + arp_table = {'ip_address': ip_address, + 'mac_address': port_dict['mac_address'], + 'subnet_id': subnet} + if action == "add": + notify_action = self.l3_rpc_notifier.add_arp_entry + elif action == "del": + notify_action = self.l3_rpc_notifier.del_arp_entry + notify_action(context, router_id, arp_table) + return + + def delete_csnat_router_interface_ports(self, context, + router, subnet_id=None): + # Each csnat router interface port is associated + # with a subnet, so we need to pass the subnet id to + # delete the right ports. + admin_ctx = context.elevated() + device_filter = { + 'device_id': [router['id']], + 'device_owner': [DEVICE_OWNER_DVR_SNAT]} + c_snat_ports = self._core_plugin.get_ports( + admin_ctx, filters=device_filter) + for p in c_snat_ports: + if subnet_id is None: + self._core_plugin.delete_port(admin_ctx, + p['id'], + l3_port_check=False) + else: + if p['fixed_ips'][0]['subnet_id'] == subnet_id: + LOG.info(_("Subnet matches: %s"), subnet_id) + self._core_plugin.delete_port(admin_ctx, + p['id'], + l3_port_check=False) + break + + +def _is_distributed_router(router): + """Return True if router to be handled is distributed.""" + try: + # See if router is a DB object first + requested_router_type = router.extra_attributes.distributed + except AttributeError: + # if not, try to see if it is a request body + requested_router_type = router.get('distributed') + if attributes.is_attr_set(requested_router_type): + return requested_router_type + return cfg.CONF.router_distributed diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/l3_dvrscheduler_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_dvrscheduler_db.py new file mode 100644 index 00000000..7cdd8aca --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_dvrscheduler_db.py @@ -0,0 +1,276 @@ +# +# (c) Copyright 2014 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
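The `_is_distributed_router` helper at the end of the previous file resolves the flag from three sources in order: the DB object's extra attributes, the request body, and finally the `router_distributed` config default. A rough standalone sketch of that fallback chain, where `ATTR_UNSET` is an assumed sentinel standing in for `attributes.ATTR_NOT_SPECIFIED`:

```python
# Sketch of the distributed-router fallback chain; not the patch's code.

ATTR_UNSET = object()

def is_distributed(router, config_default=False):
    try:
        # DB object: the stored extra attribute is authoritative.
        value = router.extra_attributes.distributed
    except AttributeError:
        # Request body: the key may be absent or left unset.
        value = router.get('distributed', ATTR_UNSET)
    if value is ATTR_UNSET or value is None:
        return config_default
    return value
```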
+ +import random + +from sqlalchemy.orm import exc + +from neutron.common import constants as q_const +from neutron.db import l3_agentschedulers_db as l3agent_sch_db +from neutron.db import l3_db +from neutron.db import l3_gwmode_db # noqa +from neutron.extensions import l3agentscheduler +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class L3_DVRsch_db_mixin(l3_db.L3_NAT_db_mixin, + l3agent_sch_db.L3AgentSchedulerDbMixin): + """Mixin class for L3 DVR scheduler. + + @l3_db.L3_NAT_db_mixin db mixin class for L3 + @l3agent_sch_db.L3AgentSchedulerDbMixin + """ + def dvr_update_router_addvm(self, context, port): + ips = port['fixed_ips'] + for ip in ips: + subnet = ip['subnet_id'] + filter_sub = {'fixed_ips': {'subnet_id': [subnet]}, + 'device_owner': + [q_const.DEVICE_OWNER_DVR_INTERFACE]} + router_id = None + ports = self._core_plugin.get_ports(context, + filters=filter_sub) + for port in ports: + router_id = port['device_id'] + router_dict = self._get_router(context, router_id) + if router_dict.get('distributed', False): + payload = {'subnet_id': subnet} + self.l3_rpc_notifier.routers_updated( + context, [router_id], None, payload) + break + LOG.debug('DVR: dvr_update_router_addvm %s ', router_id) + + def get_dvrrouters_by_vmportid(self, context, port_id): + """Gets the dvr routers on vmport subnets.""" + router_ids = set() + port_dict = self._core_plugin._get_port(context, port_id) + fixed_ips = port_dict['fixed_ips'] + for fixedip in fixed_ips: + vm_subnet = fixedip['subnet_id'] + filter_sub = {'fixed_ips': {'subnet_id': [vm_subnet]}, + 'device_owner': + [q_const.DEVICE_OWNER_DVR_INTERFACE]} + subnetports = self._core_plugin.get_ports(context, + filters=filter_sub) + for subnetport in subnetports: + routerid = subnetport['device_id'] + router_ids.add(routerid) + return router_ids + + def get_subnetids_on_router(self, context, router_id): + """Only get subnet IDs for interfaces that are + attached to the given router. + """ + subnet_ids = set() + filter_rtr = {'device_id': [router_id]} + int_ports = self._core_plugin.get_ports(context, + filters=filter_rtr) + for int_port in int_ports: + int_ips = int_port['fixed_ips'] + int_subnet = int_ips[0]['subnet_id'] + subnet_ids.add(int_subnet) + return subnet_ids + + def check_vm_exists_onsubnet(self, context, host, port_id, subnet_id): + """Check if there is any vm exists on the subnet_id.""" + filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}} + ports = self._core_plugin.get_ports(context, + filters=filter_sub) + for port in ports: + if ("compute:" in port['device_owner'] + and port['status'] == 'ACTIVE' + and port['binding:host_id'] == host + and port['id'] != port_id + ): + LOG.debug('DVR- Vm exists for subnet %(subnet_id)s on host ' + '%(host)s', {'subnet_id': subnet_id, + 'host': host}) + return True + return False + + def delete_namespace_onhost(self, context, host, router_id): + """Delete the given router namespace on the host.""" + agent = self._core_plugin._get_agent_by_type_and_host( + context, q_const.AGENT_TYPE_L3, host) + agent_id = str(agent.id) + with context.session.begin(subtransactions=True): + bindings = (context.session. + query(l3agent_sch_db.RouterL3AgentBinding). 
+ filter_by(router_id=router_id)) + for bind in bindings: + if bind.l3_agent_id == agent_id: + context.session.delete(bind) + break + self.l3_rpc_notifier.router_removed_from_agent(context, + router_id, + host) + LOG.debug('Deleted router %(router_id)s on agent.id %(id)s', + {'router_id': router_id, + 'id': agent.id}) + + def dvr_deletens_ifnovm(self, context, port_id): + """Delete the DVR namespace if no VM exists.""" + router_ids = self.get_dvrrouters_by_vmportid(context, port_id) + port_host = self._core_plugin.get_bindinghost_by_portid(port_id) + if not router_ids: + LOG.debug('No namespaces available for this port %(port)s ' + 'on host %(host)s', {'port': port_id, + 'host': port_host}) + return + for router_id in router_ids: + subnet_ids = self.get_subnetids_on_router(context, router_id) + for subnet in subnet_ids: + if self.check_vm_exists_onsubnet(context, + port_host, + port_id, + subnet): + return + filter_rtr = {'device_id': [router_id], + 'device_owner': + [q_const.DEVICE_OWNER_DVR_INTERFACE]} + int_ports = self._core_plugin.get_ports(context, + filters=filter_rtr) + for prt in int_ports: + dvr_binding = (self._core_plugin. + get_dvr_port_binding_by_host(context, + prt['id'], + port_host)) + if dvr_binding: + # unbind this port from router + dvr_binding['router_id'] = None + dvr_binding.update(dvr_binding) + self.delete_namespace_onhost(context, port_host, router_id) + LOG.debug('Deleted router namespace %(router_id)s ' + 'on host %(host)s', {'router_id': router_id, + 'host': port_host}) + + def bind_snat_router(self, context, router_id, chosen_agent): + """Bind the router to the chosen l3 agent.""" + with context.session.begin(subtransactions=True): + binding = l3agent_sch_db.CentralizedSnatL3AgentBinding() + binding.l3_agent = chosen_agent + binding.router_id = router_id + context.session.add(binding) + LOG.debug('SNAT Router %(router_id)s is scheduled to L3 agent ' + '%(agent_id)s', {'router_id': router_id, + 'agent_id': chosen_agent.id}) + + def bind_dvrrouter_servicenode(self, context, router_id, + chosen_snat_agent): + """Bind the IR router to service node if not already hosted.""" + query = (context.session.query(l3agent_sch_db.RouterL3AgentBinding). + filter_by(router_id=router_id)) + for bind in query: + if bind.l3_agent_id == chosen_snat_agent.id: + LOG.debug('Distributed Router %(router_id)s already hosted ' + 'on snat l3_agent %(id)s', + {'router_id': router_id, 'id': chosen_snat_agent.id}) + return + + LOG.debug('Binding the distributed router %(router_id)s to ' + 'the snat agent %(id)s', + {'router_id': router_id, + 'id': chosen_snat_agent.id}) + self.bind_router(context, router_id, chosen_snat_agent) + + def bind_snat_servicenode(self, context, router_id, snat_candidates): + """Bind the snat router to the chosen l3 service agent.""" + chosen_snat_agent = random.choice(snat_candidates) + self.bind_snat_router(context, router_id, chosen_snat_agent) + + def unbind_snat_servicenode(self, context, router_id): + """Unbind the snat router to the chosen l3 service agent.""" + vm_exists = False + agent_id = None + vm_ports = [] + host = None + with context.session.begin(subtransactions=True): + query = (context.session. + query(l3agent_sch_db.CentralizedSnatL3AgentBinding). 
+ filter_by(router_id=router_id)) + try: + binding = query.one() + except exc.NoResultFound: + LOG.debug('no snat router is binding entry found ' + '%(router_id)s', {'router_id': router_id}) + return + + host = binding.l3_agent.host + subnet_ids = self.get_subnetids_on_router(context, router_id) + for subnet in subnet_ids: + vm_ports = ( + self._core_plugin.get_compute_ports_on_host_by_subnet( + context, host, subnet)) + if vm_ports: + vm_exists = True + LOG.debug('vm exists on the snat enabled l3_agent ' + 'host %(host)s and router_id ' + '%(router_id)s', {'host': host, + 'router_id': + router_id}) + break + agent_id = binding.l3_agent_id + LOG.debug('Delete the binding the SNAT router %(router_id)s ' + 'from agent %(id)s', {'router_id': router_id, + 'id': agent_id}) + context.session.delete(binding) + + if not vm_exists: + query = (context.session. + query(l3agent_sch_db.RouterL3AgentBinding). + filter_by(router_id=router_id)) + for bind in query: + if bind.l3_agent_id == agent_id: + context.session.delete(bind) + self.l3_rpc_notifier.router_removed_from_agent( + context, router_id, host) + LOG.debug('Removed the binding for router ' + '%(router_id)s from agent %(id)s', + {'router_id': router_id, 'id': agent_id}) + break + + def schedule_snat_router(self, plugin, context, router_id, gw_exists): + """Schedule the snat router on l3 service agent.""" + sync_router = plugin.get_router(context, router_id) + if gw_exists: + query = (context.session. + query(l3agent_sch_db.CentralizedSnatL3AgentBinding). + filter_by(router_id=router_id)) + for bind in query: + agt_id = bind.l3_agent_id + LOG.debug('SNAT Router %(router_id)s has already been ' + 'hosted by L3 agent ' + '%(agent_id)s', {'router_id': router_id, + 'agent_id': agt_id}) + self.bind_dvrrouter_servicenode(context, + router_id, + bind.l3_agent) + return + active_l3_agents = plugin.get_l3_agents(context, active=True) + if not active_l3_agents: + LOG.warn(_('No active L3 agents')) + return + snat_candidates = plugin.get_snat_candidates(sync_router, + active_l3_agents) + if snat_candidates: + self.bind_snat_servicenode(context, router_id, snat_candidates) + else: + raise (l3agentscheduler. + NoSnatEnabledL3Agent(router_id=router_id)) + else: + self.unbind_snat_servicenode(context, router_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/l3_gwmode_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_gwmode_db.py new file mode 100644 index 00000000..2f1ee07b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_gwmode_db.py @@ -0,0 +1,85 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
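`schedule_snat_router` in the previous file follows a simple decision: with a gateway present, choose one SNAT-capable agent (at random, via `bind_snat_servicenode`) and bind the router to it; with the gateway removed, drop the SNAT binding. A condensed sketch of that decision, with `bind` and `unbind` as hypothetical callbacks standing in for the DB binding helpers:

```python
# Sketch of the SNAT scheduling decision; helper names are illustrative.
import random

def schedule_snat(router_id, gw_exists, snat_candidates, bind, unbind):
    if not gw_exists:
        # Gateway removed: the router no longer needs centralized SNAT.
        unbind(router_id)
        return None
    if not snat_candidates:
        raise RuntimeError('no SNAT-enabled L3 agent for router %s'
                           % router_id)
    agent = random.choice(snat_candidates)
    bind(router_id, agent)
    return agent
```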
+# + +import sqlalchemy as sa +from sqlalchemy.orm import exc + +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_agentschedulers_db as l3agent_sch_db +from neutron.db import l3_db +from neutron.extensions import l3 +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) +EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO + +# Modify the Router Data Model adding the enable_snat attribute +setattr(l3_db.Router, 'enable_snat', + sa.Column(sa.Boolean, default=True, nullable=False)) + + +class L3_NAT_db_mixin(l3_db.L3_NAT_db_mixin): + """Mixin class to add configurable gateway modes.""" + + # Register dict extend functions for ports and networks + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, ['_extend_router_dict_gw_mode']) + + def _extend_router_dict_gw_mode(self, router_res, router_db): + if router_db.gw_port_id: + nw_id = router_db.gw_port['network_id'] + router_res[EXTERNAL_GW_INFO] = { + 'network_id': nw_id, + 'enable_snat': router_db.enable_snat} + + def _update_router_gw_info(self, context, router_id, info, router=None): + # Load the router only if necessary + if not router: + router = self._get_router(context, router_id) + # if enable_snat is not specified use the value + # stored in the database (default:True) + enable_snat = not info or info.get('enable_snat', router.enable_snat) + with context.session.begin(subtransactions=True): + router.enable_snat = enable_snat + + # Calls superclass, pass router db object for avoiding re-loading + super(L3_NAT_db_mixin, self)._update_router_gw_info( + context, router_id, info, router=router) + # Returning the router might come back useful if this + # method is overriden in child classes + return router + + def _build_routers_list(self, context, routers, gw_ports): + gw_port_id_gw_port_dict = {} + for gw_port in gw_ports: + gw_port_id_gw_port_dict[gw_port['id']] = gw_port + for rtr in routers: + gw_port_id = rtr['gw_port_id'] + if gw_port_id: + rtr['gw_port'] = gw_port_id_gw_port_dict[gw_port_id] + # Add enable_snat key + rtr['enable_snat'] = rtr[EXTERNAL_GW_INFO]['enable_snat'] + query = (context.session. + query(l3agent_sch_db.CentralizedSnatL3AgentBinding). + filter_by(router_id=rtr['id'])) + try: + binding = query.one() + rtr['gw_port_host'] = binding.l3_agent.host + except exc.NoResultFound: + rtr['gw_port_host'] = None + LOG.debug('no snat router is binding entry ' + 'found router_id %s', rtr['id']) + return routers diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/l3_rpc_base.py b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_rpc_base.py new file mode 100644 index 00000000..b499e1cd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/l3_rpc_base.py @@ -0,0 +1,198 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
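The `enable_snat` handling in the previous file keeps the value already stored on the router when an update omits the attribute, and falls back to the column default (`True`) when the gateway is cleared. A one-function sketch of that rule (name and signature are illustrative only):

```python
# Sketch of the enable_snat defaulting rule from _update_router_gw_info.

def resolve_enable_snat(gw_info, current_value=True):
    # Clearing the gateway resets enable_snat to its default (True);
    # an update that omits enable_snat keeps the stored value.
    if not gw_info:
        return True
    return gw_info.get('enable_snat', current_value)

# resolve_enable_snat(None, current_value=False)                   -> True
# resolve_enable_snat({'network_id': 'ext'}, current_value=False)  -> False
# resolve_enable_snat({'network_id': 'ext', 'enable_snat': False}) -> False
```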
+ +from oslo.config import cfg + +from neutron.common import constants +from neutron.common import utils +from neutron import context as neutron_context +from neutron.extensions import l3 +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as plugin_constants + + +LOG = logging.getLogger(__name__) + + +class L3RpcCallbackMixin(object): + """A mix-in that enables L3 agent RPC support in plugin implementations.""" + + def sync_routers(self, context, **kwargs): + """Sync routers according to filters to a specific agent. + + @param context: contains user information + @param kwargs: host, router_ids + @return: a list of routers + with their interfaces and floating_ips + """ + router_ids = kwargs.get('router_ids') + host = kwargs.get('host') + context = neutron_context.get_admin_context() + l3plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + if not l3plugin: + routers = {} + LOG.error(_('No plugin for L3 routing registered! Will reply ' + 'to l3 agent with empty router dictionary.')) + elif utils.is_extension_supported( + l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): + if cfg.CONF.router_auto_schedule: + l3plugin.auto_schedule_routers(context, host, router_ids) + routers = l3plugin.list_active_sync_routers_on_active_l3_agent( + context, host, router_ids) + else: + routers = l3plugin.get_sync_data(context, router_ids) + plugin = manager.NeutronManager.get_plugin() + if utils.is_extension_supported( + plugin, constants.PORT_BINDING_EXT_ALIAS): + self._ensure_host_set_on_ports(context, plugin, host, routers) + LOG.debug(_("Routers returned to l3 agent:\n %s"), + jsonutils.dumps(routers, indent=5)) + return routers + + def _ensure_host_set_on_ports(self, context, plugin, host, routers): + for router in routers: + LOG.debug(_("Checking router: %(id)s for host: %(host)s"), + {'id': router['id'], 'host': host}) + self._ensure_host_set_on_port(context, plugin, host, + router.get('gw_port'), + router['id']) + for interface in router.get(constants.INTERFACE_KEY, []): + self._ensure_host_set_on_port(context, plugin, host, + interface, router['id']) + + def _ensure_host_set_on_port(self, context, plugin, host, port, + router_id=None): + if (port and + port.get('device_owner') == + constants.DEVICE_OWNER_DVR_INTERFACE): + # Ports that are DVR interfaces have multiple bindings (based on + # the hosts on which DVR router interfaces are spawned). Such + # bindings are created/updated here by invoking + # update_dvr_port_binding + plugin.update_dvr_port_binding(context, port['id'], + {'port': + {portbindings.HOST_ID: host, + 'device_id': router_id} + }) + elif (port and + (port.get(portbindings.HOST_ID) != host or + port.get(portbindings.VIF_TYPE) == + portbindings.VIF_TYPE_BINDING_FAILED)): + # All ports, including ports created for SNAT'ing for + # DVR, are handled here + plugin.update_port(context, port['id'], + {'port': {portbindings.HOST_ID: host}}) + + + def get_external_network_id(self, context, **kwargs): + """Get one external network id for l3 agent. + + The l3 agent expects only one external network when it performs + this query.
+ """ + context = neutron_context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + net_id = plugin.get_external_network_id(context) + LOG.debug(_("External network ID returned to l3 agent: %s"), + net_id) + return net_id + + def update_floatingip_statuses(self, context, router_id, fip_statuses): + """Update operational status for a floating IP.""" + l3_plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + with context.session.begin(subtransactions=True): + for (floatingip_id, status) in fip_statuses.iteritems(): + LOG.debug(_("New status for floating IP %(floatingip_id)s: " + "%(status)s"), {'floatingip_id': floatingip_id, + 'status': status}) + try: + l3_plugin.update_floatingip_status(context, + floatingip_id, + status) + except l3.FloatingIPNotFound: + LOG.debug(_("Floating IP: %s no longer present."), + floatingip_id) + # Find all floating IPs known to have been the given router + # for which an update was not received. Set them DOWN mercilessly + # This situation might occur for some asynchronous backends if + # notifications were missed + known_router_fips = l3_plugin.get_floatingips( + context, {'last_known_router_id': [router_id]}) + # Consider only floating ips which were disassociated in the API + # FIXME(salv-orlando): Filtering in code should be avoided. + # the plugin should offer a way to specify a null filter + fips_to_disable = (fip['id'] for fip in known_router_fips + if not fip['router_id']) + for fip_id in fips_to_disable: + l3_plugin.update_floatingip_status( + context, fip_id, constants.FLOATINGIP_STATUS_DOWN) + + def get_ports_by_subnet(self, context, **kwargs): + """DVR: RPC called by dvr-agent to get all ports for subnet.""" + subnet_id = kwargs.get('subnet_id') + LOG.debug("DVR: subnet_id: %s", subnet_id) + filters = {'fixed_ips': {'subnet_id': [subnet_id]}} + plugin = manager.NeutronManager.get_plugin() + return plugin.get_ports(context, filters=filters) + + def get_agent_gateway_port(self, context, **kwargs): + """Get Agent Gateway port for FIP. + + l3 agent expects an Agent Gateway Port to be returned + for this query. + """ + network_id = kwargs.get('network_id') + host = kwargs.get('host') + context = neutron_context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + l3plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + agent_port = l3plugin.create_fip_agent_gw_port_if_not_exists( + context, network_id, host) + self._ensure_host_set_on_port(context, plugin, host, + agent_port) + LOG.debug('Agent Gateway port returned : %(agent_port)s with ' + 'host %(host)s', {'agent_port': agent_port, + 'host': host}) + return agent_port + + def get_snat_router_interface_ports(self, context, **kwargs): + """Get SNAT serviced Router Port List. + + The Service Node that hosts the SNAT service requires + the ports to service the router interfaces. + This function will check if any available ports, if not + it will create ports on the routers interfaces and + will send a list to the L3 agent. 
+ """ + router_id = kwargs.get('router_id') + host = kwargs.get('host') + context = neutron_context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + l3plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + snat_port_list = l3plugin.create_snat_intf_port_list_if_not_exists( + context, router_id) + for p in snat_port_list: + self._ensure_host_set_on_port(context, plugin, host, p) + LOG.debug('SNAT interface ports returned : %(snat_port_list)s ' + 'and on host %(host)s', {'snat_port_list': snat_port_list, + 'host': host}) + return snat_port_list diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/loadbalancer/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/db/loadbalancer/__init__.py new file mode 100644 index 00000000..fb181bf6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/loadbalancer/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/loadbalancer/loadbalancer_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/loadbalancer/loadbalancer_db.py new file mode 100644 index 00000000..0940a945 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/loadbalancer/loadbalancer_db.py @@ -0,0 +1,800 @@ +# Copyright 2013 OpenStack Foundation. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from oslo.db import exception +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.orm import validates + +from neutron.api.v2 import attributes +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.db import servicetype_db as st_db +from neutron.extensions import loadbalancer +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.services.loadbalancer import constants as lb_const + + +LOG = logging.getLogger(__name__) + + +class SessionPersistence(model_base.BASEV2): + + vip_id = sa.Column(sa.String(36), + sa.ForeignKey("vips.id"), + primary_key=True) + type = sa.Column(sa.Enum("SOURCE_IP", + "HTTP_COOKIE", + "APP_COOKIE", + name="sesssionpersistences_type"), + nullable=False) + cookie_name = sa.Column(sa.String(1024)) + + +class PoolStatistics(model_base.BASEV2): + """Represents pool statistics.""" + + pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"), + primary_key=True) + bytes_in = sa.Column(sa.BigInteger, nullable=False) + bytes_out = sa.Column(sa.BigInteger, nullable=False) + active_connections = sa.Column(sa.BigInteger, nullable=False) + total_connections = sa.Column(sa.BigInteger, nullable=False) + + @validates('bytes_in', 'bytes_out', + 'active_connections', 'total_connections') + def validate_non_negative_int(self, key, value): + if value < 0: + data = {'key': key, 'value': value} + raise ValueError(_('The %(key)s field can not have ' + 'negative value. ' + 'Current value is %(value)d.') % data) + return value + + +class Vip(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, + models_v2.HasStatusDescription): + """Represents a v2 neutron loadbalancer vip.""" + + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) + protocol_port = sa.Column(sa.Integer, nullable=False) + protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False) + pool_id = sa.Column(sa.String(36), nullable=False, unique=True) + session_persistence = orm.relationship(SessionPersistence, + uselist=False, + backref="vips", + cascade="all, delete-orphan") + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + connection_limit = sa.Column(sa.Integer) + port = orm.relationship(models_v2.Port) + + +class Member(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, + models_v2.HasStatusDescription): + """Represents a v2 neutron loadbalancer member.""" + + __table_args__ = ( + sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port', + name='uniq_member0pool_id0address0port'), + ) + pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"), + nullable=False) + address = sa.Column(sa.String(64), nullable=False) + protocol_port = sa.Column(sa.Integer, nullable=False) + weight = sa.Column(sa.Integer, nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + + +class Pool(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, + models_v2.HasStatusDescription): + """Represents a v2 neutron loadbalancer pool.""" + + vip_id = sa.Column(sa.String(36), sa.ForeignKey("vips.id")) + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + subnet_id = sa.Column(sa.String(36), 
nullable=False) + protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False) + lb_method = sa.Column(sa.Enum("ROUND_ROBIN", + "LEAST_CONNECTIONS", + "SOURCE_IP", + name="pools_lb_method"), + nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + stats = orm.relationship(PoolStatistics, + uselist=False, + backref="pools", + cascade="all, delete-orphan") + members = orm.relationship(Member, backref="pools", + cascade="all, delete-orphan") + monitors = orm.relationship("PoolMonitorAssociation", backref="pools", + cascade="all, delete-orphan") + vip = orm.relationship(Vip, backref='pool') + + provider = orm.relationship( + st_db.ProviderResourceAssociation, + uselist=False, + lazy="joined", + primaryjoin="Pool.id==ProviderResourceAssociation.resource_id", + foreign_keys=[st_db.ProviderResourceAssociation.resource_id] + ) + + +class HealthMonitor(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 neutron loadbalancer healthmonitor.""" + + type = sa.Column(sa.Enum("PING", "TCP", "HTTP", "HTTPS", + name="healthmontiors_type"), + nullable=False) + delay = sa.Column(sa.Integer, nullable=False) + timeout = sa.Column(sa.Integer, nullable=False) + max_retries = sa.Column(sa.Integer, nullable=False) + http_method = sa.Column(sa.String(16)) + url_path = sa.Column(sa.String(255)) + expected_codes = sa.Column(sa.String(64)) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + + pools = orm.relationship( + "PoolMonitorAssociation", backref="healthmonitor", + cascade="all", lazy="joined" + ) + + +class PoolMonitorAssociation(model_base.BASEV2, + models_v2.HasStatusDescription): + """Many-to-many association between pool and healthMonitor classes.""" + + pool_id = sa.Column(sa.String(36), + sa.ForeignKey("pools.id"), + primary_key=True) + monitor_id = sa.Column(sa.String(36), + sa.ForeignKey("healthmonitors.id"), + primary_key=True) + + +class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, + base_db.CommonDbMixin): + """Wraps loadbalancer with SQLAlchemy models. + + A class that wraps the implementation of the Neutron loadbalancer + plugin database access interface using SQLAlchemy models. + """ + + @property + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + + def update_status(self, context, model, id, status, + status_description=None): + with context.session.begin(subtransactions=True): + if issubclass(model, Vip): + try: + v_db = (self._model_query(context, model). + filter(model.id == id). + options(orm.noload('port')). 
+ one()) + except exc.NoResultFound: + raise loadbalancer.VipNotFound(vip_id=id) + else: + v_db = self._get_resource(context, model, id) + if v_db.status != status: + v_db.status = status + # update status_description in two cases: + # - new value is passed + # - old value is not None (needs to be updated anyway) + if status_description or v_db['status_description']: + v_db.status_description = status_description + + def _get_resource(self, context, model, id): + try: + r = self._get_by_id(context, model, id) + except exc.NoResultFound: + with excutils.save_and_reraise_exception(reraise=False) as ctx: + if issubclass(model, Vip): + raise loadbalancer.VipNotFound(vip_id=id) + elif issubclass(model, Pool): + raise loadbalancer.PoolNotFound(pool_id=id) + elif issubclass(model, Member): + raise loadbalancer.MemberNotFound(member_id=id) + elif issubclass(model, HealthMonitor): + raise loadbalancer.HealthMonitorNotFound(monitor_id=id) + ctx.reraise = True + return r + + def assert_modification_allowed(self, obj): + status = getattr(obj, 'status', None) + + if status == constants.PENDING_DELETE: + raise loadbalancer.StateInvalid(id=id, state=status) + + ######################################################## + # VIP DB access + def _make_vip_dict(self, vip, fields=None): + fixed_ip = (vip.port.fixed_ips or [{}])[0] + + res = {'id': vip['id'], + 'tenant_id': vip['tenant_id'], + 'name': vip['name'], + 'description': vip['description'], + 'subnet_id': fixed_ip.get('subnet_id'), + 'address': fixed_ip.get('ip_address'), + 'port_id': vip['port_id'], + 'protocol_port': vip['protocol_port'], + 'protocol': vip['protocol'], + 'pool_id': vip['pool_id'], + 'session_persistence': None, + 'connection_limit': vip['connection_limit'], + 'admin_state_up': vip['admin_state_up'], + 'status': vip['status'], + 'status_description': vip['status_description']} + + if vip['session_persistence']: + s_p = { + 'type': vip['session_persistence']['type'] + } + + if vip['session_persistence']['type'] == 'APP_COOKIE': + s_p['cookie_name'] = vip['session_persistence']['cookie_name'] + + res['session_persistence'] = s_p + + return self._fields(res, fields) + + def _check_session_persistence_info(self, info): + """Performs sanity check on session persistence info. + + :param info: Session persistence info + """ + if info['type'] == 'APP_COOKIE': + if not info.get('cookie_name'): + raise ValueError(_("'cookie_name' should be specified for this" + " type of session persistence.")) + else: + if 'cookie_name' in info: + raise ValueError(_("'cookie_name' is not allowed for this type" + " of session persistence")) + + def _create_session_persistence_db(self, session_info, vip_id): + self._check_session_persistence_info(session_info) + + sesspersist_db = SessionPersistence( + type=session_info['type'], + cookie_name=session_info.get('cookie_name'), + vip_id=vip_id) + return sesspersist_db + + def _update_vip_session_persistence(self, context, vip_id, info): + self._check_session_persistence_info(info) + + vip = self._get_resource(context, Vip, vip_id) + + with context.session.begin(subtransactions=True): + # Update sessionPersistence table + sess_qry = context.session.query(SessionPersistence) + sesspersist_db = sess_qry.filter_by(vip_id=vip_id).first() + + # Insert a None cookie_info if it is not present to overwrite an + # an existing value in the database. 
+ if 'cookie_name' not in info: + info['cookie_name'] = None + + if sesspersist_db: + sesspersist_db.update(info) + else: + sesspersist_db = SessionPersistence( + type=info['type'], + cookie_name=info['cookie_name'], + vip_id=vip_id) + context.session.add(sesspersist_db) + # Update vip table + vip.session_persistence = sesspersist_db + context.session.add(vip) + + def _delete_session_persistence(self, context, vip_id): + with context.session.begin(subtransactions=True): + sess_qry = context.session.query(SessionPersistence) + sess_qry.filter_by(vip_id=vip_id).delete() + + def _create_port_for_vip(self, context, vip_db, subnet_id, ip_address): + # resolve subnet and create port + subnet = self._core_plugin.get_subnet(context, subnet_id) + fixed_ip = {'subnet_id': subnet['id']} + if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED: + fixed_ip['ip_address'] = ip_address + + port_data = { + 'tenant_id': vip_db.tenant_id, + 'name': 'vip-' + vip_db.id, + 'network_id': subnet['network_id'], + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': False, + 'device_id': '', + 'device_owner': '', + 'fixed_ips': [fixed_ip] + } + + port = self._core_plugin.create_port(context, {'port': port_data}) + vip_db.port_id = port['id'] + + def create_vip(self, context, vip): + v = vip['vip'] + tenant_id = self._get_tenant_id_for_create(context, v) + + with context.session.begin(subtransactions=True): + if v['pool_id']: + pool = self._get_resource(context, Pool, v['pool_id']) + # validate that the pool has same tenant + if pool['tenant_id'] != tenant_id: + raise n_exc.NotAuthorized() + # validate that the pool has same protocol + if pool['protocol'] != v['protocol']: + raise loadbalancer.ProtocolMismatch( + vip_proto=v['protocol'], + pool_proto=pool['protocol']) + if pool['status'] == constants.PENDING_DELETE: + raise loadbalancer.StateInvalid(state=pool['status'], + id=pool['id']) + else: + pool = None + + vip_db = Vip(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=v['name'], + description=v['description'], + port_id=None, + protocol_port=v['protocol_port'], + protocol=v['protocol'], + pool_id=v['pool_id'], + connection_limit=v['connection_limit'], + admin_state_up=v['admin_state_up'], + status=constants.PENDING_CREATE) + + session_info = v['session_persistence'] + + if session_info: + s_p = self._create_session_persistence_db( + session_info, + vip_db['id']) + vip_db.session_persistence = s_p + + try: + context.session.add(vip_db) + context.session.flush() + except exception.DBDuplicateEntry: + raise loadbalancer.VipExists(pool_id=v['pool_id']) + + # create a port to reserve address for IPAM + self._create_port_for_vip( + context, + vip_db, + v['subnet_id'], + v.get('address') + ) + + if pool: + pool['vip_id'] = vip_db['id'] + + return self._make_vip_dict(vip_db) + + def update_vip(self, context, id, vip): + v = vip['vip'] + + sess_persist = v.pop('session_persistence', None) + with context.session.begin(subtransactions=True): + vip_db = self._get_resource(context, Vip, id) + + self.assert_modification_allowed(vip_db) + + if sess_persist: + self._update_vip_session_persistence(context, id, sess_persist) + else: + self._delete_session_persistence(context, id) + + if v: + try: + # in case new pool already has a vip + # update will raise integrity error at first query + old_pool_id = vip_db['pool_id'] + vip_db.update(v) + # If the pool_id is changed, we need to update + # the associated pools + if 'pool_id' in v: + new_pool = self._get_resource(context, Pool, + v['pool_id']) + 
self.assert_modification_allowed(new_pool) + + # check that the pool matches the tenant_id + if new_pool['tenant_id'] != vip_db['tenant_id']: + raise n_exc.NotAuthorized() + # validate that the pool has same protocol + if new_pool['protocol'] != vip_db['protocol']: + raise loadbalancer.ProtocolMismatch( + vip_proto=vip_db['protocol'], + pool_proto=new_pool['protocol']) + if new_pool['status'] == constants.PENDING_DELETE: + raise loadbalancer.StateInvalid( + state=new_pool['status'], + id=new_pool['id']) + + if old_pool_id: + old_pool = self._get_resource( + context, + Pool, + old_pool_id + ) + old_pool['vip_id'] = None + + new_pool['vip_id'] = vip_db['id'] + except exception.DBDuplicateEntry: + raise loadbalancer.VipExists(pool_id=v['pool_id']) + + return self._make_vip_dict(vip_db) + + def delete_vip(self, context, id): + with context.session.begin(subtransactions=True): + vip = self._get_resource(context, Vip, id) + qry = context.session.query(Pool) + for pool in qry.filter_by(vip_id=id): + pool.update({"vip_id": None}) + + context.session.delete(vip) + if vip.port: # this is a Neutron port + self._core_plugin.delete_port(context, vip.port.id) + + def get_vip(self, context, id, fields=None): + vip = self._get_resource(context, Vip, id) + return self._make_vip_dict(vip, fields) + + def get_vips(self, context, filters=None, fields=None): + return self._get_collection(context, Vip, + self._make_vip_dict, + filters=filters, fields=fields) + + ######################################################## + # Pool DB access + def _make_pool_dict(self, pool, fields=None): + res = {'id': pool['id'], + 'tenant_id': pool['tenant_id'], + 'name': pool['name'], + 'description': pool['description'], + 'subnet_id': pool['subnet_id'], + 'protocol': pool['protocol'], + 'vip_id': pool['vip_id'], + 'lb_method': pool['lb_method'], + 'admin_state_up': pool['admin_state_up'], + 'status': pool['status'], + 'status_description': pool['status_description'], + 'provider': '' + } + + if pool.provider: + res['provider'] = pool.provider.provider_name + + # Get the associated members + res['members'] = [member['id'] for member in pool['members']] + + # Get the associated health_monitors + res['health_monitors'] = [ + monitor['monitor_id'] for monitor in pool['monitors']] + res['health_monitors_status'] = [ + {'monitor_id': monitor['monitor_id'], + 'status': monitor['status'], + 'status_description': monitor['status_description']} + for monitor in pool['monitors']] + return self._fields(res, fields) + + def update_pool_stats(self, context, pool_id, data=None): + """Update a pool with new stats structure.""" + data = data or {} + with context.session.begin(subtransactions=True): + pool_db = self._get_resource(context, Pool, pool_id) + self.assert_modification_allowed(pool_db) + pool_db.stats = self._create_pool_stats(context, pool_id, data) + + for member, stats in data.get('members', {}).items(): + stats_status = stats.get(lb_const.STATS_STATUS) + if stats_status: + self.update_status(context, Member, member, stats_status) + + def _create_pool_stats(self, context, pool_id, data=None): + # This is internal method to add pool statistics. 
It won't + # be exposed to API + if not data: + data = {} + stats_db = PoolStatistics( + pool_id=pool_id, + bytes_in=data.get(lb_const.STATS_IN_BYTES, 0), + bytes_out=data.get(lb_const.STATS_OUT_BYTES, 0), + active_connections=data.get(lb_const.STATS_ACTIVE_CONNECTIONS, 0), + total_connections=data.get(lb_const.STATS_TOTAL_CONNECTIONS, 0) + ) + return stats_db + + def _delete_pool_stats(self, context, pool_id): + # This is internal method to delete pool statistics. It won't + # be exposed to API + with context.session.begin(subtransactions=True): + stats_qry = context.session.query(PoolStatistics) + try: + stats = stats_qry.filter_by(pool_id=pool_id).one() + except exc.NoResultFound: + raise loadbalancer.PoolStatsNotFound(pool_id=pool_id) + context.session.delete(stats) + + def create_pool(self, context, pool): + v = pool['pool'] + + tenant_id = self._get_tenant_id_for_create(context, v) + with context.session.begin(subtransactions=True): + pool_db = Pool(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=v['name'], + description=v['description'], + subnet_id=v['subnet_id'], + protocol=v['protocol'], + lb_method=v['lb_method'], + admin_state_up=v['admin_state_up'], + status=constants.PENDING_CREATE) + pool_db.stats = self._create_pool_stats(context, pool_db['id']) + context.session.add(pool_db) + + return self._make_pool_dict(pool_db) + + def update_pool(self, context, id, pool): + p = pool['pool'] + with context.session.begin(subtransactions=True): + pool_db = self._get_resource(context, Pool, id) + self.assert_modification_allowed(pool_db) + if p: + pool_db.update(p) + + return self._make_pool_dict(pool_db) + + def _ensure_pool_delete_conditions(self, context, pool_id): + if context.session.query(Vip).filter_by(pool_id=pool_id).first(): + raise loadbalancer.PoolInUse(pool_id=pool_id) + + def delete_pool(self, context, pool_id): + # Check if the pool is in use + self._ensure_pool_delete_conditions(context, pool_id) + + with context.session.begin(subtransactions=True): + self._delete_pool_stats(context, pool_id) + pool_db = self._get_resource(context, Pool, pool_id) + context.session.delete(pool_db) + + def get_pool(self, context, id, fields=None): + pool = self._get_resource(context, Pool, id) + return self._make_pool_dict(pool, fields) + + def get_pools(self, context, filters=None, fields=None): + collection = self._model_query(context, Pool) + collection = self._apply_filters_to_query(collection, Pool, filters) + return [self._make_pool_dict(c, fields) + for c in collection] + + def stats(self, context, pool_id): + with context.session.begin(subtransactions=True): + pool = self._get_resource(context, Pool, pool_id) + stats = pool['stats'] + + res = {lb_const.STATS_IN_BYTES: stats['bytes_in'], + lb_const.STATS_OUT_BYTES: stats['bytes_out'], + lb_const.STATS_ACTIVE_CONNECTIONS: stats['active_connections'], + lb_const.STATS_TOTAL_CONNECTIONS: stats['total_connections']} + return {'stats': res} + + def create_pool_health_monitor(self, context, health_monitor, pool_id): + monitor_id = health_monitor['health_monitor']['id'] + with context.session.begin(subtransactions=True): + assoc_qry = context.session.query(PoolMonitorAssociation) + assoc = assoc_qry.filter_by(pool_id=pool_id, + monitor_id=monitor_id).first() + if assoc: + raise loadbalancer.PoolMonitorAssociationExists( + monitor_id=monitor_id, pool_id=pool_id) + + pool = self._get_resource(context, Pool, pool_id) + + assoc = PoolMonitorAssociation(pool_id=pool_id, + monitor_id=monitor_id, + status=constants.PENDING_CREATE) + 
pool.monitors.append(assoc) + monitors = [monitor['monitor_id'] for monitor in pool['monitors']] + + res = {"health_monitor": monitors} + return res + + def delete_pool_health_monitor(self, context, id, pool_id): + with context.session.begin(subtransactions=True): + assoc = self._get_pool_health_monitor(context, id, pool_id) + pool = self._get_resource(context, Pool, pool_id) + pool.monitors.remove(assoc) + + def _get_pool_health_monitor(self, context, id, pool_id): + try: + assoc_qry = context.session.query(PoolMonitorAssociation) + return assoc_qry.filter_by(monitor_id=id, pool_id=pool_id).one() + except exc.NoResultFound: + raise loadbalancer.PoolMonitorAssociationNotFound( + monitor_id=id, pool_id=pool_id) + + def get_pool_health_monitor(self, context, id, pool_id, fields=None): + pool_hm = self._get_pool_health_monitor(context, id, pool_id) + # need to add tenant_id for admin_or_owner policy check to pass + hm = self.get_health_monitor(context, id) + res = {'pool_id': pool_id, + 'monitor_id': id, + 'status': pool_hm['status'], + 'status_description': pool_hm['status_description'], + 'tenant_id': hm['tenant_id']} + return self._fields(res, fields) + + def update_pool_health_monitor(self, context, id, pool_id, + status, status_description=None): + with context.session.begin(subtransactions=True): + assoc = self._get_pool_health_monitor(context, id, pool_id) + self.assert_modification_allowed(assoc) + assoc.status = status + assoc.status_description = status_description + + ######################################################## + # Member DB access + def _make_member_dict(self, member, fields=None): + res = {'id': member['id'], + 'tenant_id': member['tenant_id'], + 'pool_id': member['pool_id'], + 'address': member['address'], + 'protocol_port': member['protocol_port'], + 'weight': member['weight'], + 'admin_state_up': member['admin_state_up'], + 'status': member['status'], + 'status_description': member['status_description']} + + return self._fields(res, fields) + + def create_member(self, context, member): + v = member['member'] + tenant_id = self._get_tenant_id_for_create(context, v) + + try: + with context.session.begin(subtransactions=True): + # ensuring that pool exists + self._get_resource(context, Pool, v['pool_id']) + member_db = Member(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + pool_id=v['pool_id'], + address=v['address'], + protocol_port=v['protocol_port'], + weight=v['weight'], + admin_state_up=v['admin_state_up'], + status=constants.PENDING_CREATE) + context.session.add(member_db) + return self._make_member_dict(member_db) + except exception.DBDuplicateEntry: + raise loadbalancer.MemberExists( + address=v['address'], + port=v['protocol_port'], + pool=v['pool_id']) + + def update_member(self, context, id, member): + v = member['member'] + try: + with context.session.begin(subtransactions=True): + member_db = self._get_resource(context, Member, id) + self.assert_modification_allowed(member_db) + if v: + member_db.update(v) + return self._make_member_dict(member_db) + except exception.DBDuplicateEntry: + raise loadbalancer.MemberExists( + address=member_db['address'], + port=member_db['protocol_port'], + pool=member_db['pool_id']) + + def delete_member(self, context, id): + with context.session.begin(subtransactions=True): + member_db = self._get_resource(context, Member, id) + context.session.delete(member_db) + + def get_member(self, context, id, fields=None): + member = self._get_resource(context, Member, id) + return self._make_member_dict(member, fields) + + 
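+    # NOTE: a brief, hypothetical usage sketch of the member CRUD above, kept
+    # entirely in comments so it has no runtime effect. The names `plugin`,
+    # `ctx`, `pool_id` and `tenant_id` are illustrative placeholders, not
+    # part of this patch. Creating the same member twice violates the
+    # (pool_id, address, protocol_port) unique constraint on the members
+    # table, and the DBDuplicateEntry handler in create_member() re-raises
+    # that as loadbalancer.MemberExists:
+    #
+    #     body = {'member': {'pool_id': pool_id, 'address': '10.0.0.5',
+    #                        'protocol_port': 80, 'weight': 1,
+    #                        'admin_state_up': True,
+    #                        'tenant_id': tenant_id}}
+    #     plugin.create_member(ctx, body)   # first call succeeds
+    #     plugin.create_member(ctx, body)   # raises loadbalancer.MemberExists
+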
def get_members(self, context, filters=None, fields=None): + return self._get_collection(context, Member, + self._make_member_dict, + filters=filters, fields=fields) + + ######################################################## + # HealthMonitor DB access + def _make_health_monitor_dict(self, health_monitor, fields=None): + res = {'id': health_monitor['id'], + 'tenant_id': health_monitor['tenant_id'], + 'type': health_monitor['type'], + 'delay': health_monitor['delay'], + 'timeout': health_monitor['timeout'], + 'max_retries': health_monitor['max_retries'], + 'admin_state_up': health_monitor['admin_state_up']} + # no point to add the values below to + # the result if the 'type' is not HTTP/S + if res['type'] in ['HTTP', 'HTTPS']: + for attr in ['url_path', 'http_method', 'expected_codes']: + res[attr] = health_monitor[attr] + res['pools'] = [{'pool_id': p['pool_id'], + 'status': p['status'], + 'status_description': p['status_description']} + for p in health_monitor.pools] + return self._fields(res, fields) + + def create_health_monitor(self, context, health_monitor): + v = health_monitor['health_monitor'] + tenant_id = self._get_tenant_id_for_create(context, v) + with context.session.begin(subtransactions=True): + # setting ACTIVE status since healthmon is shared DB object + monitor_db = HealthMonitor(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + type=v['type'], + delay=v['delay'], + timeout=v['timeout'], + max_retries=v['max_retries'], + http_method=v['http_method'], + url_path=v['url_path'], + expected_codes=v['expected_codes'], + admin_state_up=v['admin_state_up']) + context.session.add(monitor_db) + return self._make_health_monitor_dict(monitor_db) + + def update_health_monitor(self, context, id, health_monitor): + v = health_monitor['health_monitor'] + with context.session.begin(subtransactions=True): + monitor_db = self._get_resource(context, HealthMonitor, id) + self.assert_modification_allowed(monitor_db) + if v: + monitor_db.update(v) + return self._make_health_monitor_dict(monitor_db) + + def delete_health_monitor(self, context, id): + """Delete health monitor object from DB + + Raises an error if the monitor has associations with pools + """ + query = self._model_query(context, PoolMonitorAssociation) + has_associations = query.filter_by(monitor_id=id).first() + if has_associations: + raise loadbalancer.HealthMonitorInUse(monitor_id=id) + + with context.session.begin(subtransactions=True): + monitor_db = self._get_resource(context, HealthMonitor, id) + context.session.delete(monitor_db) + + def get_health_monitor(self, context, id, fields=None): + healthmonitor = self._get_resource(context, HealthMonitor, id) + return self._make_health_monitor_dict(healthmonitor, fields) + + def get_health_monitors(self, context, filters=None, fields=None): + return self._get_collection(context, HealthMonitor, + self._make_health_monitor_dict, + filters=filters, fields=fields) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/metering/metering_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/metering/metering_db.py new file mode 100644 index 00000000..fe48ae4f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/metering/metering_db.py @@ -0,0 +1,239 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api +from neutron.common import constants +from neutron.db import api as dbapi +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import l3_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import metering +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils + + +LOG = logging.getLogger(__name__) + + +class MeteringLabelRule(model_base.BASEV2, models_v2.HasId): + direction = sa.Column(sa.Enum('ingress', 'egress', + name='meteringlabels_direction')) + remote_ip_prefix = sa.Column(sa.String(64)) + metering_label_id = sa.Column(sa.String(36), + sa.ForeignKey("meteringlabels.id", + ondelete="CASCADE"), + nullable=False) + excluded = sa.Column(sa.Boolean, default=False) + + +class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(1024)) + rules = orm.relationship(MeteringLabelRule, backref="label", + cascade="delete", lazy="joined") + routers = orm.relationship( + l3_db.Router, + primaryjoin="MeteringLabel.tenant_id==Router.tenant_id", + foreign_keys='MeteringLabel.tenant_id', + uselist=True) + + +class MeteringDbMixin(metering.MeteringPluginBase, + base_db.CommonDbMixin): + + def __init__(self): + dbapi.register_models() + + self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI() + + def _make_metering_label_dict(self, metering_label, fields=None): + res = {'id': metering_label['id'], + 'name': metering_label['name'], + 'description': metering_label['description'], + 'tenant_id': metering_label['tenant_id']} + return self._fields(res, fields) + + def create_metering_label(self, context, metering_label): + m = metering_label['metering_label'] + tenant_id = self._get_tenant_id_for_create(context, m) + + with context.session.begin(subtransactions=True): + metering_db = MeteringLabel(id=uuidutils.generate_uuid(), + description=m['description'], + tenant_id=tenant_id, + name=m['name']) + context.session.add(metering_db) + + return self._make_metering_label_dict(metering_db) + + def delete_metering_label(self, context, label_id): + with context.session.begin(subtransactions=True): + try: + label = self._get_by_id(context, MeteringLabel, label_id) + except orm.exc.NoResultFound: + raise metering.MeteringLabelNotFound(label_id=label_id) + + context.session.delete(label) + + def get_metering_label(self, context, label_id, fields=None): + try: + metering_label = self._get_by_id(context, MeteringLabel, label_id) + except orm.exc.NoResultFound: + raise metering.MeteringLabelNotFound(label_id=label_id) + + return self._make_metering_label_dict(metering_label, fields) + + def get_metering_labels(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'metering_labels', limit, + marker) + return self._get_collection(context, MeteringLabel, + 
self._make_metering_label_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def _make_metering_label_rule_dict(self, metering_label_rule, fields=None): + res = {'id': metering_label_rule['id'], + 'metering_label_id': metering_label_rule['metering_label_id'], + 'direction': metering_label_rule['direction'], + 'remote_ip_prefix': metering_label_rule['remote_ip_prefix'], + 'excluded': metering_label_rule['excluded']} + return self._fields(res, fields) + + def get_metering_label_rules(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'metering_label_rules', + limit, marker) + + return self._get_collection(context, MeteringLabelRule, + self._make_metering_label_rule_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_metering_label_rule(self, context, rule_id, fields=None): + try: + metering_label_rule = self._get_by_id(context, + MeteringLabelRule, rule_id) + except orm.exc.NoResultFound: + raise metering.MeteringLabelRuleNotFound(rule_id=rule_id) + + return self._make_metering_label_rule_dict(metering_label_rule, fields) + + def _validate_cidr(self, context, label_id, remote_ip_prefix, + direction, excluded): + r_ips = self.get_metering_label_rules(context, + filters={'metering_label_id': + label_id, + 'direction': + [direction], + 'excluded': + [excluded]}, + fields=['remote_ip_prefix']) + + cidrs = [r['remote_ip_prefix'] for r in r_ips] + new_cidr_ipset = netaddr.IPSet([remote_ip_prefix]) + if (netaddr.IPSet(cidrs) & new_cidr_ipset): + raise metering.MeteringLabelRuleOverlaps(remote_ip_prefix= + remote_ip_prefix) + + def create_metering_label_rule(self, context, metering_label_rule): + m = metering_label_rule['metering_label_rule'] + with context.session.begin(subtransactions=True): + label_id = m['metering_label_id'] + ip_prefix = m['remote_ip_prefix'] + direction = m['direction'] + excluded = m['excluded'] + + self._validate_cidr(context, label_id, ip_prefix, direction, + excluded) + metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(), + metering_label_id=label_id, + direction=direction, + excluded=m['excluded'], + remote_ip_prefix=ip_prefix) + context.session.add(metering_db) + + return self._make_metering_label_rule_dict(metering_db) + + def delete_metering_label_rule(self, context, rule_id): + with context.session.begin(subtransactions=True): + try: + rule = self._get_by_id(context, MeteringLabelRule, rule_id) + except orm.exc.NoResultFound: + raise metering.MeteringLabelRuleNotFound(rule_id=rule_id) + + context.session.delete(rule) + + def _get_metering_rules_dict(self, metering_label): + rules = [] + for rule in metering_label.rules: + rule_dict = self._make_metering_label_rule_dict(rule) + rules.append(rule_dict) + + return rules + + def _make_router_dict(self, router): + res = {'id': router['id'], + 'name': router['name'], + 'tenant_id': router['tenant_id'], + 'admin_state_up': router['admin_state_up'], + 'status': router['status'], + 'gw_port_id': router['gw_port_id'], + constants.METERING_LABEL_KEY: []} + + return res + + def _process_sync_metering_data(self, labels): + routers_dict = {} + for label in labels: + routers = label.routers + for router in routers: + router_dict = routers_dict.get( + router['id'], + self._make_router_dict(router)) + + rules = self._get_metering_rules_dict(label) + + data = {'id': 
label['id'], 'rules': rules} + router_dict[constants.METERING_LABEL_KEY].append(data) + + routers_dict[router['id']] = router_dict + + return routers_dict.values() + + def get_sync_data_metering(self, context, label_id=None, router_ids=None): + labels = context.session.query(MeteringLabel) + + if label_id: + labels = labels.filter(MeteringLabel.id == label_id) + elif router_ids: + labels = (labels.join(MeteringLabel.routers). + filter(l3_db.Router.id.in_(router_ids))) + + return self._process_sync_metering_data(labels) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/metering/metering_rpc.py b/icehouse-patches/neutron/dvr-patch/neutron/db/metering/metering_rpc.py new file mode 100644 index 00000000..c0bbd51a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/metering/metering_rpc.py @@ -0,0 +1,55 @@ +# Copyright (C) 2014 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants as consts +from neutron.common import utils +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants + +LOG = logging.getLogger(__name__) + + +class MeteringRpcCallbacks(object): + + RPC_API_VERSION = '1.0' + + def __init__(self, meter_plugin): + self.meter_plugin = meter_plugin + + def get_sync_data_metering(self, context, **kwargs): + l3_plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if not l3_plugin: + return + + host = kwargs.get('host') + if not utils.is_extension_supported( + l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host: + return self.meter_plugin.get_sync_data_metering(context) + else: + agents = l3_plugin.get_l3_agents(context, filters={'host': [host]}) + if not agents: + LOG.error(_('Unable to find agent %s.'), host) + return + + routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id) + router_ids = [router['id'] for router in routers['routers']] + if not router_ids: + return + + return self.meter_plugin.get_sync_data_metering(context, + router_ids=router_ids) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/__init__.py new file mode 100644 index 00000000..6b367233 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/__init__.py @@ -0,0 +1,53 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +from alembic import op +import sqlalchemy as sa + +OVS_PLUGIN = ('neutron.plugins.openvswitch.ovs_neutron_plugin' + '.OVSNeutronPluginV2') +CISCO_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2' + + +def should_run(active_plugins, migrate_plugins): + if '*' in migrate_plugins: + return True + else: + if (CISCO_PLUGIN not in migrate_plugins and + OVS_PLUGIN in migrate_plugins): + migrate_plugins.append(CISCO_PLUGIN) + return set(active_plugins) & set(migrate_plugins) + + +def alter_enum(table, column, enum_type, nullable): + bind = op.get_bind() + engine = bind.engine + if engine.name == 'postgresql': + values = {'table': table, + 'column': column, + 'name': enum_type.name} + op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values) + enum_type.create(bind, checkfirst=False) + op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO " + "old_%(column)s" % values) + op.add_column(table, sa.Column(column, enum_type, nullable=nullable)) + op.execute("UPDATE %(table)s SET %(column)s = " + "old_%(column)s::text::%(name)s" % values) + op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values) + op.execute("DROP TYPE old_%(name)s" % values) + else: + op.alter_column(table, column, type_=enum_type, + existing_nullable=nullable) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/env.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/env.py new file mode 100644 index 00000000..88502779 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/env.py @@ -0,0 +1,106 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +from logging import config as logging_config + +from alembic import context +from sqlalchemy import create_engine, pool + +from neutron.db import model_base +from neutron.openstack.common import importutils + + +DATABASE_QUOTA_DRIVER = 'neutron.extensions._quotav2_driver.DbQuotaDriver' + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config +neutron_config = config.neutron_config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +logging_config.fileConfig(config.config_file_name) + +plugin_class_path = neutron_config.core_plugin +active_plugins = [plugin_class_path] +active_plugins += neutron_config.service_plugins + +for class_path in active_plugins: + importutils.import_class(class_path) + +# set the target for 'autogenerate' support +target_metadata = model_base.BASEV2.metadata + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with either a URL + or an Engine. + + Calls to context.execute() here emit the given string to the + script output. 
+ + """ + kwargs = dict() + if neutron_config.database.connection: + kwargs['url'] = neutron_config.database.connection + else: + kwargs['dialect_name'] = neutron_config.database.engine + context.configure(**kwargs) + + with context.begin_transaction(): + context.run_migrations(active_plugins=active_plugins, + options=build_options()) + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + engine = create_engine( + neutron_config.database.connection, + poolclass=pool.NullPool) + + connection = engine.connect() + context.configure( + connection=connection, + target_metadata=target_metadata + ) + + try: + with context.begin_transaction(): + context.run_migrations(active_plugins=active_plugins, + options=build_options()) + finally: + connection.close() + + +def build_options(): + return {'folsom_quota_db_enabled': is_db_quota_enabled()} + + +def is_db_quota_enabled(): + return neutron_config.QUOTAS.quota_driver == DATABASE_QUOTA_DRIVER + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py new file mode 100644 index 00000000..f6d2ce31 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py @@ -0,0 +1,61 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nec-pf-port-del + +Revision ID: 1064e98b7917 +Revises: 3d6fae8b70b0 +Create Date: 2013-09-24 05:33:54.602618 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1064e98b7917' +down_revision = '3d6fae8b70b0' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('packetfilters', 'in_port', + existing_type=sa.String(length=36), + nullable=True) + op.create_foreign_key( + 'packetfilters_ibfk_2', + source='packetfilters', referent='ports', + local_cols=['in_port'], remote_cols=['id'], + ondelete='CASCADE') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint('packetfilters_ibfk_2', 'packetfilters', 'foreignkey') + op.alter_column('packetfilters', 'in_port', + existing_type=sa.String(length=36), + nullable=False) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py new file mode 100644 index 00000000..2949813b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py @@ -0,0 +1,68 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nuage_extraroute + +Revision ID: 10cd28e692e9 +Revises: 1b837a7125a9 +Create Date: 2014-05-14 14:47:53.148132 + +""" + +# revision identifiers, used by Alembic. 
+revision = '10cd28e692e9' +down_revision = '1b837a7125a9' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nuage.plugin.NuagePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'routerroutes_mapping', + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('nuage_route_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + ) + op.create_table( + 'routerroutes', + sa.Column('destination', sa.String(length=64), nullable=False), + sa.Column('nexthop', sa.String(length=64), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('destination', 'nexthop', + 'router_id'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('routerroutes') + op.drop_table('routerroutes_mapping') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py new file mode 100644 index 00000000..f313879f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py @@ -0,0 +1,82 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""initial port security + +Revision ID: 1149d7de0cfa +Revises: 1b693c095aa3 +Create Date: 2013-01-22 14:05:20.696502 + +""" + +# revision identifiers, used by Alembic. +revision = '1149d7de0cfa' +down_revision = '1b693c095aa3' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table('networksecuritybindings', + sa.Column('network_id', sa.String(length=36), + nullable=False), + sa.Column('port_security_enabled', sa.Boolean(), + nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id')) + op.create_table('portsecuritybindings', + sa.Column('port_id', sa.String(length=36), + nullable=False), + sa.Column('port_security_enabled', sa.Boolean(), + nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id')) + ### end Alembic commands ### + + # Copy network and port ids over to network|port(securitybindings) table + # and set port_security_enabled to false as ip address pairs were not + # configured in NVP/NSX originally. + op.execute("INSERT INTO networksecuritybindings SELECT id as " + "network_id, False as port_security_enabled from networks") + op.execute("INSERT INTO portsecuritybindings SELECT id as port_id, " + "False as port_security_enabled from ports") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('portsecuritybindings') + op.drop_table('networksecuritybindings') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py new file mode 100644 index 00000000..99b1c2f3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py @@ -0,0 +1,60 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Pool Monitor status field + +Revision ID: 11c6e18605c8 +Revises: 52ff27f7567a +Create Date: 2013-07-10 06:07:20.878520 + +""" + +# revision identifiers, used by Alembic. 
+revision = '11c6e18605c8' +down_revision = '52ff27f7567a' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('poolmonitorassociations', sa.Column('status', + sa.String(16), + server_default='', + nullable=False)) + op.add_column('poolmonitorassociations', sa.Column('status_description', + sa.String(255))) + + # Set status to ACTIVE for existing associations + op.execute("UPDATE poolmonitorassociations SET status='ACTIVE'") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('poolmonitorassociations', 'status') + op.drop_column('poolmonitorassociations', 'status_description') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py new file mode 100644 index 00000000..59ef4090 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py @@ -0,0 +1,69 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ext_gw_mode + +Revision ID: 128e042a2b68 +Revises: 32b517556ec9 +Create Date: 2013-03-27 00:35:17.323280 + +""" + +# revision identifiers, used by Alembic. 
+revision = '128e042a2b68' +down_revision = '32b517556ec9' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin', + 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.cisco.network_plugin.PluginV2', +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('routers', sa.Column('enable_snat', sa.Boolean(), + nullable=False, default=True)) + # Set enable_snat to True for existing routers + op.execute("UPDATE routers SET enable_snat=True") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('routers', 'enable_snat') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1341ed32cc1e_nvp_netbinding_update.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1341ed32cc1e_nvp_netbinding_update.py new file mode 100644 index 00000000..117552bd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1341ed32cc1e_nvp_netbinding_update.py @@ -0,0 +1,68 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_net_binding + +Revision ID: 1341ed32cc1e +Revises: 4692d074d587 +Create Date: 2013-02-26 01:28:29.182195 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1341ed32cc1e' +down_revision = '4692d074d587' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + +new_type = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', + name='nvp_network_bindings_binding_type') +old_type = sa.Enum('flat', 'vlan', 'stt', 'gre', + name='nvp_network_bindings_binding_type') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.alter_column('nvp_network_bindings', 'tz_uuid', + name='phy_uuid', + existing_type=sa.String(36), + existing_nullable=True) + migration.alter_enum('nvp_network_bindings', 'binding_type', new_type, + nullable=False) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.alter_column('nvp_network_bindings', 'phy_uuid', + name='tz_uuid', + existing_type=sa.String(36), + existing_nullable=True) + migration.alter_enum('nvp_network_bindings', 'binding_type', old_type, + nullable=False) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/13de305df56e_add_nec_pf_name.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/13de305df56e_add_nec_pf_name.py new file mode 100644 index 00000000..bba2e436 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/13de305df56e_add_nec_pf_name.py @@ -0,0 +1,53 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nec_add_pf_name + +Revision ID: 13de305df56e +Revises: b7a8863760e +Create Date: 2013-07-06 00:42:26.991175 + +""" + +# revision identifiers, used by Alembic. 
+revision = '13de305df56e' +down_revision = 'b7a8863760e' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('packetfilters', + sa.Column('name', sa.String(length=255), nullable=True)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('packetfilters', 'name') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py new file mode 100644 index 00000000..015002e7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py @@ -0,0 +1,76 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB Migration for Arista ml2 mechanism driver + +Revision ID: 14f24494ca31 +Revises: 2a3bae1ceb8 +Create Date: 2013-08-15 18:54:16.083640 + +""" + +# revision identifiers, used by Alembic. 
+revision = '14f24494ca31' +down_revision = '2a3bae1ceb8' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'arista_provisioned_nets', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('segmentation_id', sa.Integer(), + autoincrement=False, nullable=True), + sa.PrimaryKeyConstraint('id')) + + op.create_table( + 'arista_provisioned_vms', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('vm_id', sa.String(length=255), nullable=True), + sa.Column('host_id', sa.String(length=255), nullable=True), + sa.Column('port_id', sa.String(length=36), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.PrimaryKeyConstraint('id')) + + op.create_table( + 'arista_provisioned_tenants', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('arista_provisioned_tenants') + op.drop_table('arista_provisioned_vms') + op.drop_table('arista_provisioned_nets') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py new file mode 100644 index 00000000..f7900b69 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py @@ -0,0 +1,53 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ml2 binding:profile + +Revision ID: 157a5d299379 +Revises: 50d5ba354c23 +Create Date: 2014-02-13 23:48:25.147279 + +""" + +# revision identifiers, used by Alembic. 
+revision = '157a5d299379' +down_revision = '50d5ba354c23' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('ml2_port_bindings', + sa.Column('profile', sa.String(length=4095), + nullable=False, server_default='')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('ml2_port_bindings', 'profile') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/176a85fc7d79_add_portbindings_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/176a85fc7d79_add_portbindings_db.py new file mode 100644 index 00000000..1982b466 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/176a85fc7d79_add_portbindings_db.py @@ -0,0 +1,64 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add portbindings db + +Revision ID: 176a85fc7d79 +Revises: f489cf14a79c +Create Date: 2013-03-21 14:59:53.052600 + +""" + +# revision identifiers, used by Alembic. 
+revision = '176a85fc7d79' +down_revision = 'f489cf14a79c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'portbindingports', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.drop_table('portbindingports') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1b693c095aa3_quota_ext_db_grizzly.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1b693c095aa3_quota_ext_db_grizzly.py new file mode 100644 index 00000000..8149a28c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1b693c095aa3_quota_ext_db_grizzly.py @@ -0,0 +1,62 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Quota ext support added in Grizzly + +Revision ID: 1b693c095aa3 +Revises: 1d76643bcec4 +Create Date: 2013-01-19 02:58:17.667524 + +""" + +# revision identifiers, used by Alembic. +revision = '1b693c095aa3' +down_revision = '2a6d0b51f4bb' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('resource', sa.String(length=255), nullable=True), + sa.Column('limit', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('quotas') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1b837a7125a9_cisco_apic_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1b837a7125a9_cisco_apic_driver.py new file mode 100644 index 00000000..92b13264 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1b837a7125a9_cisco_apic_driver.py @@ -0,0 +1,74 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Cisco APIC Mechanism Driver + +Revision ID: 1b837a7125a9 +Revises: 6be312499f9 +Create Date: 2014-02-13 09:35:19.147619 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1b837a7125a9' +down_revision = '6be312499f9' + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_ml2_apic_epgs', + sa.Column('network_id', sa.String(length=255), nullable=False), + sa.Column('epg_id', sa.String(length=64), nullable=False), + sa.Column('segmentation_id', sa.String(length=64), nullable=False), + sa.Column('provider', sa.Boolean(), default=False, nullable=False), + sa.PrimaryKeyConstraint('network_id')) + + op.create_table( + 'cisco_ml2_apic_port_profiles', + sa.Column('node_id', sa.String(length=255), nullable=False), + sa.Column('profile_id', sa.String(length=64), nullable=False), + sa.Column('hpselc_id', sa.String(length=64), nullable=False), + sa.Column('module', sa.String(length=10), nullable=False), + sa.Column('from_port', sa.Integer(), nullable=False), + sa.Column('to_port', sa.Integer(), nullable=False), + sa.PrimaryKeyConstraint('node_id')) + + op.create_table( + 'cisco_ml2_apic_contracts', + sa.Column('tenant_id', sa.String(length=255), nullable=False), + sa.Column('contract_id', sa.String(length=64), nullable=False), + sa.Column('filter_id', sa.String(length=64), nullable=False), + sa.PrimaryKeyConstraint('tenant_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_ml2_apic_contracts') + op.drop_table('cisco_ml2_apic_port_profiles') + op.drop_table('cisco_ml2_apic_epgs') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1c33fa3cd1a1_extra_route_config.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1c33fa3cd1a1_extra_route_config.py new file mode 100644 index 00000000..02d8988e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1c33fa3cd1a1_extra_route_config.py @@ -0,0 +1,80 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Support routing table configuration on Router + +Revision ID: 1c33fa3cd1a1 +Revises: 45680af419f9 +Create Date: 2013-01-17 14:35:09.386975 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1c33fa3cd1a1' +down_revision = '45680af419f9' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.cisco.network_plugin.PluginV2', + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.rename_table( + 'routes', + 'subnetroutes', + ) + op.create_table( + 'routerroutes', + sa.Column('destination', sa.String(length=64), nullable=False), + sa.Column( + 'nexthop', sa.String(length=64), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint( + ['router_id'], ['routers.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('destination', 'nexthop', 'router_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.rename_table( + 'subnetroutes', + 'routes', + ) + op.drop_table('routerroutes') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py new file mode 100644 index 00000000..e896a8f0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py @@ -0,0 +1,65 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_netbinding + +Revision ID: 1d76643bcec4 +Revises: 3cb5d900c5de +Create Date: 2013-01-15 07:36:10.024346 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1d76643bcec4' +down_revision = '3cb5d900c5de' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'nvp_network_bindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('binding_type', + sa.Enum('flat', 'vlan', 'stt', 'gre', + name='nvp_network_bindings_binding_type'), + nullable=False), + sa.Column('tz_uuid', sa.String(length=36), nullable=True), + sa.Column('vlan_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('nvp_network_bindings') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py new file mode 100644 index 00000000..6d4d3ade --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py @@ -0,0 +1,66 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_not_null_fields_lb_stats + +Revision ID: 1e5dd1d09b22 +Revises: 54f7549a0e5f +Create Date: 2014-03-17 11:00:35.370618 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1e5dd1d09b22' +down_revision = '54f7549a0e5f' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('poolstatisticss', 'bytes_in', nullable=False, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'bytes_out', nullable=False, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'active_connections', nullable=False, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'total_connections', nullable=False, + existing_type=sa.BigInteger()) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('poolstatisticss', 'bytes_in', nullable=True, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'bytes_out', nullable=True, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'active_connections', nullable=True, + existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'total_connections', nullable=True, + existing_type=sa.BigInteger()) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py new file mode 100644 index 00000000..f28974a3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py @@ -0,0 +1,65 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""allowedaddresspairs + +Revision ID: 1efb85914233 +Revises: 51b4de912379 +Create Date: 2013-07-23 12:56:00.402855 + +""" + +# revision identifiers, used by Alembic. 
+revision = '1efb85914233' +down_revision = '51b4de912379' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'allowedaddresspairs', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('mac_address', sa.String(length=32), nullable=False), + sa.Column('ip_address', sa.String(length=64), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id', 'mac_address', 'ip_address'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('allowedaddresspairs') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py new file mode 100644 index 00000000..7d532e8a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py @@ -0,0 +1,71 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add a unique constraint on (agent_type, host) columns to prevent a race +condition when an agent entry is 'upserted'. 
+ +Revision ID: 1fcfc149aca4 +Revises: e197124d4b9 +Create Date: 2013-11-27 18:35:28.148680 + +""" + +revision = '1fcfc149aca4' +down_revision = 'e197124d4b9' + +migration_for_plugins = [ + 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', + 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin', +] + +from alembic import op + +from neutron.db import migration + + +TABLE_NAME = 'agents' +UC_NAME = 'uniq_agents0agent_type0host' + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_unique_constraint( + name=UC_NAME, + source=TABLE_NAME, + local_cols=['agent_type', 'host'] + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint( + name=UC_NAME, + table_name=TABLE_NAME, + type_='unique' + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py new file mode 100644 index 00000000..50c9c37d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py @@ -0,0 +1,78 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""L2 models to support DVR + +Revision ID: 2026156eab2f +Revises: 3927f7f7c456 +Create Date: 2014-06-23 19:12:43.392912 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2026156eab2f' +down_revision = '3927f7f7c456' + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'dvr_host_macs', + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('mac_address', sa.String(length=32), + nullable=False, unique=True), + sa.PrimaryKeyConstraint('host') + ) + op.create_table( + 'ml2_dvr_port_bindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('vif_type', sa.String(length=64), nullable=False), + sa.Column('vif_details', sa.String(length=4095), + nullable=False, server_default=''), + sa.Column('vnic_type', sa.String(length=64), + nullable=False, server_default='normal'), + sa.Column('profile', sa.String(length=4095), + nullable=False, server_default=''), + sa.Column('cap_port_filter', sa.Boolean(), nullable=False), + sa.Column('driver', sa.String(length=64), nullable=True), + sa.Column('segment', sa.String(length=36), nullable=True), + sa.Column(u'status', sa.String(16), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'], + ondelete='SET NULL'), + sa.PrimaryKeyConstraint('port_id', 'host') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_dvr_port_bindings') + op.drop_table('dvr_host_macs') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py new file mode 100644 index 00000000..f466e41a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py @@ -0,0 +1,55 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""LBaaS add status description + +Revision ID: 2032abe8edac +Revises: 477a4488d3f4 +Create Date: 2013-06-24 06:51:47.308545 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2032abe8edac' +down_revision = '477a4488d3f4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + +ENTITIES = ['vips', 'pools', 'members', 'healthmonitors'] + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + for entity in ENTITIES: + op.add_column(entity, sa.Column('status_description', sa.String(255))) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + for entity in ENTITIES: + op.drop_column(entity, 'status_description') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py new file mode 100644 index 00000000..65f4ef7e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py @@ -0,0 +1,64 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB Migration for ML2 GRE Type Driver + +Revision ID: 20ae61555e95 +Revises: 13de305df56e +Create Date: 2013-07-10 17:19:03.021937 + +""" + +# revision identifiers, used by Alembic. 
+revision = '20ae61555e95' +down_revision = '13de305df56e' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ml2_gre_allocations', + sa.Column('gre_id', sa.Integer, nullable=False, + autoincrement=False), + sa.Column('allocated', sa.Boolean, nullable=False), + sa.PrimaryKeyConstraint('gre_id') + ) + + op.create_table( + 'ml2_gre_endpoints', + sa.Column('ip_address', sa.String(length=64)), + sa.PrimaryKeyConstraint('ip_address') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_gre_allocations') + op.drop_table('ml2_gre_endpoints') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py new file mode 100644 index 00000000..f3b444a2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py @@ -0,0 +1,81 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author Sean M. Collins (Comcast) + +"""Add IPv6 Subnet properties + +Revision ID: 2447ad0e9585 +Revises: 33dd0a9fa487 +Create Date: 2013-10-23 16:36:44.188904 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2447ad0e9585' +down_revision = '33dd0a9fa487' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + '*' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Workaround for Alembic bug #89 + # https://bitbucket.org/zzzeek/alembic/issue/89 + context = op.get_context() + if context.bind.dialect.name == 'postgresql': + op.execute("CREATE TYPE ipv6_ra_modes AS ENUM ('%s', '%s', '%s')" + % ('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless')) + op.execute("CREATE TYPE ipv6_address_modes AS ENUM ('%s', '%s', '%s')" + % ('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless')) + op.add_column('subnets', + sa.Column('ipv6_ra_mode', + sa.Enum('slaac', + 'dhcpv6-stateful', + 'dhcpv6-stateless', + name='ipv6_ra_modes'), + nullable=True) + ) + op.add_column('subnets', + sa.Column('ipv6_address_mode', + sa.Enum('slaac', + 'dhcpv6-stateful', + 'dhcpv6-stateless', + name='ipv6_address_modes'), + nullable=True) + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('subnets', 'ipv6_ra_mode') + op.drop_column('subnets', 'ipv6_address_mode') + context = op.get_context() + if context.bind.dialect.name == 'postgresql': + op.execute('DROP TYPE ipv6_ra_modes') + op.execute('DROP TYPE ipv6_address_modes') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py new file mode 100644 index 00000000..ea4467d3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py @@ -0,0 +1,59 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NEC PacketFilter network_id nullable fix + +Revision ID: 2528ceb28230 +Revises: 1064e98b7917 +Create Date: 2013-09-24 12:07:43.124256 + +""" + +# revision identifiers, used by Alembic. +revision = '2528ceb28230' +down_revision = '1064e98b7917' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('packetfilters', 'network_id', + existing_type=sa.String(length=36), + nullable=False) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # NOTE(amotoki): There is a bug that nullable of network_id is + # set to True by mistake in folsom_initial (bug 1229508).
+ # To make sure nullable=False in any revision, nullable is set + # to False in both upgrade and downgrade. + op.alter_column('packetfilters', 'network_id', + existing_type=sa.String(length=36), + nullable=False) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py new file mode 100644 index 00000000..8f93bf8e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py @@ -0,0 +1,64 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Cisco plugin db cleanup part II + +Revision ID: 263772d65691 +Revises: 35c7c198ddea +Create Date: 2013-07-29 02:31:26.646343 + +""" + +# revision identifiers, used by Alembic. +revision = '263772d65691' +down_revision = '35c7c198ddea' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.rename_table('credentials', 'cisco_credentials') + op.rename_table('nexusport_bindings', 'cisco_nexusport_bindings') + op.rename_table('qoss', 'cisco_qos_policies') + + op.drop_table('cisco_vlan_ids') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_vlan_ids', + sa.Column('vlan_id', sa.Integer, nullable=False), + sa.Column('vlan_used', sa.Boolean), + sa.PrimaryKeyConstraint('vlan_id'), + ) + + op.rename_table('cisco_credentials', 'credentials') + op.rename_table('cisco_nexusport_bindings', 'nexusport_bindings') + op.rename_table('cisco_qos_policies', 'qoss') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py new file mode 100644 index 00000000..c7101b83 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py @@ -0,0 +1,63 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""quota_in_plumgrid_plugin + +Revision ID: 27ef74513d33 +Revises: 3a520dd165d0 +Create Date: 2013-10-08 10:59:19.860397 + +""" + +# revision identifiers, used by Alembic. +revision = '27ef74513d33' +down_revision = '3a520dd165d0' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.' + 'NeutronPluginPLUMgridV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('resource', sa.String(length=255), nullable=True), + sa.Column('limit', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('quotas') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py new file mode 100644 index 00000000..e2abc4da --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py @@ -0,0 +1,63 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NEC Port Binding + +Revision ID: 2a3bae1ceb8 +Revises: 46a0efbd8f0 +Create Date: 2013-08-22 11:09:19.955386 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2a3bae1ceb8' +down_revision = '46a0efbd8f0' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'portbindingports', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id') + ) + op.create_foreign_key( + 'portinfos_ibfk_1', + source='portinfos', referent='ports', + local_cols=['id'], remote_cols=['id'], + ondelete='CASCADE') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint('portinfos_ibfk_1', 'portinfos', 'foreignkey') + op.drop_table('portbindingports') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py new file mode 100644 index 00000000..48c7c6db --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py @@ -0,0 +1,86 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""cisco plugin cleanup + +Revision ID: 2a6d0b51f4bb +Revises: 1d76643bcec4 +Create Date: 2013-01-17 22:24:37.730466 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2a6d0b51f4bb' +down_revision = '1d76643bcec4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'portprofile_bindings') + op.drop_table(u'portprofiles') + op.drop_table(u'port_bindings') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'port_bindings', + sa.Column(u'id', sa.Integer(), autoincrement=True, + nullable=False), + sa.Column(u'port_id', sa.String(255), nullable=False), + sa.Column(u'blade_intf_dn', sa.String(255), nullable=False), + sa.Column(u'portprofile_name', sa.String(255), + nullable=True), + sa.Column(u'vlan_name', sa.String(255), nullable=True), + sa.Column(u'vlan_id', sa.Integer(), nullable=True), + sa.Column(u'qos', sa.String(255), nullable=True), + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'vif_id', sa.String(255), nullable=True), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'portprofiles', + sa.Column(u'uuid', sa.String(255), nullable=False), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'vlan_id', sa.Integer(), nullable=True), + sa.Column(u'qos', sa.String(255), nullable=True), + sa.PrimaryKeyConstraint(u'uuid') + ) + op.create_table( + u'portprofile_bindings', + sa.Column(u'id', sa.String(255), nullable=False), + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'port_id', sa.String(255), nullable=True), + sa.Column(u'portprofile_id', sa.String(255), nullable=True), + sa.Column(u'default', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ), + sa.PrimaryKeyConstraint(u'id') + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py new file mode 100644 index 00000000..1f41e599 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py @@ -0,0 +1,54 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""l3_support + +Revision ID: 2c4af419145b +Revises: folsom +Create Date: 2013-03-11 19:26:45.697774 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2c4af419145b' +down_revision = 'folsom' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', + 'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin', + 'neutron.plugins.midonet.plugin.MidonetPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin', +] + +from neutron.db import migration +from neutron.db.migration.alembic_migrations import common_ext_ops + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + common_ext_ops.upgrade_l3() + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + common_ext_ops.downgrade_l3() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py new file mode 100644 index 00000000..57876d09 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py @@ -0,0 +1,83 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nuage_floatingip + +Revision ID: 2db5203cb7a9 +Revises: 10cd28e692e9 +Create Date: 2014-05-19 16:39:42.048125 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2db5203cb7a9' +down_revision = '10cd28e692e9' + +migration_for_plugins = [ + 'neutron.plugins.nuage.plugin.NuagePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'nuage_floatingip_pool_mapping', + sa.Column('fip_pool_id', sa.String(length=36), nullable=False), + sa.Column('net_id', sa.String(length=36), nullable=True), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['net_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('fip_pool_id'), + ) + op.create_table( + 'nuage_floatingip_mapping', + sa.Column('fip_id', sa.String(length=36), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('nuage_fip_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['fip_id'], ['floatingips.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('fip_id'), + ) + op.rename_table('net_partitions', 'nuage_net_partitions') + op.rename_table('net_partition_router_mapping', + 'nuage_net_partition_router_mapping') + op.rename_table('router_zone_mapping', 'nuage_router_zone_mapping') + op.rename_table('subnet_l2dom_mapping', 'nuage_subnet_l2dom_mapping') + op.rename_table('port_mapping', 'nuage_port_mapping') + op.rename_table('routerroutes_mapping', 'nuage_routerroutes_mapping') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('nuage_floatingip_mapping') + op.drop_table('nuage_floatingip_pool_mapping') + op.rename_table('nuage_net_partitions', 'net_partitions') + op.rename_table('nuage_net_partition_router_mapping', + 'net_partition_router_mapping') + op.rename_table('nuage_router_zone_mapping', 'router_zone_mapping') + op.rename_table('nuage_subnet_l2dom_mapping', 'subnet_l2dom_mapping') + op.rename_table('nuage_port_mapping', 'port_mapping') + op.rename_table('nuage_routerroutes_mapping', 'routerroutes_mapping') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py new file mode 100644 index 00000000..7da8f841 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py @@ -0,0 +1,79 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""floatingip_status + +Revision ID: 2eeaf963a447 +Revises: f44ab9871cd6 +Create Date: 2014-01-14 11:58:13.754747 + +""" + +# revision identifiers, used by Alembic. 
+revision = '2eeaf963a447' +down_revision = 'f44ab9871cd6' + +# This migration is applied to all L3 capable plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', + 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'neutron.plugins.cisco.network_plugin.PluginV2', + 'neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2', + 'neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin', + 'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin', + 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2', + 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin', + 'neutron.plugins.midonet.plugin.MidonetPluginV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.nuage.plugin.NuagePlugin', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.' + 'NeutronPluginPLUMgridV2', + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.add_column('floatingips', + sa.Column('last_known_router_id', + sa.String(length=36), + nullable=True)) + op.add_column('floatingips', + sa.Column('status', + sa.String(length=16), + nullable=True)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.drop_column('floatingips', 'last_known_router_id') + op.drop_column('floatingips', 'status') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py new file mode 100644 index 00000000..24fe4348 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py @@ -0,0 +1,68 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ml2 portbinding + +Revision ID: 32a65f71af51 +Revises: 14f24494ca31 +Create Date: 2013-09-03 08:40:22.706651 + +""" + +# revision identifiers, used by Alembic. 
+revision = '32a65f71af51' +down_revision = '14f24494ca31' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ml2_port_bindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('vif_type', sa.String(length=64), nullable=False), + sa.Column('cap_port_filter', sa.Boolean(), nullable=False), + sa.Column('driver', sa.String(length=64), nullable=True), + sa.Column('segment', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'], + ondelete='SET NULL'), + sa.PrimaryKeyConstraint('port_id') + ) + + # Note that 176a85fc7d79_add_portbindings_db.py was never enabled + # for ml2, so there is no need to drop the portbindingports table + # that is no longer used. + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_port_bindings') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py new file mode 100644 index 00000000..7bf54098 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py @@ -0,0 +1,56 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove TunnelIP model + +Revision ID: 32b517556ec9 +Revises: 176a85fc7d79 +Create Date: 2013-05-23 06:46:57.390838 + +""" + +# revision identifiers, used by Alembic. 
+revision = '32b517556ec9' +down_revision = '176a85fc7d79' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ovs_tunnel_ips') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ovs_tunnel_ips', + sa.Column('ip_address', sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint('ip_address') + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py new file mode 100644 index 00000000..6eb46fee --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py @@ -0,0 +1,53 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""vpnaas peer_address size increase + +Revision ID: 338d7508968c +Revises: 4a666eb208c2 +Create Date: 2013-09-16 11:31:39.410189 + +""" + +# revision identifiers, used by Alembic. +revision = '338d7508968c' +down_revision = '4a666eb208c2' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.vpn.plugin.VPNDriverPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ipsec_site_connections', 'peer_address', + type_=sa.String(255), existing_type=sa.String(64)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ipsec_site_connections', 'peer_address', + type_=sa.String(64), existing_type=sa.String(255)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py new file mode 100644 index 00000000..0882aa7f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py @@ -0,0 +1,58 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_length_of_description_field_metering + +Revision ID: 33c3db036fe4 +Revises: b65aa907aec +Create Date: 2014-03-25 11:04:27.341830 + +""" + +# revision identifiers, used by Alembic. +revision = '33c3db036fe4' +down_revision = 'b65aa907aec' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.metering.metering_plugin.MeteringPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.execute("CREATE TABLE IF NOT EXISTS meteringlabels( " + "tenant_id VARCHAR(255) NULL, " + "id VARCHAR(36) PRIMARY KEY NOT NULL, " + "name VARCHAR(255) NULL, " + "description VARCHAR(255) NULL)") + + op.alter_column('meteringlabels', 'description', type_=sa.String(1024), + existing_nullable=True) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py new file mode 100644 index 00000000..f1d5fd01 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py @@ -0,0 +1,59 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""embrane_lbaas_driver + +Revision ID: 33dd0a9fa487 +Revises: 19180cf98af6 +Create Date: 2014-02-25 00:15:35.567111 + +""" + +# revision identifiers, used by Alembic. 
+revision = '33dd0a9fa487' +down_revision = '19180cf98af6' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'embrane_pool_port', + sa.Column(u'pool_id', sa.String(length=36), nullable=False), + sa.Column(u'port_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], + name=u'embrane_pool_port_ibfk_1'), + sa.ForeignKeyConstraint(['port_id'], [u'ports.id'], + name=u'embrane_pool_port_ibfk_2'), + sa.PrimaryKeyConstraint(u'pool_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'embrane_pool_port') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py new file mode 100644 index 00000000..96806601 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py @@ -0,0 +1,56 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove status from HealthMonitor + +Revision ID: 35c7c198ddea +Revises: 11c6e18605c8 +Create Date: 2013-08-02 23:14:54.037976 + +""" + +# revision identifiers, used by Alembic. 
+revision = '35c7c198ddea' +down_revision = '11c6e18605c8' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.drop_column('healthmonitors', 'status') + op.drop_column('healthmonitors', 'status_description') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('healthmonitors', sa.Column('status', + sa.String(16), + nullable=False)) + op.add_column('healthmonitors', sa.Column('status_description', + sa.String(255))) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py new file mode 100644 index 00000000..a115c432 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py @@ -0,0 +1,98 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_network_gw + +Revision ID: 363468ac592c +Revises: 1c33fa3cd1a1 +Create Date: 2013-02-07 03:19:14.455372 + +""" + +# revision identifiers, used by Alembic. 
+revision = '363468ac592c' +down_revision = '1c33fa3cd1a1' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.create_table('networkgateways', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('tenant_id', sa.String(length=36), + nullable=True), + sa.Column('default', sa.Boolean(), nullable=True), + sa.PrimaryKeyConstraint('id')) + op.create_table('networkgatewaydevices', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_gateway_id', sa.String(length=36), + nullable=True), + sa.Column('interface_name', sa.String(length=64), + nullable=True), + sa.ForeignKeyConstraint(['network_gateway_id'], + ['networkgateways.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id')) + op.create_table('networkconnections', + sa.Column('tenant_id', sa.String(length=255), + nullable=True), + sa.Column('network_gateway_id', sa.String(length=36), + nullable=True), + sa.Column('network_id', sa.String(length=36), + nullable=True), + sa.Column('segmentation_type', + sa.Enum('flat', 'vlan', + name="net_conn_seg_type"), + nullable=True), + sa.Column('segmentation_id', sa.Integer(), + nullable=True), + sa.Column('port_id', sa.String(length=36), + nullable=False), + sa.ForeignKeyConstraint(['network_gateway_id'], + ['networkgateways.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id'), + sa.UniqueConstraint('network_gateway_id', + 'segmentation_type', + 'segmentation_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('networkconnections') + op.drop_table('networkgatewaydevices') + op.drop_table('networkgateways') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py new file mode 100644 index 00000000..e7148167 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py @@ -0,0 +1,60 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""nvp_portmap + +Revision ID: 38335592a0dc +Revises: 49332180ca96 +Create Date: 2013-01-15 06:04:56.328991 + +""" + +# revision identifiers, used by Alembic. +revision = '38335592a0dc' +down_revision = '49332180ca96' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'quantum_nvp_port_mapping', + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.Column('nvp_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['quantum_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('quantum_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('quantum_nvp_port_mapping') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py new file mode 100644 index 00000000..3c43ea55 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py @@ -0,0 +1,55 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Cisco N1KV overlay support + +Revision ID: 38fc1f6789f8 +Revises: 1efb85914233 +Create Date: 2013-08-20 18:31:16.158387 + +""" + +revision = '38fc1f6789f8' +down_revision = '1efb85914233' + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +import sqlalchemy as sa + +from neutron.db import migration + + +new_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment', + name='vlan_type') +old_type = sa.Enum('vlan', 'vxlan', 'trunk', 'multi-segment', + name='vlan_type') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + migration.alter_enum('cisco_network_profiles', 'segment_type', new_type, + nullable=False) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + migration.alter_enum('cisco_network_profiles', 'segment_type', old_type, + nullable=False) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py new file mode 100644 index 00000000..4a467269 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py @@ -0,0 +1,59 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""L3 extension distributed mode + +Revision ID: 3927f7f7c456 +Revises: 2db5203cb7a9 +Create Date: 2014-04-02 23:26:19.303633 +""" + +# revision identifiers, used by Alembic. 
+revision = '3927f7f7c456' +down_revision = '2db5203cb7a9' + +migration_for_plugins = [ + '*' +] + + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'router_extra_attributes', + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('distributed', sa.Boolean(), nullable=False, + server_default=sa.text('false')), + sa.ForeignKeyConstraint( + ['router_id'], ['routers.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('router_id') + ) + op.execute("INSERT INTO router_extra_attributes SELECT id as router_id, " + "False as distributed from routers") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('router_extra_attributes') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py new file mode 100644 index 00000000..d5b1a7bd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py @@ -0,0 +1,107 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""FWaaS Havana-2 model + +Revision ID: 39cf3f799352 +Revises: e6b16a30d97 +Create Date: 2013-07-10 16:16:51.302943 + +""" + +# revision identifiers, used by Alembic. 
+revision = '39cf3f799352' +down_revision = 'e6b16a30d97' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.firewall.fwaas_plugin.FirewallPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('firewall_rules') + op.drop_table('firewalls') + op.drop_table('firewall_policies') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'firewall_policies', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=1024), nullable=True), + sa.Column('shared', sa.Boolean(), autoincrement=False, nullable=True), + sa.Column('audited', sa.Boolean(), autoincrement=False, + nullable=True), + sa.PrimaryKeyConstraint('id')) + op.create_table( + 'firewalls', sa.Column('tenant_id', sa.String(length=255), + nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=1024), nullable=True), + sa.Column('shared', sa.Boolean(), autoincrement=False, nullable=True), + sa.Column('admin_state_up', sa.Boolean(), autoincrement=False, + nullable=True), + sa.Column('status', sa.String(length=16), nullable=True), + sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['firewall_policy_id'], + ['firewall_policies.id'], + name='firewalls_ibfk_1'), + sa.PrimaryKeyConstraint('id')) + op.create_table( + 'firewall_rules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=1024), nullable=True), + sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), + sa.Column('shared', sa.Boolean(), autoincrement=False, + nullable=True), + sa.Column('protocol', sa.String(length=24), nullable=True), + sa.Column('ip_version', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('source_ip_address', sa.String(length=46), nullable=True), + sa.Column('destination_ip_address', sa.String(length=46), + nullable=True), + sa.Column('source_port_range_min', sa.Integer(), nullable=True), + sa.Column('source_port_range_max', sa.Integer(), nullable=True), + sa.Column('destination_port_range_min', sa.Integer(), nullable=True), + sa.Column('destination_port_range_max', sa.Integer(), nullable=True), + sa.Column('action', + sa.Enum('allow', 'deny', name='firewallrules_action'), + nullable=True), + sa.Column('enabled', sa.Boolean(), autoincrement=False, + nullable=True), + sa.Column('position', sa.Integer(), autoincrement=False, + nullable=True), + sa.ForeignKeyConstraint(['firewall_policy_id'], + ['firewall_policies.id'], + name='firewall_rules_ibfk_1'), + sa.PrimaryKeyConstraint('id')) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py new file mode 100644 index 00000000..a38d84dd 
--- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py @@ -0,0 +1,57 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Cisco Nexus multi-switch + +Revision ID: 3a520dd165d0 +Revises: 2528ceb28230 +Create Date: 2013-09-28 15:23:38.872682 + +""" + +# revision identifiers, used by Alembic. +revision = '3a520dd165d0' +down_revision = '2528ceb28230' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column( + 'cisco_nexusport_bindings', + sa.Column('instance_id', sa.String(length=255), nullable=False)) + op.add_column( + 'cisco_nexusport_bindings', + sa.Column('switch_ip', sa.String(length=255), nullable=False)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('cisco_nexusport_bindings', 'switch_ip') + op.drop_column('cisco_nexusport_bindings', 'instance_id') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py new file mode 100644 index 00000000..87b87165 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py @@ -0,0 +1,82 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NEC plugin sharednet + +Revision ID: 3b54bf9e29f7 +Revises: 511471cc46b +Create Date: 2013-02-17 09:21:48.287134 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3b54bf9e29f7' +down_revision = '511471cc46b' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ofctenantmappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id') + ) + op.create_table( + 'ofcnetworkmappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id') + ) + op.create_table( + 'ofcportmappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id') + ) + op.create_table( + 'ofcfiltermappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ofcfiltermappings') + op.drop_table('ofcportmappings') + op.drop_table('ofcnetworkmappings') + op.drop_table('ofctenantmappings') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py new file mode 100644 index 00000000..3cbfb542 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py @@ -0,0 +1,101 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add multiprovider + +Revision ID: 3c6e57a23db4 +Revises: 86cf4d88bd3 +Create Date: 2013-07-10 12:43:35.769283 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3c6e57a23db4' +down_revision = '86cf4d88bd3' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'nvp_multi_provider_networks', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id'), + mysql_engine='InnoDB' + ) + op.create_table('rename_nvp_network_bindings', + sa.Column('network_id', sa.String(length=36), + primary_key=True), + sa.Column('binding_type', + sa.Enum( + 'flat', 'vlan', 'stt', 'gre', 'l3_ext', + name=( + 'nvp_network_bindings_binding_type')), + nullable=False, primary_key=True), + sa.Column('phy_uuid', sa.String(36), primary_key=True, + nullable=True), + sa.Column('vlan_id', sa.Integer, primary_key=True, + nullable=True, autoincrement=False)) + # copy data from nvp_network_bindings into rename_nvp_network_bindings + op.execute("INSERT INTO rename_nvp_network_bindings SELECT network_id, " + "binding_type, phy_uuid, vlan_id from nvp_network_bindings") + + op.drop_table('nvp_network_bindings') + op.rename_table('rename_nvp_network_bindings', 'nvp_network_bindings') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Delete the multi_provider_network entries from nvp_network_bindings + op.execute("DELETE from nvp_network_bindings WHERE network_id IN " + "(SELECT network_id from nvp_multi_provider_networks)") + + # create table with previous contents + op.create_table( + 'rename_nvp_network_bindings', + sa.Column('network_id', sa.String(length=36), primary_key=True), + sa.Column('binding_type', + sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', + name=('nvp_network_bindings_binding_type')), + nullable=False), + sa.Column('phy_uuid', sa.String(36), nullable=True), + sa.Column('vlan_id', sa.Integer, nullable=True, autoincrement=False)) + + # copy data from nvp_network_bindings into rename_nvp_network_bindings + op.execute("INSERT INTO rename_nvp_network_bindings SELECT network_id, " + "binding_type, phy_uuid, vlan_id from nvp_network_bindings") + + op.drop_table('nvp_network_bindings') + op.rename_table('rename_nvp_network_bindings', 'nvp_network_bindings') + op.drop_table('nvp_multi_provider_networks') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py new file mode 100644 index 00000000..a25f565f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py @@ -0,0 +1,61 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Table to track port to host associations + +Revision ID: 3cabb850f4a5 +Revises: 5918cbddab04 +Create Date: 2013-06-24 14:30:33.533562 + +""" + +# revision identifiers, used by Alembic. +revision = '3cabb850f4a5' +down_revision = '5918cbddab04' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table('portlocations', + sa.Column('port_id', sa.String(length=255), + primary_key=True, nullable=False), + sa.Column('host_id', + sa.String(length=255), nullable=False) + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('portlocations') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py new file mode 100644 index 00000000..7f2d2b88 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py @@ -0,0 +1,101 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""security_groups + +Revision ID: 3cb5d900c5de +Revises: 48b6f43f7471 +Create Date: 2013-01-08 00:13:43.051078 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3cb5d900c5de' +down_revision = '48b6f43f7471' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'securitygroups', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygrouprules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.Column('remote_group_id', sa.String(length=36), nullable=True), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='securitygrouprules_direction'), + nullable=True), + sa.Column('ethertype', sa.String(length=40), nullable=True), + sa.Column('protocol', sa.String(length=40), nullable=True), + sa.Column('port_range_min', sa.Integer(), nullable=True), + sa.Column('port_range_max', sa.Integer(), nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygroupportbindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']), + sa.PrimaryKeyConstraint('port_id', 'security_group_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.drop_table('securitygroupportbindings') + op.drop_table('securitygrouprules') + op.drop_table('securitygroups') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py new file mode 100644 index 00000000..e63956a0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py @@ -0,0 +1,61 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_mac_learning + +Revision ID: 3cbf70257c28 +Revises: 5ac71e65402c +Create Date: 2013-05-15 10:15:50.875314 + +""" + +# revision identifiers, used by Alembic. +revision = '3cbf70257c28' +down_revision = '5ac71e65402c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'maclearningstates', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('mac_learning_enabled', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('port_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('maclearningstates') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py new file mode 100644 index 00000000..0c9f2569 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py @@ -0,0 +1,80 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""nvp lbaas plugin + +Revision ID: 3d6fae8b70b0 +Revises: 3ed8f075e38a +Create Date: 2013-09-13 19:34:41.522665 + +""" + +# revision identifiers, used by Alembic. +revision = '3d6fae8b70b0' +down_revision = '3ed8f075e38a' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'vcns_edge_pool_bindings', + sa.Column('pool_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=36), nullable=False), + sa.Column('pool_vseid', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('pool_id', 'edge_id') + ) + op.create_table( + 'vcns_edge_monitor_bindings', + sa.Column('monitor_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=36), nullable=False), + sa.Column('monitor_vseid', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('monitor_id', 'edge_id') + ) + op.create_table( + 'vcns_edge_vip_bindings', + sa.Column('vip_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=36), nullable=True), + sa.Column('vip_vseid', sa.String(length=36), nullable=True), + sa.Column('app_profileid', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('vip_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('vcns_edge_vip_bindings') + op.drop_table('vcns_edge_monitor_bindings') + op.drop_table('vcns_edge_pool_bindings') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py new file mode 100644 index 00000000..2ebc6593 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py @@ -0,0 +1,58 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp fwaas plugin + +Revision ID: 3ed8f075e38a +Revises: 338d7508968c +Create Date: 2013-09-13 19:14:25.509033 + +""" + +# revision identifiers, used by Alembic. 
+revision = '3ed8f075e38a' +down_revision = '338d7508968c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'vcns_firewall_rule_bindings', + sa.Column('rule_id', sa.String(length=36), nullable=False), + sa.Column('edge_id', sa.String(length=36), nullable=False), + sa.Column('rule_vseid', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['rule_id'], ['firewall_rules.id'], ), + sa.PrimaryKeyConstraint('rule_id', 'edge_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('vcns_firewall_rule_bindings') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py new file mode 100644 index 00000000..02f17814 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py @@ -0,0 +1,61 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_dist_router + +Revision ID: 40dffbf4b549 +Revises: 63afba73813 +Create Date: 2013-08-21 18:00:26.214923 + +""" + +# revision identifiers, used by Alembic. 
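+# Illustrative note: in an Icehouse deployment these Alembic scripts are
+# normally applied through the neutron-db-manage wrapper rather than by
+# calling alembic directly; a typical (assumed) invocation looks like:
+#
+#   neutron-db-manage --config-file /etc/neutron/neutron.conf \
+#       --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
+#
+# The migration environment derives active_plugins from core_plugin and
+# service_plugins in those config files and passes it into each script's
+# upgrade()/downgrade().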
+revision = '40dffbf4b549' +down_revision = '63afba73813' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'nsxrouterextattributess', + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('distributed', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ['router_id'], ['routers.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('router_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('nsxrouterextattributess') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py new file mode 100644 index 00000000..b4d5b430 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py @@ -0,0 +1,92 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_qos + +Revision ID: 45680af419f9 +Revises: 54c2c487e913 +Create Date: 2013-02-17 13:27:57.999631 + +""" + +# revision identifiers, used by Alembic. +revision = '45680af419f9' +down_revision = '54c2c487e913' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'qosqueues', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('default', sa.Boolean(), nullable=True), + sa.Column('min', sa.Integer(), nullable=False), + sa.Column('max', sa.Integer(), nullable=True), + sa.Column('qos_marking', sa.Enum('untrusted', 'trusted', + name='qosqueues_qos_marking'), + nullable=True), + sa.Column('dscp', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'networkqueuemappings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('queue_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id') + ) + op.create_table( + 'portqueuemappings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('queue_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], ), + sa.PrimaryKeyConstraint('port_id', 'queue_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('portqueuemappings') + op.drop_table('networkqueuemappings') + op.drop_table('qosqueues') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py new file mode 100644 index 00000000..6de9db5f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py @@ -0,0 +1,87 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""agent scheduler + +Revision ID: 4692d074d587 +Revises: 3b54bf9e29f7 +Create Date: 2013-02-21 23:01:50.370306 + +""" + +# revision identifiers, used by Alembic. 
+revision = '4692d074d587' +down_revision = '3b54bf9e29f7' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'networkdhcpagentbindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id') + ) + op.create_table( + 'routerl3agentbindings', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('l3_agent_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('routerl3agentbindings') + op.drop_table('networkdhcpagentbindings') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py new file mode 100644 index 00000000..8ef8c1ef --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py @@ -0,0 +1,78 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""cisco_n1kv_multisegment_trunk + +Revision ID: 46a0efbd8f0 +Revises: 53bbd27ec841 +Create Date: 2013-08-20 20:44:08.711110 + +""" + +revision = '46a0efbd8f0' +down_revision = '53bbd27ec841' + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + +new_type = sa.Enum('vlan', 'vxlan', 'trunk', 'multi-segment', name='vlan_type') +old_type = sa.Enum('vlan', 'vxlan', name='vlan_type') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_n1kv_trunk_segments', + sa.Column('trunk_segment_id', sa.String(length=36), nullable=False), + sa.Column('segment_id', sa.String(length=36), nullable=False), + sa.Column('dot1qtag', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag') + ) + op.create_table( + 'cisco_n1kv_multi_segments', + sa.Column('multi_segment_id', sa.String(length=36), nullable=False), + sa.Column('segment1_id', sa.String(length=36), nullable=False), + sa.Column('segment2_id', sa.String(length=36), nullable=False), + sa.Column('encap_profile_name', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id', + 'segment2_id') + ) + migration.alter_enum('cisco_network_profiles', 'segment_type', new_type, + nullable=False) + op.add_column('cisco_network_profiles', + sa.Column('sub_type', sa.String(length=255), nullable=True)) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_n1kv_trunk_segments') + op.drop_table('cisco_n1kv_multi_segments') + migration.alter_enum('cisco_network_profiles', 'segment_type', old_type, + nullable=False) + op.drop_column('cisco_network_profiles', 'sub_type') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py new file mode 100644 index 00000000..0083913d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py @@ -0,0 +1,67 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB Migration for ML2 VXLAN Type Driver + +Revision ID: 477a4488d3f4 +Revises: 20ae61555e95 +Create Date: 2013-07-09 14:14:33.158502 + +""" + +# revision identifiers, used by Alembic. 
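+# Illustrative note: the ml2_vxlan_allocations and ml2_vxlan_endpoints tables
+# created below declare Integer primary-key columns with autoincrement=False
+# so that SQLAlchemy does not turn vxlan_vni or udp_port into an
+# AUTO_INCREMENT column on MySQL; the VNI and port values are supplied by the
+# type driver itself, e.g. (sketch; model name and values assumed):
+#
+#   session.add(VxlanAllocation(vxlan_vni=1001, allocated=False))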
+revision = '477a4488d3f4' +down_revision = '20ae61555e95' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ml2_vxlan_allocations', + sa.Column('vxlan_vni', sa.Integer, nullable=False, + autoincrement=False), + sa.Column('allocated', sa.Boolean, nullable=False), + sa.PrimaryKeyConstraint('vxlan_vni') + ) + + op.create_table( + 'ml2_vxlan_endpoints', + sa.Column('ip_address', sa.String(length=64)), + sa.Column('udp_port', sa.Integer(), nullable=False, + autoincrement=False), + sa.PrimaryKeyConstraint('ip_address', 'udp_port') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_vxlan_allocations') + op.drop_table('ml2_vxlan_endpoints') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py new file mode 100644 index 00000000..d7a74000 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py @@ -0,0 +1,74 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB support for service types + +Revision ID: 48b6f43f7471 +Revises: 5a875d0e5c +Create Date: 2013-01-07 13:47:29.093160 + +""" + +# revision identifiers, used by Alembic. 
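+# Illustrative note: every upgrade()/downgrade() in these scripts is gated by
+# neutron.db.migration.should_run(); a simplified sketch of that check is:
+#
+#   def should_run(active_plugins, migrate_plugins):
+#       return '*' in migrate_plugins or bool(
+#           set(active_plugins) & set(migrate_plugins))
+#
+# so the ['*'] entry used by this service-types migration makes it run for
+# every configured plugin.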
+revision = '48b6f43f7471' +down_revision = '5a875d0e5c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + '*' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'servicetypes', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'description', sa.String(255), nullable=True), + sa.Column(u'default', sa.Boolean(), + autoincrement=False, nullable=False), + sa.Column(u'num_instances', sa.Integer(), + autoincrement=False, nullable=True), + sa.PrimaryKeyConstraint(u'id')) + op.create_table( + u'servicedefinitions', + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'service_class', sa.String(length=255), + nullable=False), + sa.Column(u'plugin', sa.String(255), nullable=True), + sa.Column(u'driver', sa.String(255), nullable=True), + sa.Column(u'service_type_id', sa.String(36), + nullable=False), + sa.ForeignKeyConstraint(['service_type_id'], [u'servicetypes.id'], + name=u'servicedefinitions_ibfk_1'), + sa.PrimaryKeyConstraint(u'id', u'service_class', u'service_type_id')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'servicedefinitions') + op.drop_table(u'servicetypes') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py new file mode 100644 index 00000000..53f267b2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py @@ -0,0 +1,57 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ryu plugin update + +Revision ID: 49332180ca96 +Revises: 1149d7de0cfa +Create Date: 2013-01-30 07:52:58.472885 + +""" + +# revision identifiers, used by Alembic. 
+revision = '49332180ca96' +down_revision = '1149d7de0cfa' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ofp_server') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ofp_server', + sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), + sa.Column('address', sa.String(length=255)), + sa.Column('host_type', sa.String(length=255)), + sa.PrimaryKeyConstraint(u'id') + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/49f5e553f61f_ml2_security_groups.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/49f5e553f61f_ml2_security_groups.py new file mode 100644 index 00000000..9451b1dc --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/49f5e553f61f_ml2_security_groups.py @@ -0,0 +1,93 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""security_groups + +Revision ID: 49f5e553f61f +Revises: 27ef74513d33 +Create Date: 2013-12-21 19:58:17.071412 + +""" + +# revision identifiers, used by Alembic. +revision = '49f5e553f61f' +down_revision = '27ef74513d33' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'securitygroups', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygrouprules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.Column('remote_group_id', sa.String(length=36), nullable=True), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='securitygrouprules_direction'), + nullable=True), + sa.Column('ethertype', sa.String(length=40), nullable=True), + sa.Column('protocol', sa.String(length=40), nullable=True), + sa.Column('port_range_min', sa.Integer(), nullable=True), + sa.Column('port_range_max', sa.Integer(), nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygroupportbindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']), + sa.PrimaryKeyConstraint('port_id', 'security_group_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('securitygroupportbindings') + op.drop_table('securitygrouprules') + op.drop_table('securitygroups') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py new file mode 100644 index 00000000..10bc6fee --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py @@ -0,0 +1,59 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Remove ML2 Cisco Credentials DB + +Revision ID: 4eca4a84f08a +Revises: 33c3db036fe4 +Create Date: 2014-04-10 19:32:46.697189 + +""" + +# revision identifiers, used by Alembic. 
+revision = '4eca4a84f08a' +down_revision = '33c3db036fe4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_ml2_credentials') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_ml2_credentials', + sa.Column('credential_id', sa.String(length=255), nullable=True), + sa.Column('tenant_id', sa.String(length=255), nullable=False), + sa.Column('credential_name', sa.String(length=255), nullable=False), + sa.Column('user_name', sa.String(length=255), nullable=True), + sa.Column('password', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('tenant_id', 'credential_name') + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py new file mode 100644 index 00000000..ad4fa3b9 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py @@ -0,0 +1,97 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ml2 binding:vif_details + +Revision ID: 50d5ba354c23 +Revises: 27cc183af192 +Create Date: 2014-02-11 23:21:59.577972 + +""" + +# revision identifiers, used by Alembic. 
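+# Illustrative note: the vif_details column added below holds a JSON-encoded
+# string; after the data migration a binding row typically carries
+# '{"port_filter": true}'. A consumer decodes it with the standard library,
+# e.g. (sketch, variable name assumed):
+#
+#   import json
+#   port_filter = json.loads(binding.vif_details).get('port_filter', False)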
+revision = '50d5ba354c23' +down_revision = '27cc183af192' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.add_column('ml2_port_bindings', + sa.Column('vif_details', sa.String(length=4095), + nullable=False, server_default='')) + if op.get_bind().engine.name == 'ibm_db_sa': + op.execute( + "UPDATE ml2_port_bindings SET" + " vif_details = '{\"port_filter\": true}'" + " WHERE cap_port_filter = 1") + op.execute( + "UPDATE ml2_port_bindings SET" + " vif_details = '{\"port_filter\": false}'" + " WHERE cap_port_filter = 0") + else: + op.execute( + "UPDATE ml2_port_bindings SET" + " vif_details = '{\"port_filter\": true}'" + " WHERE cap_port_filter = true") + op.execute( + "UPDATE ml2_port_bindings SET" + " vif_details = '{\"port_filter\": false}'" + " WHERE cap_port_filter = false") + op.drop_column('ml2_port_bindings', 'cap_port_filter') + if op.get_bind().engine.name == 'ibm_db_sa': + op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')") + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + if op.get_bind().engine.name == 'ibm_db_sa': + # Note(xuhanp): DB2 doesn't allow nullable=False Column with + # "DEFAULT" clause not specified. So server_default is used. + # Using sa.text will result "DEFAULT 0" for cap_port_filter. + op.add_column('ml2_port_bindings', + sa.Column('cap_port_filter', sa.Boolean(), + nullable=False, + server_default=sa.text("0"))) + op.execute( + "UPDATE ml2_port_bindings SET" + " cap_port_filter = 1" + " WHERE vif_details LIKE '%\"port_filter\": true%'") + else: + op.add_column('ml2_port_bindings', + sa.Column('cap_port_filter', sa.Boolean(), + nullable=False, + server_default=sa.text("false"))) + op.execute( + "UPDATE ml2_port_bindings SET" + " cap_port_filter = true" + " WHERE vif_details LIKE '%\"port_filter\": true%'") + op.drop_column('ml2_port_bindings', 'vif_details') + if op.get_bind().engine.name == 'ibm_db_sa': + op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py new file mode 100644 index 00000000..80da209a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py @@ -0,0 +1,80 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nsx_mappings + +Revision ID: 50e86cb2637a +Revises: havana +Create Date: 2013-10-26 14:37:30.012149 + +""" + +# revision identifiers, used by Alembic. 
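+# Illustrative note: the migration below moves quantum_nvp_port_mapping to
+# neutron_nsx_port_mappings by creating the new table, copying rows with an
+# INSERT ... SELECT, and dropping the old table. A plain rename such as
+#
+#   op.rename_table('quantum_nvp_port_mapping', 'neutron_nsx_port_mappings')
+#
+# would not be enough here because the columns are renamed as well
+# (quantum_id -> neutron_id, nvp_id -> nsx_port_id) and a new nsx_switch_id
+# column is introduced.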
+revision = '50e86cb2637a' +down_revision = '1fcfc149aca4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table('neutron_nsx_port_mappings', + sa.Column('neutron_id', sa.String(length=36), + nullable=False), + sa.Column('nsx_port_id', sa.String(length=36), + nullable=False), + sa.Column('nsx_switch_id', sa.String(length=36), + nullable=True), + sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('neutron_id')) + + op.execute("INSERT INTO neutron_nsx_port_mappings SELECT quantum_id as " + "neutron_id, nvp_id as nsx_port_id, null as nsx_switch_id from" + " quantum_nvp_port_mapping") + op.drop_table('quantum_nvp_port_mapping') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + # Restore table to pre-icehouse version + op.create_table('quantum_nvp_port_mapping', + sa.Column('quantum_id', sa.String(length=36), + nullable=False), + sa.Column('nvp_id', sa.String(length=36), + nullable=False), + sa.ForeignKeyConstraint(['quantum_id'], ['ports.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('quantum_id')) + op.execute("INSERT INTO quantum_nvp_port_mapping SELECT neutron_id as " + "quantum_id, nsx_port_id as nvp_id from" + " neutron_nsx_port_mappings") + op.drop_table('neutron_nsx_port_mappings') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py new file mode 100644 index 00000000..8806e242 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py @@ -0,0 +1,82 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add agent management extension model support + +Revision ID: 511471cc46b +Revises: 363468ac592c +Create Date: 2013-02-18 05:09:32.523460 + +""" + +# revision identifiers, used by Alembic. 
+revision = '511471cc46b' +down_revision = '363468ac592c' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', + 'neutron.plugins.vmware.plugin.NsxPlugin', + 'neutron.plugins.vmware.plugin.NsxServicePlugin', + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', + 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', + 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin', + 'neutron.plugins.ml2.plugin.Ml2Plugin', +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'agents', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('agent_type', sa.String(length=255), nullable=False), + sa.Column('binary', sa.String(length=255), nullable=False), + sa.Column('topic', sa.String(length=255), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('admin_state_up', sa.Boolean(), nullable=False), + sa.Column('created_at', sa.DateTime(), nullable=False), + sa.Column('started_at', sa.DateTime(), nullable=False), + sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('configurations', sa.String(length=4095), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('agents') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py new file mode 100644 index 00000000..0d31ce91 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py @@ -0,0 +1,61 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""LBaaS Pool scheduler + +Revision ID: 52c5e4a18807 +Revises: 2032abe8edac +Create Date: 2013-06-14 03:23:47.815865 + +""" + +# revision identifiers, used by Alembic. 
+revision = '52c5e4a18807' +down_revision = '2032abe8edac' + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'poolloadbalanceragentbindings', + sa.Column('pool_id', sa.String(length=36), nullable=False), + sa.Column('agent_id', sa.String(length=36), + nullable=False), + sa.ForeignKeyConstraint(['agent_id'], ['agents.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('pool_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('poolloadbalanceragentbindings') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py new file mode 100644 index 00000000..f9c1e0f5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py @@ -0,0 +1,181 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Support for VPNaaS + +Revision ID: 52ff27f7567a +Revises: 39cf3f799352 +Create Date: 2013-07-14 23:04:13.395955 + +""" + +# revision identifiers, used by Alembic. 
+revision = '52ff27f7567a' +down_revision = '39cf3f799352' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.vpn.plugin.VPNDriverPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ikepolicies', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column( + 'auth_algorithm', + sa.Enum('sha1', name='vpn_auth_algorithms'), nullable=False), + sa.Column( + 'encryption_algorithm', + sa.Enum('3des', 'aes-128', 'aes-256', 'aes-192', + name='vpn_encrypt_algorithms'), nullable=False), + sa.Column( + 'phase1_negotiation_mode', + sa.Enum('main', name='ike_phase1_mode'), nullable=False), + sa.Column( + 'lifetime_units', + sa.Enum('seconds', 'kilobytes', name='vpn_lifetime_units'), + nullable=False), + sa.Column('lifetime_value', sa.Integer(), nullable=False), + sa.Column( + 'ike_version', + sa.Enum('v1', 'v2', name='ike_versions'), nullable=False), + sa.Column( + 'pfs', + sa.Enum('group2', 'group5', 'group14', name='vpn_pfs'), + nullable=False), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ipsecpolicies', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column( + 'transform_protocol', + sa.Enum('esp', 'ah', 'ah-esp', name='ipsec_transform_protocols'), + nullable=False), + sa.Column( + 'auth_algorithm', + sa.Enum('sha1', name='vpn_auth_algorithms'), nullable=False), + sa.Column( + 'encryption_algorithm', + sa.Enum( + '3des', 'aes-128', + 'aes-256', 'aes-192', name='vpn_encrypt_algorithms'), + nullable=False), + sa.Column( + 'encapsulation_mode', + sa.Enum('tunnel', 'transport', name='ipsec_encapsulations'), + nullable=False), + sa.Column( + 'lifetime_units', + sa.Enum( + 'seconds', 'kilobytes', + name='vpn_lifetime_units'), nullable=False), + sa.Column( + 'lifetime_value', sa.Integer(), nullable=False), + sa.Column( + 'pfs', + sa.Enum( + 'group2', 'group5', 'group14', name='vpn_pfs'), + nullable=False), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'vpnservices', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=16), nullable=False), + sa.Column('admin_state_up', sa.Boolean(), nullable=False), + sa.Column('subnet_id', sa.String(length=36), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ), + sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ipsec_site_connections', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('peer_address', 
sa.String(length=64), nullable=False), + sa.Column('peer_id', sa.String(length=255), nullable=False), + sa.Column('route_mode', sa.String(length=8), nullable=False), + sa.Column('mtu', sa.Integer(), nullable=False), + sa.Column( + 'initiator', + sa.Enum( + 'bi-directional', 'response-only', name='vpn_initiators'), + nullable=False), + sa.Column('auth_mode', sa.String(length=16), nullable=False), + sa.Column('psk', sa.String(length=255), nullable=False), + sa.Column( + 'dpd_action', + sa.Enum( + 'hold', 'clear', 'restart', + 'disabled', 'restart-by-peer', name='vpn_dpd_actions'), + nullable=False), + sa.Column('dpd_interval', sa.Integer(), nullable=False), + sa.Column('dpd_timeout', sa.Integer(), nullable=False), + sa.Column('status', sa.String(length=16), nullable=False), + sa.Column('admin_state_up', sa.Boolean(), nullable=False), + sa.Column('vpnservice_id', sa.String(length=36), nullable=False), + sa.Column('ipsecpolicy_id', sa.String(length=36), nullable=False), + sa.Column('ikepolicy_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['ikepolicy_id'], ['ikepolicies.id']), + sa.ForeignKeyConstraint(['ipsecpolicy_id'], ['ipsecpolicies.id']), + sa.ForeignKeyConstraint(['vpnservice_id'], ['vpnservices.id']), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ipsecpeercidrs', + sa.Column('cidr', sa.String(length=32), nullable=False), + sa.Column('ipsec_site_connection_id', + sa.String(length=36), + nullable=False), + sa.ForeignKeyConstraint(['ipsec_site_connection_id'], + ['ipsec_site_connections.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('cidr', 'ipsec_site_connection_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ipsecpeercidrs') + op.drop_table('ipsec_site_connections') + op.drop_table('vpnservices') + op.drop_table('ipsecpolicies') + op.drop_table('ikepolicies') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py new file mode 100644 index 00000000..f573b1ca --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py @@ -0,0 +1,64 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Extra dhcp opts support + +Revision ID: 53bbd27ec841 +Revises: 40dffbf4b549 +Create Date: 2013-05-09 15:36:50.485036 + +""" + +# revision identifiers, used by Alembic. 
+revision = '53bbd27ec841' +down_revision = '40dffbf4b549' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'extradhcpopts', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('opt_name', sa.String(length=64), nullable=False), + sa.Column('opt_value', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('port_id', 'opt_name', name='uidx_portid_optname')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('extradhcpopts') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py new file mode 100644 index 00000000..e902e3ba --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py @@ -0,0 +1,161 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""'DB support for load balancing service + +Revision ID: 54c2c487e913 +Revises: 38335592a0dc +Create Date: 2013-02-04 16:32:32.048731 + +""" + +# revision identifiers, used by Alembic. +revision = '54c2c487e913' +down_revision = '38335592a0dc' + +# We need migration_for_plugins to be an empty list to avoid creating tables, +# if there's no plugin that implements the LBaaS extension. 
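+# Illustrative note: as written, an empty migration_for_plugins list makes
+# should_run() return False for any active_plugins value, so the upgrade
+# below is skipped entirely unless a plugin class (for example the LBaaS
+# service plugin) is added to the list:
+#
+#   should_run(active_plugins, [])  # -> False for every deployment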
+ +migration_for_plugins = [] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'vips', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'description', sa.String(255), nullable=True), + sa.Column(u'port_id', sa.String(36), nullable=True), + sa.Column(u'protocol_port', sa.Integer(), nullable=False), + sa.Column(u'protocol', + sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False), + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.Column(u'connection_limit', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ), + sa.UniqueConstraint('pool_id'), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'poolmonitorassociations', + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'monitor_id', sa.String(36), nullable=False), + sa.ForeignKeyConstraint(['monitor_id'], [u'healthmonitors.id'], ), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'pool_id', u'monitor_id') + ) + op.create_table( + u'sessionpersistences', + sa.Column(u'vip_id', sa.String(36), nullable=False), + sa.Column(u'type', + sa.Enum("SOURCE_IP", + "HTTP_COOKIE", + "APP_COOKIE", + name="sesssionpersistences_type"), + nullable=False), + sa.Column(u'cookie_name', sa.String(1024), nullable=True), + sa.ForeignKeyConstraint(['vip_id'], [u'vips.id'], ), + sa.PrimaryKeyConstraint(u'vip_id') + ) + op.create_table( + u'pools', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'vip_id', sa.String(36), nullable=True), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'description', sa.String(255), nullable=True), + sa.Column(u'subnet_id', sa.String(36), nullable=False), + sa.Column(u'protocol', + sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False), + sa.Column(u'lb_method', + sa.Enum("ROUND_ROBIN", + "LEAST_CONNECTIONS", + "SOURCE_IP", + name="pools_lb_method"), + nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['vip_id'], [u'vips.id'], ), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'healthmonitors', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'type', + sa.Enum("PING", + "TCP", + "HTTP", + "HTTPS", + name="healthmontiors_type"), + nullable=False), + sa.Column(u'delay', sa.Integer(), nullable=False), + sa.Column(u'timeout', sa.Integer(), nullable=False), + sa.Column(u'max_retries', sa.Integer(), nullable=False), + sa.Column(u'http_method', sa.String(16), nullable=True), + sa.Column(u'url_path', sa.String(255), nullable=True), + sa.Column(u'expected_codes', sa.String(64), nullable=True), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'members', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + 
sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'address', sa.String(64), nullable=False), + sa.Column(u'protocol_port', sa.Integer(), nullable=False), + sa.Column(u'weight', sa.Integer(), nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'poolstatisticss', + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'bytes_in', sa.Integer(), nullable=False), + sa.Column(u'bytes_out', sa.Integer(), nullable=False), + sa.Column(u'active_connections', sa.Integer(), nullable=False), + sa.Column(u'total_connections', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'pool_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'poolstatisticss') + op.drop_table(u'members') + op.drop_table(u'healthmonitors') + op.drop_table(u'pools') + op.drop_table(u'sessionpersistences') + op.drop_table(u'poolmonitorassociations') + op.drop_table(u'vips') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py new file mode 100644 index 00000000..626c26fb --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py @@ -0,0 +1,54 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_not_null_peer_address + +Revision ID: 54f7549a0e5f +Revises: 33dd0a9fa487 +Create Date: 2014-03-17 11:00:17.539028 + +""" + +# revision identifiers, used by Alembic. 
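+# Illustrative note: the alter_column() below tightens peer_address to
+# NOT NULL; on most backends this fails if existing rows still contain NULL,
+# so a data backfill would have to run first, e.g. (hypothetical placeholder
+# value, not part of this migration):
+#
+#   op.execute("UPDATE ipsec_site_connections SET peer_address = '0.0.0.0' "
+#              "WHERE peer_address IS NULL")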
+revision = '54f7549a0e5f' +down_revision = 'icehouse' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.vpn.plugin.VPNDriverPlugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ipsec_site_connections', 'peer_address', + existing_type=sa.String(255), nullable=False) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ipsec_site_connections', 'peer_address', nullable=True, + existing_type=sa.String(255)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py new file mode 100644 index 00000000..63646346 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py @@ -0,0 +1,79 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""New service types framework (service providers) + +Revision ID: 557edfc53098 +Revises: 52c5e4a18807 +Create Date: 2013-06-29 21:10:41.283358 + +""" + +# revision identifiers, used by Alembic. 
+revision = '557edfc53098' +down_revision = '52c5e4a18807' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.create_table( + 'providerresourceassociations', + sa.Column('provider_name', sa.String(length=255), nullable=False), + sa.Column('resource_id', sa.String(length=36), + nullable=False, unique=True), + ) + + for table in ('servicedefinitions', 'servicetypes'): + op.execute("DROP TABLE IF EXISTS %s" % table) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + op.create_table( + 'servicetypes', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255)), + sa.Column('name', sa.String(255)), + sa.Column('description', sa.String(255)), + sa.Column('default', sa.Boolean(), nullable=False, default=False), + sa.Column('num_instances', sa.Integer, default=0), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'servicedefinitions', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('service_class', sa.String(255)), + sa.Column('plugin', sa.String(255)), + sa.Column('driver', sa.String(255)), + sa.Column('service_type_id', sa.String(36), + sa.ForeignKey('servicetypes.id', + ondelete='CASCADE')), + sa.PrimaryKeyConstraint('id', 'service_class') + ) + op.drop_table('providerresourceassociations') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py new file mode 100644 index 00000000..931e078c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py @@ -0,0 +1,75 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""metering + +Revision ID: 569e98a8132b +Revises: 13de305df56e +Create Date: 2013-07-17 15:38:36.254595 + +""" + +# revision identifiers, used by Alembic. +revision = '569e98a8132b' +down_revision = 'f9263d6df56' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = ['neutron.services.metering.metering_plugin.' 
+ 'MeteringPlugin'] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('meteringlabelrules') + op.drop_table('meteringlabels') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table('meteringlabels', + sa.Column('tenant_id', sa.String(length=255), + nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), + nullable=True), + sa.Column('description', sa.String(length=255), + nullable=True), + sa.PrimaryKeyConstraint('id')) + op.create_table('meteringlabelrules', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='meteringlabels_direction'), + nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=64), + nullable=True), + sa.Column('metering_label_id', sa.String(length=36), + nullable=False), + sa.Column('excluded', sa.Boolean(), + autoincrement=False, nullable=True), + sa.ForeignKeyConstraint(['metering_label_id'], + ['meteringlabels.id'], + name='meteringlabelrules_ibfk_1'), + sa.PrimaryKeyConstraint('id')) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py new file mode 100644 index 00000000..d6a8ed5f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py @@ -0,0 +1,69 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add tables for router rules support + +Revision ID: 5918cbddab04 +Revises: 3cbf70257c28 +Create Date: 2013-06-16 02:20:07.024752 + +""" + +# revision identifiers, used by Alembic. 
+revision = '5918cbddab04' +down_revision = '3cbf70257c28' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table('routerrules', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('source', sa.String(length=64), nullable=False), + sa.Column('destination', sa.String(length=64), + nullable=False), + sa.Column('action', sa.String(length=10), nullable=False), + sa.Column('router_id', sa.String(length=36), + nullable=True), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id')) + op.create_table('nexthops', + sa.Column('rule_id', sa.Integer(), nullable=False), + sa.Column('nexthop', sa.String(length=64), nullable=False), + sa.ForeignKeyConstraint(['rule_id'], ['routerrules.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('rule_id', 'nexthop')) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('nexthops') + op.drop_table('routerrules') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py new file mode 100644 index 00000000..44df7061 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py @@ -0,0 +1,72 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost + +"""ryu + +This retroactively provides migration support for +https://review.openstack.org/#/c/11204/ + +Revision ID: 5a875d0e5c +Revises: 2c4af419145b +Create Date: 2012-12-18 12:32:04.482477 + +""" + + +# revision identifiers, used by Alembic. 
+revision = '5a875d0e5c' +down_revision = '2c4af419145b' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'tunnelkeys', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('tunnel_key', sa.Integer(), autoincrement=False, + nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('tunnel_key') + ) + + op.create_table( + 'tunnelkeylasts', + sa.Column('last_key', sa.Integer(), autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('last_key') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('tunnelkeylasts') + op.drop_table('tunnelkeys') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py new file mode 100644 index 00000000..cb46c996 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py @@ -0,0 +1,82 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ml2_initial + +Revision ID: 5ac71e65402c +Revises: 128e042a2b68 +Create Date: 2013-05-27 16:08:40.853821 + +""" + +# revision identifiers, used by Alembic. +revision = '5ac71e65402c' +down_revision = '128e042a2b68' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'ml2_network_segments', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=32), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + sa.Column('segmentation_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'ml2_vlan_allocations', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'vlan_id') + ) + op.create_table( + 'ml2_flat_allocations', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.PrimaryKeyConstraint('physical_network') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('ml2_network_segments') + op.drop_table('ml2_flat_allocations') + op.drop_table('ml2_vlan_allocations') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py new file mode 100644 index 00000000..71672532 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py @@ -0,0 +1,62 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add unique constraint for id column of TunnelEndpoint + +Revision ID: 63afba73813 +Revises: 3c6e57a23db4 +Create Date: 2013-04-30 13:53:31.717450 + +""" + +# revision identifiers, used by Alembic. 
+revision = '63afba73813' +down_revision = '3c6e57a23db4' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', +] + +from alembic import op + +from neutron.db import migration + + +CONSTRAINT_NAME = 'uniq_ovs_tunnel_endpoints0id' +TABLE_NAME = 'ovs_tunnel_endpoints' + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_unique_constraint( + name=CONSTRAINT_NAME, + source=TABLE_NAME, + local_cols=['id'] + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint( + CONSTRAINT_NAME, + TABLE_NAME, + type_='unique' + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py new file mode 100644 index 00000000..eeeb3172 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py @@ -0,0 +1,66 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""NEC OpenFlow Router + +Revision ID: 66a59a7f516 +Revises: 32a65f71af51 +Create Date: 2013-09-03 22:16:31.446031 + +""" + +# revision identifiers, used by Alembic. 
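+# Editor's note: the unique-constraint migration above names its constraint
+# by joining a table token and the covered column with '0'
+# ('uniq_ovs_tunnel_endpoints0id').  A tiny helper capturing that pattern
+# (illustrative only, not a neutron API):
+def _uniq_constraint_name_sketch(table, columns):
+    return 'uniq_%s0%s' % (table, '0'.join(columns))
+
+# _uniq_constraint_name_sketch('ovs_tunnel_endpoints', ['id']) returns
+# 'uniq_ovs_tunnel_endpoints0id', matching CONSTRAINT_NAME above.
+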
+revision = '66a59a7f516' +down_revision = '32a65f71af51' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.nec.nec_plugin.NECPluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'ofcroutermappings', + sa.Column('ofc_id', sa.String(length=255), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('quantum_id'), + sa.UniqueConstraint('ofc_id'), + ) + op.create_table( + 'routerproviders', + sa.Column('provider', sa.String(length=255), nullable=True), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('router_id'), + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('routerproviders') + op.drop_table('ofcroutermappings') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py new file mode 100644 index 00000000..e304fdc2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py @@ -0,0 +1,54 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_not_null_vlan_id_cisco + +Revision ID: 6be312499f9 +Revises: d06e871c0d5 +Create Date: 2014-03-27 14:38:12.571173 + +""" + +# revision identifiers, used by Alembic. 
+revision = '6be312499f9' +down_revision = 'd06e871c0d5' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('cisco_nexusport_bindings', 'vlan_id', nullable=False, + existing_type=sa.Integer) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('cisco_nexusport_bindings', 'vlan_id', nullable=True, + existing_type=sa.Integer) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py new file mode 100644 index 00000000..d40718f1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py @@ -0,0 +1,57 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove bigswitch port tracking table + +Revision ID: 86cf4d88bd3 +Revises: 569e98a8132b +Create Date: 2013-08-13 21:59:04.373496 + +""" + +# revision identifiers, used by Alembic. +revision = '86cf4d88bd3' +down_revision = '569e98a8132b' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('portlocations') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table('portlocations', + sa.Column('port_id', sa.String(length=255), + primary_key=True, nullable=False), + sa.Column('host_id', + sa.String(length=255), nullable=False) + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/8f682276ee4_ryu_plugin_quota.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/8f682276ee4_ryu_plugin_quota.py new file mode 100644 index 00000000..72a06887 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/8f682276ee4_ryu_plugin_quota.py @@ -0,0 +1,59 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""ryu plugin quota + +Revision ID: 8f682276ee4 +Revises: ed93525fd003 +Create Date: 2014-01-07 15:47:17.349425 + +""" + +# revision identifiers, used by Alembic. +revision = '8f682276ee4' +down_revision = 'ed93525fd003' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('resource', sa.String(length=255), nullable=True), + sa.Column('limit', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('quotas') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/HEAD b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/HEAD new file mode 100644 index 00000000..da886bc5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/HEAD @@ -0,0 +1 @@ +2026156eab2f diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py new file mode 100644 index 00000000..b276ae7f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py @@ -0,0 +1,65 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""lb stats + +Revision ID: abc88c33f74f +Revises: 3d2585038b95 +Create Date: 2014-02-24 20:14:59.577972 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'abc88c33f74f' +down_revision = '3d2585038b95' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('poolstatisticss', 'bytes_in', + type_=sa.BigInteger(), existing_type=sa.Integer()) + op.alter_column('poolstatisticss', 'bytes_out', + type_=sa.BigInteger(), existing_type=sa.Integer()) + op.alter_column('poolstatisticss', 'active_connections', + type_=sa.BigInteger(), existing_type=sa.Integer()) + op.alter_column('poolstatisticss', 'total_connections', + type_=sa.BigInteger(), existing_type=sa.Integer()) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('poolstatisticss', 'bytes_in', + type_=sa.Integer(), existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'bytes_out', + type_=sa.Integer(), existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'active_connections', + type_=sa.Integer(), existing_type=sa.BigInteger()) + op.alter_column('poolstatisticss', 'total_connections', + type_=sa.Integer(), existing_type=sa.BigInteger()) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py new file mode 100644 index 00000000..de82ce50 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py @@ -0,0 +1,52 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_length_of_protocol_field + +Revision ID: b65aa907aec +Revises: 2447ad0e9585 +Create Date: 2014-03-21 16:30:10.626649 + +""" + +# revision identifiers, used by Alembic. 
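+# Editor's note: the lb-stats migration above widens the poolstatisticss
+# counters from Integer to BigInteger because a signed 32-bit column tops out
+# at 2**31 - 1 (about 2 GiB when counting bytes), which a busy load balancer
+# can exceed; BigInteger typically maps to a signed 64-bit column.  A quick
+# plain-Python illustration of the two limits:
+INT32_MAX = 2 ** 31 - 1   # 2147483647, the ceiling of the old Integer column
+INT64_MAX = 2 ** 63 - 1   # the ceiling BigInteger usually provides
+assert INT32_MAX < 10 * 1024 ** 3 < INT64_MAX   # e.g. a 10 GiB byte counter
+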
+revision = 'b65aa907aec' +down_revision = '1e5dd1d09b22' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.firewall.fwaas_plugin.FirewallPlugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('firewall_rules', 'protocol', type_=sa.String(40), + existing_nullable=True) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py new file mode 100644 index 00000000..fa1cc255 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py @@ -0,0 +1,58 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Remove cisco_vlan_bindings table + +Revision ID: b7a8863760e +Revises: 3cabb850f4a5 +Create Date: 2013-07-03 19:15:19.143175 + +""" + +# revision identifiers, used by Alembic. +revision = 'b7a8863760e' +down_revision = '3cabb850f4a5' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_vlan_bindings') + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_vlan_bindings', + sa.Column('vlan_id', sa.Integer(display_width=11), nullable=False), + sa.Column('vlan_name', sa.String(length=255), nullable=True), + sa.Column('network_id', sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint('vlan_id') + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py new file mode 100644 index 00000000..5ac2e431 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py @@ -0,0 +1,148 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Cisco N1KV tables + +Revision ID: c88b6b5fea3 +Revises: 263772d65691 +Create Date: 2013-08-06 15:08:32.651975 + +""" + +# revision identifiers, used by Alembic. +revision = 'c88b6b5fea3' +down_revision = '263772d65691' + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + +vlan_type = sa.Enum('vlan', 'vxlan', name='vlan_type') +network_type = sa.Enum('network', 'policy', name='network_type') + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_column('cisco_credentials', 'tenant_id') + op.add_column( + 'cisco_credentials', + sa.Column('type', sa.String(length=255), nullable=True) + ) + op.create_table( + 'cisco_policy_profiles', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'cisco_n1kv_vmnetworks', + sa.Column('name', sa.String(length=80), nullable=False), + sa.Column('profile_id', sa.String(length=36), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('port_count', sa.Integer(), autoincrement=False, + nullable=True), + sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']), + sa.PrimaryKeyConstraint('name') + ) + op.create_table( + 'cisco_n1kv_vxlan_allocations', + sa.Column('vxlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('vxlan_id') + ) + op.create_table( + 'cisco_network_profiles', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('segment_type', vlan_type, nullable=False), + sa.Column('segment_range', sa.String(length=255), nullable=True), + sa.Column('multicast_ip_index', sa.Integer(), autoincrement=False, + nullable=True), + sa.Column('multicast_ip_range', sa.String(length=255), nullable=True), + sa.Column('physical_network', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'cisco_n1kv_profile_bindings', + sa.Column('profile_type', network_type, nullable=True), + sa.Column('tenant_id', sa.String(length=36), nullable=False), + sa.Column('profile_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('tenant_id', 'profile_id') + ) + op.create_table( + 'cisco_n1kv_port_bindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('profile_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']), + sa.PrimaryKeyConstraint('port_id') + ) + op.create_table( + 'cisco_n1kv_vlan_allocations', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.Column('vlan_id', + sa.Integer(), + autoincrement=False, + nullable=False), + sa.Column('allocated', + sa.Boolean(), 
+ autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'vlan_id') + ) + op.create_table( + 'cisco_n1kv_network_bindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=32), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + sa.Column('segmentation_id', sa.Integer(), autoincrement=False, + nullable=True), + sa.Column('multicast_ip', sa.String(length=32), nullable=True), + sa.Column('profile_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['profile_id'], ['cisco_network_profiles.id']), + sa.PrimaryKeyConstraint('network_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_n1kv_network_bindings') + op.drop_table('cisco_n1kv_vlan_allocations') + op.drop_table('cisco_n1kv_port_bindings') + op.drop_table('cisco_n1kv_profile_bindings') + network_type.drop(op.get_bind(), checkfirst=False) + op.drop_table('cisco_network_profiles') + vlan_type.drop(op.get_bind(), checkfirst=False) + op.drop_table('cisco_n1kv_vxlan_allocations') + op.drop_table('cisco_n1kv_vmnetworks') + op.drop_table('cisco_policy_profiles') + op.drop_column('cisco_credentials', 'type') + op.add_column( + 'cisco_credentials', + sa.Column('tenant_id', sa.String(length=255), nullable=False) + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py new file mode 100644 index 00000000..be99747c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py @@ -0,0 +1,54 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""set_admin_state_up_not_null_ml2 + +Revision ID: d06e871c0d5 +Revises: 2447ad0e9585 +Create Date: 2014-03-21 17:22:20.545186 + +""" + +# revision identifiers, used by Alembic. 
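+# Editor's note: the Cisco N1KV migration above is the only one in this
+# series that declares named Enum types at module level and drops them
+# explicitly on downgrade.  On PostgreSQL a named ENUM is a standalone schema
+# object, so dropping the tables alone would leave it behind.  A minimal
+# self-contained sketch of that create/drop pairing (illustrative table and
+# type names, not part of this patch):
+def _enum_lifecycle_sketch():
+    from alembic import op
+    import sqlalchemy as sa
+    kind = sa.Enum('vlan', 'vxlan', name='illustrative_segment_type')
+    op.create_table('illustrative_profiles',
+                    sa.Column('segment_type', kind, nullable=False))
+    # ... later, on downgrade:
+    op.drop_table('illustrative_profiles')
+    kind.drop(op.get_bind(), checkfirst=False)
+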
+revision = 'd06e871c0d5' +down_revision = '4eca4a84f08a' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ml2_brocadeports', 'admin_state_up', nullable=False, + existing_type=sa.Boolean) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.alter_column('ml2_brocadeports', 'admin_state_up', nullable=True, + existing_type=sa.Boolean) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py new file mode 100644 index 00000000..cfc86a55 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py @@ -0,0 +1,63 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add unique constraint to members + +Revision ID: e197124d4b9 +Revises: havana +Create Date: 2013-11-17 10:09:37.728903 + +""" + +# revision identifiers, used by Alembic. +revision = 'e197124d4b9' +down_revision = 'havana' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', + 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin', +] + +from alembic import op + +from neutron.db import migration + + +CONSTRAINT_NAME = 'uniq_member0pool_id0address0port' +TABLE_NAME = 'members' + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_unique_constraint( + name=CONSTRAINT_NAME, + source=TABLE_NAME, + local_cols=['pool_id', 'address', 'protocol_port'] + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_constraint( + CONSTRAINT_NAME, + TABLE_NAME, + type_='unique' + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py new file mode 100644 index 00000000..3f0bdd93 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py @@ -0,0 +1,60 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add cisco_provider_networks table + +Revision ID: e6b16a30d97 +Revises: 557edfc53098 +Create Date: 2013-07-18 21:46:12.792504 + +""" + +# revision identifiers, used by Alembic. +revision = 'e6b16a30d97' +down_revision = '557edfc53098' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.cisco.network_plugin.PluginV2' +] + +from alembic import op +import sqlalchemy as sa + + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'cisco_provider_networks', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=255), nullable=False), + sa.Column('segmentation_id', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('cisco_provider_networks') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/ed93525fd003_bigswitch_quota.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/ed93525fd003_bigswitch_quota.py new file mode 100644 index 00000000..f8980c6c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/ed93525fd003_bigswitch_quota.py @@ -0,0 +1,62 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""bigswitch_quota + +Revision ID: ed93525fd003 +Revises: 50e86cb2637a +Create Date: 2014-01-05 10:59:19.860397 + +""" + +# revision identifiers, used by Alembic. +revision = 'ed93525fd003' +down_revision = '50e86cb2637a' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'quotas', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('resource', sa.String(length=255), nullable=True), + sa.Column('limit', sa.Integer(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('quotas') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f44ab9871cd6_bsn_security_groups.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f44ab9871cd6_bsn_security_groups.py new file mode 100644 index 00000000..ec1e9d98 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f44ab9871cd6_bsn_security_groups.py @@ -0,0 +1,93 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""bsn_security_groups + +Revision ID: f44ab9871cd6 +Revises: e766b19a3bb +Create Date: 2014-02-26 17:43:43.051078 + +""" + +# revision identifiers, used by Alembic. +revision = 'f44ab9871cd6' +down_revision = 'e766b19a3bb' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + 'securitygroups', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygrouprules', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.Column('remote_group_id', sa.String(length=36), nullable=True), + sa.Column('direction', + sa.Enum('ingress', 'egress', + name='securitygrouprules_direction'), + nullable=True), + sa.Column('ethertype', sa.String(length=40), nullable=True), + sa.Column('protocol', sa.String(length=40), nullable=True), + sa.Column('port_range_min', sa.Integer(), nullable=True), + sa.Column('port_range_max', sa.Integer(), nullable=True), + sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + op.create_table( + 'securitygroupportbindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('security_group_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), + sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']), + sa.PrimaryKeyConstraint('port_id', 'security_group_id') + ) + ### end Alembic commands ### + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('securitygroupportbindings') + op.drop_table('securitygrouprules') + op.drop_table('securitygroups') + ### end Alembic commands ### diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py new file mode 100644 index 00000000..8432a6db --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py @@ -0,0 +1,160 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""DB support for load balancing service (havana) + +Revision ID: f489cf14a79c +Revises: grizzly +Create Date: 2013-02-04 16:32:32.048731 + +""" + +# revision identifiers, used by Alembic. 
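+# Editor's note: the tables created below form the Havana LBaaS schema:
+# vips -> pools is effectively one-to-one (vips.pool_id is NOT NULL and
+# carries a UniqueConstraint), pools fan out to members and, through
+# poolmonitorassociations, to healthmonitors, while sessionpersistences and
+# poolstatisticss each key directly off their parent row's id.
+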
+revision = 'f489cf14a79c' +down_revision = 'grizzly' + +migration_for_plugins = [ + 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin', +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + u'vips', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'description', sa.String(255), nullable=True), + sa.Column(u'port_id', sa.String(36), nullable=True), + sa.Column(u'protocol_port', sa.Integer(), nullable=False), + sa.Column(u'protocol', + sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False), + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.Column(u'connection_limit', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ), + sa.UniqueConstraint('pool_id'), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'sessionpersistences', + sa.Column(u'vip_id', sa.String(36), nullable=False), + sa.Column(u'type', + sa.Enum("SOURCE_IP", + "HTTP_COOKIE", + "APP_COOKIE", + name="sesssionpersistences_type"), + nullable=False), + sa.Column(u'cookie_name', sa.String(1024), nullable=True), + sa.ForeignKeyConstraint(['vip_id'], [u'vips.id'], ), + sa.PrimaryKeyConstraint(u'vip_id') + ) + op.create_table( + u'pools', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'vip_id', sa.String(36), nullable=True), + sa.Column(u'name', sa.String(255), nullable=True), + sa.Column(u'description', sa.String(255), nullable=True), + sa.Column(u'subnet_id', sa.String(36), nullable=False), + sa.Column(u'protocol', + sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), + nullable=False), + sa.Column(u'lb_method', + sa.Enum("ROUND_ROBIN", + "LEAST_CONNECTIONS", + "SOURCE_IP", + name="pools_lb_method"), + nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['vip_id'], [u'vips.id'], ), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'healthmonitors', + sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'type', + sa.Enum("PING", + "TCP", + "HTTP", + "HTTPS", + name="healthmontiors_type"), + nullable=False), + sa.Column(u'delay', sa.Integer(), nullable=False), + sa.Column(u'timeout', sa.Integer(), nullable=False), + sa.Column(u'max_retries', sa.Integer(), nullable=False), + sa.Column(u'http_method', sa.String(16), nullable=True), + sa.Column(u'url_path', sa.String(255), nullable=True), + sa.Column(u'expected_codes', sa.String(64), nullable=True), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'poolmonitorassociations', + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'monitor_id', sa.String(36), nullable=False), + sa.ForeignKeyConstraint(['monitor_id'], [u'healthmonitors.id'], ), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'pool_id', u'monitor_id') + ) + op.create_table( + u'members', + 
sa.Column(u'tenant_id', sa.String(255), nullable=True), + sa.Column(u'id', sa.String(36), nullable=False), + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'address', sa.String(64), nullable=False), + sa.Column(u'protocol_port', sa.Integer(), nullable=False), + sa.Column(u'weight', sa.Integer(), nullable=False), + sa.Column(u'status', sa.String(16), nullable=False), + sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'id') + ) + op.create_table( + u'poolstatisticss', + sa.Column(u'pool_id', sa.String(36), nullable=False), + sa.Column(u'bytes_in', sa.Integer(), nullable=False), + sa.Column(u'bytes_out', sa.Integer(), nullable=False), + sa.Column(u'active_connections', sa.Integer(), nullable=False), + sa.Column(u'total_connections', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], ), + sa.PrimaryKeyConstraint(u'pool_id') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table(u'poolstatisticss') + op.drop_table(u'members') + op.drop_table(u'poolmonitorassociations') + op.drop_table(u'healthmonitors') + op.drop_table(u'pools') + op.drop_table(u'sessionpersistences') + op.drop_table(u'vips') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f9263d6df56_remove_dhcp_lease.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f9263d6df56_remove_dhcp_lease.py new file mode 100644 index 00000000..54b59234 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/f9263d6df56_remove_dhcp_lease.py @@ -0,0 +1,44 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""remove_dhcp_lease + +Revision ID: f9263d6df56 +Revises: c88b6b5fea3 +Create Date: 2013-07-17 12:31:33.731197 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'f9263d6df56' +down_revision = 'c88b6b5fea3' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + '*' +] + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.drop_column('ipallocations', u'expiration') + + +def downgrade(active_plugins=None, options=None): + op.add_column('ipallocations', sa.Column(u'expiration', sa.DateTime(), + nullable=True)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/folsom_initial.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/folsom_initial.py new file mode 100644 index 00000000..3e148703 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/folsom_initial.py @@ -0,0 +1,561 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author Mark McClain (DreamHost) + +"""folsom initial database + +Revision ID: folsom +Revises: None +Create Date: 2012-12-03 09:14:50.579765 + +""" + +PLUGINS = { + 'bigswitch': 'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2', + 'brocade': 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2', + 'cisco': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'lbr': 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2', + 'meta': 'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2', + 'ml2': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'mlnx': 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin', + 'nec': 'neutron.plugins.nec.nec_plugin.NECPluginV2', + 'nvp': 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2', + 'ocnvsd': 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2', + 'ovs': 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2', + 'plumgrid': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.' + 'NeutronPluginPLUMgridV2', + 'ryu': 'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2', + 'ibm': 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2', +} + +L3_CAPABLE = [ + PLUGINS['lbr'], + PLUGINS['meta'], + PLUGINS['ml2'], + PLUGINS['mlnx'], + PLUGINS['nec'], + PLUGINS['ocnvsd'], + PLUGINS['ovs'], + PLUGINS['ryu'], + PLUGINS['brocade'], + PLUGINS['plumgrid'], + PLUGINS['ibm'], +] + +FOLSOM_QUOTA = [ + PLUGINS['lbr'], + PLUGINS['ml2'], + PLUGINS['nvp'], + PLUGINS['ocnvsd'], + PLUGINS['ovs'], +] + + +# revision identifiers, used by Alembic. +revision = 'folsom' +down_revision = None + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration +from neutron.db.migration.alembic_migrations import common_ext_ops +# NOTE: This is a special migration that creates a Folsom compatible database. 
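+# Illustrative only: a database is typically brought to this revision through
+# the neutron-db-manage wrapper around alembic (see neutron/db/migration/cli.py
+# in this patch); the config-file path below is deployment-specific.
+#
+#     neutron-db-manage --config-file /etc/neutron/neutron.conf upgrade folsom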
+ + +def upgrade(active_plugins=None, options=None): + # general model + upgrade_base() + + if migration.should_run(active_plugins, L3_CAPABLE): + common_ext_ops.upgrade_l3() + + if migration.should_run(active_plugins, FOLSOM_QUOTA): + common_ext_ops.upgrade_quota(options) + + if PLUGINS['lbr'] in active_plugins: + upgrade_linuxbridge() + elif PLUGINS['ovs'] in active_plugins: + upgrade_ovs() + elif PLUGINS['cisco'] in active_plugins: + upgrade_cisco() + # Cisco plugin imports OVS models too + upgrade_ovs() + elif PLUGINS['meta'] in active_plugins: + upgrade_meta() + elif PLUGINS['nec'] in active_plugins: + upgrade_nec() + elif PLUGINS['ryu'] in active_plugins: + upgrade_ryu() + elif PLUGINS['brocade'] in active_plugins: + upgrade_brocade() + # Brocade plugin imports linux bridge models too + upgrade_linuxbridge() + + +def upgrade_base(): + op.create_table( + 'networks', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('status', sa.String(length=16), nullable=True), + sa.Column('admin_state_up', sa.Boolean(), nullable=True), + sa.Column('shared', sa.Boolean(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'subnets', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('ip_version', sa.Integer(), nullable=False), + sa.Column('cidr', sa.String(length=64), nullable=False), + sa.Column('gateway_ip', sa.String(length=64), nullable=True), + sa.Column('enable_dhcp', sa.Boolean(), nullable=True), + sa.Column('shared', sa.Boolean(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'ports', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('mac_address', sa.String(length=32), nullable=False), + sa.Column('admin_state_up', sa.Boolean(), nullable=False), + sa.Column('status', sa.String(length=16), nullable=False), + sa.Column('device_id', sa.String(length=255), nullable=False), + sa.Column('device_owner', sa.String(length=255), nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'dnsnameservers', + sa.Column('address', sa.String(length=128), nullable=False), + sa.Column('subnet_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('address', 'subnet_id') + ) + + op.create_table( + 'ipallocations', + sa.Column('port_id', sa.String(length=36), nullable=True), + sa.Column('ip_address', sa.String(length=64), nullable=False), + sa.Column('subnet_id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('expiration', sa.DateTime(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], + ondelete='CASCADE'), + 
sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id') + ) + + op.create_table( + 'routes', + sa.Column('destination', sa.String(length=64), nullable=False), + sa.Column('nexthop', sa.String(length=64), nullable=False), + sa.Column('subnet_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id') + ) + + op.create_table( + 'ipallocationpools', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('subnet_id', sa.String(length=36), nullable=True), + sa.Column('first_ip', sa.String(length=64), nullable=False), + sa.Column('last_ip', sa.String(length=64), nullable=False), + sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'ipavailabilityranges', + sa.Column('allocation_pool_id', sa.String(length=36), nullable=False), + sa.Column('first_ip', sa.String(length=64), nullable=False), + sa.Column('last_ip', sa.String(length=64), nullable=False), + sa.ForeignKeyConstraint(['allocation_pool_id'], + ['ipallocationpools.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip') + ) + + +def upgrade_linuxbridge(): + op.create_table( + 'network_states', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'vlan_id') + ) + + op.create_table( + 'network_bindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id') + ) + + +def upgrade_ovs(): + op.create_table( + 'ovs_tunnel_endpoints', + sa.Column('ip_address', sa.String(length=64), nullable=False), + sa.Column('id', sa.Integer(), nullable=False), + sa.PrimaryKeyConstraint('ip_address') + ) + + op.create_table( + 'ovs_tunnel_ips', + sa.Column('ip_address', sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint('ip_address') + ) + + op.create_table( + 'ovs_vlan_allocations', + sa.Column('physical_network', sa.String(length=64), nullable=False), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'vlan_id') + ) + + op.create_table( + 'ovs_tunnel_allocations', + sa.Column('tunnel_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('tunnel_id') + ) + + op.create_table( + 'ovs_network_bindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=32), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + sa.Column('segmentation_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id') + ) + + +def upgrade_meta(): + op.create_table( + 'networkflavors', + sa.Column('flavor', sa.String(length=255)), + sa.Column('network_id', sa.String(length=36), nullable=False), + 
sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id') + ) + + op.create_table( + 'routerflavors', + sa.Column('flavor', sa.String(length=255)), + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['router_id'], ['routers.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('router_id') + ) + + +def upgrade_nec(): + op.create_table( + 'ofctenants', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'ofcnetworks', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'ofcports', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'ofcfilters', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('quantum_id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'portinfos', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('datapath_id', sa.String(length=36), nullable=False), + sa.Column('port_no', sa.Integer(), nullable=False), + sa.Column('vlan_id', sa.Integer(), nullable=False), + sa.Column('mac', sa.String(length=32), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'packetfilters', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('priority', sa.Integer(), nullable=False), + sa.Column('action', sa.String(16), nullable=False), + sa.Column('in_port', sa.String(36), nullable=False), + sa.Column('src_mac', sa.String(32), nullable=False), + sa.Column('dst_mac', sa.String(32), nullable=False), + sa.Column('eth_type', sa.Integer(), nullable=False), + sa.Column('src_cidr', sa.String(64), nullable=False), + sa.Column('dst_cidr', sa.String(64), nullable=False), + sa.Column('protocol', sa.String(16), nullable=False), + sa.Column('src_port', sa.Integer(), nullable=False), + sa.Column('dst_port', sa.Integer(), nullable=False), + sa.Column('admin_state_up', sa.Boolean(), nullable=False), + sa.Column('status', sa.String(16), nullable=False), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id') + ) + + +def upgrade_ryu(): + op.create_table( + 'ofp_server', + sa.Column('id', sa.Integer(), autoincrement=False, nullable=False), + sa.Column('address', sa.String(255)), + sa.Column('host_type', sa.String(255)), + sa.PrimaryKeyConstraint('id') + ) + + +def upgrade_brocade(): + op.create_table( + 'brocadenetworks', + sa.Column('id', sa.Integer(), autoincrement=False, nullable=False), + sa.Column('vlan', sa.String(10)), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'brocadeports', + sa.Column('port_id', sa.String(36), nullable=False), + sa.Column('network_id', sa.String(36)), + sa.Column('admin_state_up', sa.Boolean()), + sa.Column('physical_interface', sa.String(36)), + sa.Column('vlan_id', sa.String(10)), + sa.Column('tenant_id', sa.String(36)), + sa.PrimaryKeyConstraint('port_id') + ) + + +def upgrade_cisco(): + op.create_table( + 'cisco_vlan_ids', + sa.Column('vlan_id', sa.Integer(), 
autoincrement=True), + sa.Column('vlan_used', sa.Boolean()), + sa.PrimaryKeyConstraint('vlan_id') + ) + + op.create_table( + 'cisco_vlan_bindings', + sa.Column('vlan_id', sa.Integer(), autoincrement=True), + sa.Column('vlan_name', sa.String(255)), + sa.Column('network_id', sa.String(255), nullable=False), + sa.PrimaryKeyConstraint('vlan_id') + ) + + op.create_table( + 'portprofiles', + sa.Column('uuid', sa.String(255), nullable=False), + sa.Column('name', sa.String(255)), + sa.Column('vlan_id', sa.Integer()), + sa.Column('qos', sa.String(255)), + sa.PrimaryKeyConstraint('uuid') + ) + + op.create_table( + 'portprofile_bindings', + sa.Column('id', sa.Integer(), autoincrement=True), + sa.Column('tenant_id', sa.String(255)), + sa.Column('port_id', sa.String(255), nullable=False), + sa.Column('portprofile_id', sa.String(255), nullable=False), + sa.Column('default', sa.Boolean()), + sa.PrimaryKeyConstraint('id'), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ), + sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ), + ) + + op.create_table( + 'qoss', # yes two S's + sa.Column('qos_id', sa.String(255)), + sa.Column('tenant_id', sa.String(255)), + sa.Column('qos_name', sa.String(255)), + sa.Column('qos_desc', sa.String(255)), + sa.PrimaryKeyConstraint('tenant_id', 'qos_name') + ) + + op.create_table( + 'credentials', + sa.Column('credential_id', sa.String(255)), + sa.Column('tenant_id', sa.String(255)), + sa.Column('credential_name', sa.String(255)), + sa.Column('user_name', sa.String(255)), + sa.Column('password', sa.String(255)), + sa.PrimaryKeyConstraint('tenant_id', 'credential_name') + ) + + op.create_table( + 'port_bindings', + sa.Column('id', sa.Integer(), autoincrement=True), + sa.Column('port_id', sa.String(255), nullable=False), + sa.Column('blade_intf_dn', sa.String(255), nullable=False), + sa.Column('portprofile_name', sa.String(255)), + sa.Column('vlan_name', sa.String(255)), + sa.Column('vlan_id', sa.Integer()), + sa.Column('qos', sa.String(255)), + sa.Column('tenant_id', sa.String(255)), + sa.Column('instance_id', sa.String(255)), + sa.Column('vif_id', sa.String(255)), + sa.PrimaryKeyConstraint('id') + ) + + op.create_table( + 'nexusport_bindings', + sa.Column('id', sa.Integer(), primary_key=True, autoincrement=True), + sa.Column('port_id', sa.String(255)), + sa.Column('vlan_id', sa.Integer()), + sa.PrimaryKeyConstraint('id') + ) + + +def downgrade(active_plugins=None, options=None): + if PLUGINS['lbr'] in active_plugins: + downgrade_linuxbridge() + elif PLUGINS['ovs'] in active_plugins: + downgrade_ovs() + elif PLUGINS['cisco'] in active_plugins: + # Cisco plugin imports OVS models too + downgrade_ovs() + downgrade_cisco() + elif PLUGINS['meta'] in active_plugins: + downgrade_meta() + elif PLUGINS['nec'] in active_plugins: + downgrade_nec() + elif PLUGINS['ryu'] in active_plugins: + downgrade_ryu() + elif PLUGINS['brocade'] in active_plugins: + # Brocade plugin imports linux bridge models too + downgrade_brocade() + downgrade_linuxbridge() + + if migration.should_run(active_plugins, FOLSOM_QUOTA): + common_ext_ops.downgrade_quota(options) + + if migration.should_run(active_plugins, L3_CAPABLE): + common_ext_ops.downgrade_l3() + + downgrade_base() + + +def downgrade_base(): + drop_tables( + 'ipavailabilityranges', + 'ipallocationpools', + 'routes', + 'ipallocations', + 'dnsnameservers', + 'ports', + 'subnets', + 'networks' + ) + + +def downgrade_linuxbridge(): + drop_tables('network_bindings', 'network_states') + + +def downgrade_ovs(): + drop_tables( + 
'ovs_network_bindings', + 'ovs_tunnel_allocations', + 'ovs_vlan_allocations', + 'ovs_tunnel_ips', + 'ovs_tunnel_endpoints' + ) + + +def downgrade_meta(): + drop_tables('routerflavors', 'networkflavors') + + +def downgrade_nec(): + drop_tables( + 'packetfilters', + 'portinfos', + 'ofcfilters', + 'ofcports', + 'ofcnetworks', + 'ofctenants' + ) + + +def downgrade_ryu(): + op.drop_table('ofp_server') + + +def downgrade_brocade(): + op.drop_table('brocadenetworks') + op.drop_table('brocadeports') + + +def downgrade_cisco(): + drop_tables( + 'nexusport_bindings', + 'port_bindings', + 'credentials', + 'qoss', + 'portprofile_bindings', + 'portprofiles', + 'cisco_vlan_bindings', + 'cisco_vlan_ids' + ) + + +def drop_tables(*tables): + for table in tables: + op.drop_table(table) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/grizzly_release.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/grizzly_release.py new file mode 100644 index 00000000..3e8f50a3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/grizzly_release.py @@ -0,0 +1,40 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""grizzly + +Revision ID: grizzly +Revises: 1341ed32cc1e +Create Date: 2013-03-12 23:59:59.000000 + +""" + +# revision identifiers, used by Alembic. +revision = 'grizzly' +down_revision = '1341ed32cc1e' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = ['*'] + + +def upgrade(active_plugins=None, options=None): + """A no-op migration for marking the Grizzly release.""" + pass + + +def downgrade(active_plugins=None, options=None): + """A no-op migration for marking the Grizzly release.""" + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/havana_release.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/havana_release.py new file mode 100644 index 00000000..f30916ab --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/havana_release.py @@ -0,0 +1,40 @@ +# Copyright 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""havana + +Revision ID: havana +Revises: 40b0aff0302e +Create Date: 2013-10-02 00:00:00.000000 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'havana' +down_revision = '40b0aff0302e' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = ['*'] + + +def upgrade(active_plugins=None, options=None): + """A no-op migration for marking the Havana release.""" + pass + + +def downgrade(active_plugins=None, options=None): + """A no-op migration for marking the Havana release.""" + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/icehouse_release.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/icehouse_release.py new file mode 100644 index 00000000..5a02ba69 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/alembic_migrations/versions/icehouse_release.py @@ -0,0 +1,40 @@ +# Copyright 2014 Yahoo! Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""icehouse + +Revision ID: icehouse +Revises: 5ac1c354a051 +Create Date: 2013-03-28 00:00:00.000000 + +""" + +# revision identifiers, used by Alembic. +revision = 'icehouse' +down_revision = '5ac1c354a051' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = ['*'] + + +def upgrade(active_plugins=None, options=None): + """A no-op migration for marking the Icehouse release.""" + pass + + +def downgrade(active_plugins=None, options=None): + """A no-op migration for marking the Icehouse release.""" + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/cli.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/cli.py new file mode 100644 index 00000000..83f620b0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/cli.py @@ -0,0 +1,171 @@ +# Copyright 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +import os + +from alembic import command as alembic_command +from alembic import config as alembic_config +from alembic import script as alembic_script +from alembic import util as alembic_util +from oslo.config import cfg + + +HEAD_FILENAME = 'HEAD' + + +_core_opts = [ + cfg.StrOpt('core_plugin', + default='', + help=_('Neutron plugin provider module')), + cfg.ListOpt('service_plugins', + default=[], + help=_("The service plugins Neutron will use")), +] + +_quota_opts = [ + cfg.StrOpt('quota_driver', + default='', + help=_('Neutron quota driver class')), +] + +_db_opts = [ + cfg.StrOpt('connection', + deprecated_name='sql_connection', + default='', + secret=True, + help=_('URL to database')), + cfg.StrOpt('engine', + default='', + help=_('Database engine')), +] + +CONF = cfg.ConfigOpts() +CONF.register_cli_opts(_core_opts) +CONF.register_cli_opts(_db_opts, 'database') +CONF.register_opts(_quota_opts, 'QUOTAS') + + +def do_alembic_command(config, cmd, *args, **kwargs): + try: + getattr(alembic_command, cmd)(config, *args, **kwargs) + except alembic_util.CommandError as e: + alembic_util.err(str(e)) + + +def do_check_migration(config, cmd): + do_alembic_command(config, 'branches') + validate_head_file(config) + + +def do_upgrade_downgrade(config, cmd): + if not CONF.command.revision and not CONF.command.delta: + raise SystemExit(_('You must provide a revision or relative delta')) + + revision = CONF.command.revision + + if CONF.command.delta: + sign = '+' if CONF.command.name == 'upgrade' else '-' + revision = sign + str(CONF.command.delta) + else: + revision = CONF.command.revision + + do_alembic_command(config, cmd, revision, sql=CONF.command.sql) + + +def do_stamp(config, cmd): + do_alembic_command(config, cmd, + CONF.command.revision, + sql=CONF.command.sql) + + +def do_revision(config, cmd): + do_alembic_command(config, cmd, + message=CONF.command.message, + autogenerate=CONF.command.autogenerate, + sql=CONF.command.sql) + update_head_file(config) + + +def validate_head_file(config): + script = alembic_script.ScriptDirectory.from_config(config) + if len(script.get_heads()) > 1: + alembic_util.err(_('Timeline branches unable to generate timeline')) + + head_path = os.path.join(script.versions, HEAD_FILENAME) + if (os.path.isfile(head_path) and + open(head_path).read().strip() == script.get_current_head()): + return + else: + alembic_util.err(_('HEAD file does not match migration timeline head')) + + +def update_head_file(config): + script = alembic_script.ScriptDirectory.from_config(config) + if len(script.get_heads()) > 1: + alembic_util.err(_('Timeline branches unable to generate timeline')) + + head_path = os.path.join(script.versions, HEAD_FILENAME) + with open(head_path, 'w+') as f: + f.write(script.get_current_head()) + + +def add_command_parsers(subparsers): + for name in ['current', 'history', 'branches']: + parser = subparsers.add_parser(name) + parser.set_defaults(func=do_alembic_command) + + parser = subparsers.add_parser('check_migration') + parser.set_defaults(func=do_check_migration) + + for name in ['upgrade', 'downgrade']: + parser = subparsers.add_parser(name) + parser.add_argument('--delta', type=int) + parser.add_argument('--sql', action='store_true') + parser.add_argument('revision', nargs='?') + parser.set_defaults(func=do_upgrade_downgrade) + + parser = subparsers.add_parser('stamp') + parser.add_argument('--sql', action='store_true') + parser.add_argument('revision') + parser.set_defaults(func=do_stamp) + + parser = 
subparsers.add_parser('revision') + parser.add_argument('-m', '--message') + parser.add_argument('--autogenerate', action='store_true') + parser.add_argument('--sql', action='store_true') + parser.set_defaults(func=do_revision) + + +command_opt = cfg.SubCommandOpt('command', + title='Command', + help=_('Available commands'), + handler=add_command_parsers) + +CONF.register_cli_opt(command_opt) + + +def main(): + config = alembic_config.Config( + os.path.join(os.path.dirname(__file__), 'alembic.ini') + ) + config.set_main_option('script_location', + 'neutron.db.migration:alembic_migrations') + # attach the Neutron conf to the Alembic conf + config.neutron_config = CONF + + CONF() + #TODO(gongysh) enable logging + CONF.command.func(config, CONF.command.name) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/migration/migrate_to_ml2.py b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/migrate_to_ml2.py new file mode 100644 index 00000000..504061ed --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/migration/migrate_to_ml2.py @@ -0,0 +1,462 @@ +# Copyright (c) 2014 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This script will migrate the database of an openvswitch or linuxbridge +plugin so that it can be used with the ml2 plugin. + +Known Limitations: + + - THIS SCRIPT IS DESTRUCTIVE! Make sure to backup your + Neutron database before running this script, in case anything goes + wrong. + + - It will be necessary to upgrade the database to the target release + via neutron-db-manage before attempting to migrate to ml2. + Initially, only the icehouse release is supported. + + - This script does not automate configuration migration. + +Example usage: + + python -m neutron.db.migration.migrate_to_ml2 openvswitch \ + mysql://login:pass@127.0.0.1/neutron + +Note that migration of tunneling state will only be attempted if the +--tunnel-type parameter is provided. 
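+
+For example, an openvswitch deployment that uses GRE tunnels could be
+migrated with (the connection URL below is only illustrative):
+
+    python -m neutron.db.migration.migrate_to_ml2 --tunnel-type gre \
+     openvswitch mysql://login:pass@127.0.0.1/neutron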
+ +To manually test migration from ovs to ml2 with devstack: + + - stack with Q_PLUGIN=openvswitch + - boot an instance and validate connectivity + - stop the neutron service and all agents + - run the neutron-migrate-to-ml2 script + - update /etc/neutron/neutron.conf as follows: + + core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin + + - Create /etc/neutron/plugins/ml2/ml2_conf.ini and ensure that: + - ml2.mechanism_drivers includes 'openvswitch' + - ovs.local_ip is set correctly + - database.connection is set correctly + - Start the neutron service with the ml2 config file created in + the previous step in place of the openvswitch config file + - Start all the agents + - verify that the booted instance still has connectivity + - boot a second instance and validate connectivity +""" + +import argparse + +import sqlalchemy as sa + +from neutron.extensions import portbindings +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2.drivers import type_vxlan + + +# Migration targets +LINUXBRIDGE = 'linuxbridge' +OPENVSWITCH = 'openvswitch' + +# Releases +ICEHOUSE = 'icehouse' + + +SUPPORTED_SCHEMA_VERSIONS = [ICEHOUSE] + + +def check_db_schema_version(engine, metadata): + """Check that current version of the db schema is supported.""" + version_table = sa.Table( + 'alembic_version', metadata, autoload=True, autoload_with=engine) + versions = [v[0] for v in engine.execute(version_table.select())] + if not versions: + raise ValueError(_("Missing version in alembic_versions table")) + elif len(versions) > 1: + raise ValueError(_("Multiple versions in alembic_versions table: %s") + % versions) + current_version = versions[0] + if current_version not in SUPPORTED_SCHEMA_VERSIONS: + raise SystemError(_("Unsupported database schema %(current)s. " + "Please migrate your database to one of following " + "versions: %(supported)s") + % {'current': current_version, + 'supported': ', '.join(SUPPORTED_SCHEMA_VERSIONS)} + ) + + +# Duplicated from neutron.plugins.linuxbridge.common.constants to +# avoid having any dependency on the linuxbridge plugin being +# installed. +def interpret_vlan_id(vlan_id): + """Return (network_type, segmentation_id) tuple for encoded vlan_id.""" + FLAT_VLAN_ID = -1 + LOCAL_VLAN_ID = -2 + if vlan_id == LOCAL_VLAN_ID: + return (p_const.TYPE_LOCAL, None) + elif vlan_id == FLAT_VLAN_ID: + return (p_const.TYPE_FLAT, None) + else: + return (p_const.TYPE_VLAN, vlan_id) + + +class BaseMigrateToMl2_Icehouse(object): + + def __init__(self, vif_type, driver_type, segment_table_name, + vlan_allocation_table_name, old_tables): + self.vif_type = vif_type + self.driver_type = driver_type + self.segment_table_name = segment_table_name + self.vlan_allocation_table_name = vlan_allocation_table_name + self.old_tables = old_tables + + def __call__(self, connection_url, save_tables=False, tunnel_type=None, + vxlan_udp_port=None): + engine = sa.create_engine(connection_url) + metadata = sa.MetaData() + check_db_schema_version(engine, metadata) + + self.define_ml2_tables(metadata) + + # Autoload the ports table to ensure that foreign keys to it and + # the network table can be created for the new tables. 
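+        # (metadata.create_all() below is safe to call even though 'ports' was
+        # just autoloaded: by default it only creates tables that are missing,
+        # i.e. the ml2_* and vendor tables declared in define_ml2_tables().)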
+ sa.Table('ports', metadata, autoload=True, autoload_with=engine) + metadata.create_all(engine) + + self.migrate_network_segments(engine, metadata) + if tunnel_type: + self.migrate_tunnels(engine, tunnel_type, vxlan_udp_port) + self.migrate_vlan_allocations(engine) + self.migrate_port_bindings(engine, metadata) + + self.drop_old_tables(engine, save_tables) + + def migrate_segment_dict(self, binding): + binding['id'] = uuidutils.generate_uuid() + + def migrate_network_segments(self, engine, metadata): + # Migrating network segments requires loading the data to python + # so that a uuid can be generated for each segment. + source_table = sa.Table(self.segment_table_name, metadata, + autoload=True, autoload_with=engine) + source_segments = engine.execute(source_table.select()) + ml2_segments = [dict(x) for x in source_segments] + for segment in ml2_segments: + self.migrate_segment_dict(segment) + if ml2_segments: + ml2_network_segments = metadata.tables['ml2_network_segments'] + engine.execute(ml2_network_segments.insert(), ml2_segments) + + def migrate_tunnels(self, engine, tunnel_type, vxlan_udp_port=None): + """Override this method to perform plugin-specific tunnel migration.""" + pass + + def migrate_vlan_allocations(self, engine): + engine.execute((""" + INSERT INTO ml2_vlan_allocations + SELECT physical_network, vlan_id, allocated + FROM %(source_table)s + WHERE allocated = 1 + """) % {'source_table': self.vlan_allocation_table_name}) + + def get_port_segment_map(self, engine): + """Retrieve a mapping of port id to segment id. + + The monolithic plugins only support a single segment per + network, so the segment id can be uniquely identified by + the network associated with a given port. + + """ + port_segments = engine.execute(""" + SELECT ports_network.port_id, ml2_network_segments.id AS segment_id + FROM ml2_network_segments, ( + SELECT portbindingports.port_id, ports.network_id + FROM portbindingports, ports + WHERE portbindingports.port_id = ports.id + ) AS ports_network + WHERE ml2_network_segments.network_id = ports_network.network_id + """) + return dict(x for x in port_segments) + + def migrate_port_bindings(self, engine, metadata): + port_segment_map = self.get_port_segment_map(engine) + + port_binding_ports = sa.Table('portbindingports', metadata, + autoload=True, autoload_with=engine) + source_bindings = engine.execute(port_binding_ports.select()) + ml2_bindings = [dict(x) for x in source_bindings] + for binding in ml2_bindings: + binding['vif_type'] = self.vif_type + binding['driver'] = self.driver_type + segment = port_segment_map.get(binding['port_id']) + if segment: + binding['segment'] = segment + if ml2_bindings: + ml2_port_bindings = metadata.tables['ml2_port_bindings'] + engine.execute(ml2_port_bindings.insert(), ml2_bindings) + + def drop_old_tables(self, engine, save_tables=False): + if save_tables: + return + old_tables = self.old_tables + [self.vlan_allocation_table_name, + self.segment_table_name] + for table_name in old_tables: + engine.execute('DROP TABLE %s' % table_name) + + def define_ml2_tables(self, metadata): + + sa.Table( + 'arista_provisioned_nets', metadata, + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.Column('segmentation_id', sa.Integer(), + autoincrement=False, nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + + sa.Table( + 'arista_provisioned_vms', metadata, + sa.Column('tenant_id', 
sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('vm_id', sa.String(length=255), nullable=True), + sa.Column('host_id', sa.String(length=255), nullable=True), + sa.Column('port_id', sa.String(length=36), nullable=True), + sa.Column('network_id', sa.String(length=36), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + + sa.Table( + 'arista_provisioned_tenants', metadata, + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.PrimaryKeyConstraint('id'), + ) + + sa.Table( + 'cisco_ml2_nexusport_bindings', metadata, + sa.Column('binding_id', sa.Integer(), nullable=False), + sa.Column('port_id', sa.String(length=255), nullable=True), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('switch_ip', sa.String(length=255), nullable=True), + sa.Column('instance_id', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('binding_id'), + ) + + sa.Table( + 'cisco_ml2_credentials', metadata, + sa.Column('credential_id', sa.String(length=255), nullable=True), + sa.Column('tenant_id', sa.String(length=255), nullable=False), + sa.Column('credential_name', sa.String(length=255), + nullable=False), + sa.Column('user_name', sa.String(length=255), nullable=True), + sa.Column('password', sa.String(length=255), nullable=True), + sa.PrimaryKeyConstraint('tenant_id', 'credential_name'), + ) + + sa.Table( + 'ml2_flat_allocations', metadata, + sa.Column('physical_network', sa.String(length=64), + nullable=False), + sa.PrimaryKeyConstraint('physical_network'), + ) + + sa.Table( + 'ml2_gre_allocations', metadata, + sa.Column('gre_id', sa.Integer, nullable=False, + autoincrement=False), + sa.Column('allocated', sa.Boolean, nullable=False), + sa.PrimaryKeyConstraint('gre_id'), + ) + + sa.Table( + 'ml2_gre_endpoints', metadata, + sa.Column('ip_address', sa.String(length=64)), + sa.PrimaryKeyConstraint('ip_address'), + ) + + sa.Table( + 'ml2_network_segments', metadata, + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('network_type', sa.String(length=32), nullable=False), + sa.Column('physical_network', sa.String(length=64), nullable=True), + sa.Column('segmentation_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id'), + ) + + sa.Table( + 'ml2_port_bindings', metadata, + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('vif_type', sa.String(length=64), nullable=False), + sa.Column('driver', sa.String(length=64), nullable=True), + sa.Column('segment', sa.String(length=36), nullable=True), + sa.Column('vnic_type', sa.String(length=64), nullable=False, + server_default='normal'), + sa.Column('vif_details', sa.String(4095), nullable=False, + server_default=''), + sa.Column('profile', sa.String(4095), nullable=False, + server_default=''), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'], + ondelete='SET NULL'), + sa.PrimaryKeyConstraint('port_id'), + ) + + sa.Table( + 'ml2_vlan_allocations', metadata, + sa.Column('physical_network', sa.String(length=64), + nullable=False), + sa.Column('vlan_id', sa.Integer(), autoincrement=False, + nullable=False), + sa.Column('allocated', sa.Boolean(), 
autoincrement=False, + nullable=False), + sa.PrimaryKeyConstraint('physical_network', 'vlan_id'), + ) + + sa.Table( + 'ml2_vxlan_allocations', metadata, + sa.Column('vxlan_vni', sa.Integer, nullable=False, + autoincrement=False), + sa.Column('allocated', sa.Boolean, nullable=False), + sa.PrimaryKeyConstraint('vxlan_vni'), + ) + + sa.Table( + 'ml2_vxlan_endpoints', metadata, + sa.Column('ip_address', sa.String(length=64)), + sa.Column('udp_port', sa.Integer(), nullable=False, + autoincrement=False), + sa.PrimaryKeyConstraint('ip_address', 'udp_port'), + ) + + +class MigrateLinuxBridgeToMl2_Icehouse(BaseMigrateToMl2_Icehouse): + + def __init__(self): + super(MigrateLinuxBridgeToMl2_Icehouse, self).__init__( + vif_type=portbindings.VIF_TYPE_BRIDGE, + driver_type=LINUXBRIDGE, + segment_table_name='network_bindings', + vlan_allocation_table_name='network_states', + old_tables=['portbindingports']) + + def migrate_segment_dict(self, binding): + super(MigrateLinuxBridgeToMl2_Icehouse, self).migrate_segment_dict( + binding) + vlan_id = binding.pop('vlan_id') + network_type, segmentation_id = interpret_vlan_id(vlan_id) + binding['network_type'] = network_type + binding['segmentation_id'] = segmentation_id + + +class MigrateOpenvswitchToMl2_Icehouse(BaseMigrateToMl2_Icehouse): + + def __init__(self): + super(MigrateOpenvswitchToMl2_Icehouse, self).__init__( + vif_type=portbindings.VIF_TYPE_OVS, + driver_type=OPENVSWITCH, + segment_table_name='ovs_network_bindings', + vlan_allocation_table_name='ovs_vlan_allocations', + old_tables=[ + 'ovs_tunnel_allocations', + 'ovs_tunnel_endpoints', + 'portbindingports', + ]) + + def migrate_tunnels(self, engine, tunnel_type, vxlan_udp_port=None): + if tunnel_type == p_const.TYPE_GRE: + engine.execute(""" + INSERT INTO ml2_gre_allocations + SELECT tunnel_id as gre_id, allocated + FROM ovs_tunnel_allocations + WHERE allocated = 1 + """) + engine.execute(""" + INSERT INTO ml2_gre_endpoints + SELECT ip_address + FROM ovs_tunnel_endpoints + """) + elif tunnel_type == p_const.TYPE_VXLAN: + if not vxlan_udp_port: + vxlan_udp_port = type_vxlan.VXLAN_UDP_PORT + engine.execute(""" + INSERT INTO ml2_vxlan_allocations + SELECT tunnel_id as vxlan_vni, allocated + FROM ovs_tunnel_allocations + WHERE allocated = 1 + """) + engine.execute(sa.text(""" + INSERT INTO ml2_vxlan_endpoints + SELECT ip_address, :udp_port as udp_port + FROM ovs_tunnel_endpoints + """), udp_port=vxlan_udp_port) + else: + raise ValueError(_('Unknown tunnel type: %s') % tunnel_type) + + +migrate_map = { + ICEHOUSE: { + OPENVSWITCH: MigrateOpenvswitchToMl2_Icehouse, + LINUXBRIDGE: MigrateLinuxBridgeToMl2_Icehouse, + }, +} + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('plugin', choices=[OPENVSWITCH, LINUXBRIDGE], + help=_('The plugin type whose database will be ' + 'migrated')) + parser.add_argument('connection', + help=_('The connection url for the target db')) + parser.add_argument('--tunnel-type', choices=[p_const.TYPE_GRE, + p_const.TYPE_VXLAN], + help=_('The %s tunnel type to migrate from') % + OPENVSWITCH) + parser.add_argument('--vxlan-udp-port', default=None, type=int, + help=_('The UDP port to use for VXLAN tunnels.')) + parser.add_argument('--release', default=ICEHOUSE, choices=[ICEHOUSE]) + parser.add_argument('--save-tables', default=False, action='store_true', + help=_("Retain the old plugin's tables")) + #TODO(marun) Provide a verbose option + args = parser.parse_args() + + if args.plugin == LINUXBRIDGE and (args.tunnel_type or + args.vxlan_udp_port): + msg = 
_('Tunnel args (tunnel-type and vxlan-udp-port) are not valid ' + 'for the %s plugin') + parser.error(msg % LINUXBRIDGE) + + try: + migrate_func = migrate_map[args.release][args.plugin]() + except KeyError: + msg = _('Support for migrating %(plugin)s for release ' + '%(release)s is not yet implemented') + parser.error(msg % {'plugin': args.plugin, 'release': args.release}) + else: + migrate_func(args.connection, args.save_tables, args.tunnel_type, + args.vxlan_udp_port) + + +if __name__ == '__main__': + main() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/model_base.py b/icehouse-patches/neutron/dvr-patch/neutron/db/model_base.py new file mode 100644 index 00000000..ab420dcc --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/model_base.py @@ -0,0 +1,51 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.db.sqlalchemy import models +from sqlalchemy.ext import declarative +from sqlalchemy import orm + + +class NeutronBase(models.ModelBase): + """Base class for Neutron Models.""" + + __table_args__ = {'mysql_engine': 'InnoDB'} + + def __iter__(self): + self._i = iter(orm.object_mapper(self).columns) + return self + + def next(self): + n = self._i.next().name + return n, getattr(self, n) + + def __repr__(self): + """sqlalchemy based automatic __repr__ method.""" + items = ['%s=%r' % (col.name, getattr(self, col.name)) + for col in self.__table__.columns] + return "<%s.%s[object at %x] {%s}>" % (self.__class__.__module__, + self.__class__.__name__, + id(self), ', '.join(items)) + + +class NeutronBaseV2(NeutronBase): + + @declarative.declared_attr + def __tablename__(cls): + # NOTE(jkoelker) use the pluralized name of the class as the table + return cls.__name__.lower() + 's' + + +BASEV2 = declarative.declarative_base(cls=NeutronBaseV2) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/models_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/db/models_v2.py new file mode 100644 index 00000000..53efc669 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/models_v2.py @@ -0,0 +1,204 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.common import constants +from neutron.db import model_base +from neutron.openstack.common import uuidutils + + +class HasTenant(object): + """Tenant mixin, add to subclasses that have a tenant.""" + + # NOTE(jkoelker) tenant_id is just a free form string ;( + tenant_id = sa.Column(sa.String(255)) + + +class HasId(object): + """id mixin, add to subclasses that have an id.""" + + id = sa.Column(sa.String(36), + primary_key=True, + default=uuidutils.generate_uuid) + + +class HasStatusDescription(object): + """Status with description mixin.""" + + status = sa.Column(sa.String(16), nullable=False) + status_description = sa.Column(sa.String(255)) + + +class IPAvailabilityRange(model_base.BASEV2): + """Internal representation of available IPs for Neutron subnets. + + Allocation - first entry from the range will be allocated. + If the first entry is equal to the last entry then this row + will be deleted. + Recycling ips involves reading the IPAllocationPool and IPAllocation tables + and inserting ranges representing available ips. This happens after the + final allocation is pulled from this table and a new ip allocation is + requested. Any contiguous ranges of available ips will be inserted as a + single range. + """ + + allocation_pool_id = sa.Column(sa.String(36), + sa.ForeignKey('ipallocationpools.id', + ondelete="CASCADE"), + nullable=False, + primary_key=True) + first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) + last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) + + def __repr__(self): + return "%s - %s" % (self.first_ip, self.last_ip) + + +class IPAllocationPool(model_base.BASEV2, HasId): + """Representation of an allocation pool in a Neutron subnet.""" + + subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + nullable=True) + first_ip = sa.Column(sa.String(64), nullable=False) + last_ip = sa.Column(sa.String(64), nullable=False) + available_ranges = orm.relationship(IPAvailabilityRange, + backref='ipallocationpool', + lazy="joined", + cascade='all, delete-orphan') + + def __repr__(self): + return "%s - %s" % (self.first_ip, self.last_ip) + + +class IPAllocation(model_base.BASEV2): + """Internal representation of allocated IP addresses in a Neutron subnet. 
+ """ + + port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', + ondelete="CASCADE"), + nullable=True) + ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) + subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + nullable=False, primary_key=True) + network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", + ondelete="CASCADE"), + nullable=False, primary_key=True) + + +class Route(object): + """mixin of a route.""" + + destination = sa.Column(sa.String(64), nullable=False, primary_key=True) + nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True) + + +class SubnetRoute(model_base.BASEV2, Route): + + subnet_id = sa.Column(sa.String(36), + sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + primary_key=True) + + +class Port(model_base.BASEV2, HasId, HasTenant): + """Represents a port on a Neutron v2 network.""" + + name = sa.Column(sa.String(255)) + network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), + nullable=False) + fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined') + mac_address = sa.Column(sa.String(32), nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + status = sa.Column(sa.String(16), nullable=False) + device_id = sa.Column(sa.String(255), nullable=False) + device_owner = sa.Column(sa.String(255), nullable=False) + + def __init__(self, id=None, tenant_id=None, name=None, network_id=None, + mac_address=None, admin_state_up=None, status=None, + device_id=None, device_owner=None, fixed_ips=None): + self.id = id + self.tenant_id = tenant_id + self.name = name + self.network_id = network_id + self.mac_address = mac_address + self.admin_state_up = admin_state_up + self.device_owner = device_owner + self.device_id = device_id + # Since this is a relationship only set it if one is passed in. + if fixed_ips: + self.fixed_ips = fixed_ips + + # NOTE(arosen): status must be set last as an event is triggered on! + self.status = status + + +class DNSNameServer(model_base.BASEV2): + """Internal representation of a DNS nameserver.""" + + address = sa.Column(sa.String(128), nullable=False, primary_key=True) + subnet_id = sa.Column(sa.String(36), + sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + primary_key=True) + + +class Subnet(model_base.BASEV2, HasId, HasTenant): + """Represents a neutron subnet. + + When a subnet is created the first and last entries will be created. These + are used for the IP allocation. 
+ """ + + name = sa.Column(sa.String(255)) + network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id')) + ip_version = sa.Column(sa.Integer, nullable=False) + cidr = sa.Column(sa.String(64), nullable=False) + gateway_ip = sa.Column(sa.String(64)) + allocation_pools = orm.relationship(IPAllocationPool, + backref='subnet', + lazy="joined", + cascade='delete') + enable_dhcp = sa.Column(sa.Boolean()) + dns_nameservers = orm.relationship(DNSNameServer, + backref='subnet', + cascade='all, delete, delete-orphan') + routes = orm.relationship(SubnetRoute, + backref='subnet', + cascade='all, delete, delete-orphan') + shared = sa.Column(sa.Boolean) + ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, + constants.DHCPV6_STATEFUL, + constants.DHCPV6_STATELESS, + name='ipv6_ra_modes'), nullable=True) + ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, + constants.DHCPV6_STATEFUL, + constants.DHCPV6_STATELESS, + name='ipv6_address_modes'), nullable=True) + + +class Network(model_base.BASEV2, HasId, HasTenant): + """Represents a v2 neutron network.""" + + name = sa.Column(sa.String(255)) + ports = orm.relationship(Port, backref='networks') + subnets = orm.relationship(Subnet, backref='networks', + lazy="joined") + status = sa.Column(sa.String(16)) + admin_state_up = sa.Column(sa.Boolean) + shared = sa.Column(sa.Boolean) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/portbindings_base.py b/icehouse-patches/neutron/dvr-patch/neutron/db/portbindings_base.py new file mode 100644 index 00000000..045b7e3f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/portbindings_base.py @@ -0,0 +1,41 @@ +# Copyright 2013 UnitedStack Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Yong Sheng Gong, UnitedStack Inc. + +from neutron.api.v2 import attributes +from neutron.db import db_base_plugin_v2 + + +class PortBindingBaseMixin(object): + base_binding_dict = None + + def _process_portbindings_create_and_update(self, context, port_data, + port): + self.extend_port_dict_binding(port, None) + + def extend_port_dict_binding(self, port_res, port_db): + if self.base_binding_dict: + port_res.update(self.base_binding_dict) + + +def _extend_port_dict_binding(plugin, port_res, port_db): + if not isinstance(plugin, PortBindingBaseMixin): + return + plugin.extend_port_dict_binding(port_res, port_db) + + +def register_port_dict_function(): + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, [_extend_port_dict_binding]) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/portbindings_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/portbindings_db.py new file mode 100644 index 00000000..1f94f839 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/portbindings_db.py @@ -0,0 +1,121 @@ +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Yong Sheng Gong, IBM, Corp. + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.v2 import attributes +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.db import portbindings_base +from neutron.extensions import portbindings + + +class PortBindingPort(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + host = sa.Column(sa.String(255), nullable=False) + port = orm.relationship( + models_v2.Port, + backref=orm.backref("portbinding", + lazy='joined', uselist=False, + cascade='delete')) + + +class PortBindingMixin(portbindings_base.PortBindingBaseMixin): + extra_binding_dict = None + + def _port_model_hook(self, context, original_model, query): + query = query.outerjoin(PortBindingPort, + (original_model.id == + PortBindingPort.port_id)) + return query + + def _port_result_filter_hook(self, query, filters): + values = filters and filters.get(portbindings.HOST_ID, []) + if not values: + return query + if len(values) == 1: + query = query.filter(PortBindingPort.host == values[0]) + else: + query = query.filter(PortBindingPort.host.in_(values)) + return query + + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, + "portbindings_port", + '_port_model_hook', + None, + '_port_result_filter_hook') + + def _process_portbindings_create_and_update(self, context, port_data, + port): + binding_profile = port.get(portbindings.PROFILE) + binding_profile_set = attributes.is_attr_set(binding_profile) + if not binding_profile_set and binding_profile is not None: + del port[portbindings.PROFILE] + + binding_vnic = port.get(portbindings.VNIC_TYPE) + binding_vnic_set = attributes.is_attr_set(binding_vnic) + if not binding_vnic_set and binding_vnic is not None: + del port[portbindings.VNIC_TYPE] + # REVISIT(irenab) Add support for vnic_type for plugins that + # can handle more than one type. + # Currently implemented for ML2 plugin that does not use + # PortBindingMixin. 
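+
+        # The block below persists the requested binding host: when
+        # binding:host_id is supplied it creates or updates the
+        # PortBindingPort row for the port, otherwise it falls back to any
+        # host already stored so the port dict can still report it.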
+ + host = port_data.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + with context.session.begin(subtransactions=True): + bind_port = context.session.query( + PortBindingPort).filter_by(port_id=port['id']).first() + if host_set: + if not bind_port: + context.session.add(PortBindingPort(port_id=port['id'], + host=host)) + else: + bind_port.host = host + else: + host = (bind_port and bind_port.host or None) + self._extend_port_dict_binding_host(port, host) + + def get_port_host(self, context, port_id): + with context.session.begin(subtransactions=True): + bind_port = context.session.query( + PortBindingPort).filter_by(port_id=port_id).first() + return bind_port and bind_port.host or None + + def _extend_port_dict_binding_host(self, port_res, host): + super(PortBindingMixin, self).extend_port_dict_binding( + port_res, None) + port_res[portbindings.HOST_ID] = host + + def extend_port_dict_binding(self, port_res, port_db): + host = (port_db.portbinding and port_db.portbinding.host or None) + self._extend_port_dict_binding_host(port_res, host) + + +def _extend_port_dict_binding(plugin, port_res, port_db): + if not isinstance(plugin, PortBindingMixin): + return + plugin.extend_port_dict_binding(port_res, port_db) + + +# Register dict extend functions for ports +db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, [_extend_port_dict_binding]) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/quota_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/quota_db.py new file mode 100644 index 00000000..dc6a3cf4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/quota_db.py @@ -0,0 +1,179 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa + +from neutron.common import exceptions +from neutron.db import model_base +from neutron.db import models_v2 + + +class Quota(model_base.BASEV2, models_v2.HasId): + """Represent a single quota override for a tenant. + + If there is no row for a given tenant id and resource, then the + default for the quota class is used. + """ + tenant_id = sa.Column(sa.String(255), index=True) + resource = sa.Column(sa.String(255)) + limit = sa.Column(sa.Integer) + + +class DbQuotaDriver(object): + """Driver to perform necessary checks to enforce quotas and obtain quota + information. + + The default driver utilizes the local database. + """ + + @staticmethod + def get_tenant_quotas(context, resources, tenant_id): + """Given a list of resources, retrieve the quotas for the given + tenant. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resource keys. + :param tenant_id: The ID of the tenant to return quotas for. 
+ :return dict: from resource name to dict of name and limit + """ + + # init with defaults + tenant_quota = dict((key, resource.default) + for key, resource in resources.items()) + + # update with tenant specific limits + q_qry = context.session.query(Quota).filter_by(tenant_id=tenant_id) + tenant_quota.update((q['resource'], q['limit']) for q in q_qry) + + return tenant_quota + + @staticmethod + def delete_tenant_quota(context, tenant_id): + """Delete the quota entries for a given tenant_id. + + Atfer deletion, this tenant will use default quota values in conf. + """ + with context.session.begin(): + tenant_quotas = context.session.query(Quota) + tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id) + tenant_quotas.delete() + + @staticmethod + def get_all_quotas(context, resources): + """Given a list of resources, retrieve the quotas for the all tenants. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resource keys. + :return quotas: list of dict of tenant_id:, resourcekey1: + resourcekey2: ... + """ + tenant_default = dict((key, resource.default) + for key, resource in resources.items()) + + all_tenant_quotas = {} + + for quota in context.session.query(Quota): + tenant_id = quota['tenant_id'] + + # avoid setdefault() because only want to copy when actually req'd + tenant_quota = all_tenant_quotas.get(tenant_id) + if tenant_quota is None: + tenant_quota = tenant_default.copy() + tenant_quota['tenant_id'] = tenant_id + all_tenant_quotas[tenant_id] = tenant_quota + + tenant_quota[quota['resource']] = quota['limit'] + + return all_tenant_quotas.values() + + @staticmethod + def update_quota_limit(context, tenant_id, resource, limit): + with context.session.begin(): + tenant_quota = context.session.query(Quota).filter_by( + tenant_id=tenant_id, resource=resource).first() + + if tenant_quota: + tenant_quota.update({'limit': limit}) + else: + tenant_quota = Quota(tenant_id=tenant_id, + resource=resource, + limit=limit) + context.session.add(tenant_quota) + + def _get_quotas(self, context, tenant_id, resources, keys): + """Retrieves the quotas for specific resources. + + A helper method which retrieves the quotas for the specific + resources identified by keys, and which apply to the current + context. + + :param context: The request context, for access checks. + :param tenant_id: the tenant_id to check quota. + :param resources: A dictionary of the registered resources. + :param keys: A list of the desired quotas to retrieve. + + """ + desired = set(keys) + sub_resources = dict((k, v) for k, v in resources.items() + if k in desired) + + # Make sure we accounted for all of them... + if len(keys) != len(sub_resources): + unknown = desired - set(sub_resources.keys()) + raise exceptions.QuotaResourceUnknown(unknown=sorted(unknown)) + + # Grab and return the quotas (without usages) + quotas = DbQuotaDriver.get_tenant_quotas( + context, sub_resources, tenant_id) + + return dict((k, v) for k, v in quotas.items()) + + def limit_check(self, context, tenant_id, resources, values): + """Check simple quota limits. + + For limits--those quotas for which there is no usage + synchronization function--this method checks that a set of + proposed values are permitted by the limit restriction. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it is not a simple limit + resource. 
+ + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns + nothing. + + :param context: The request context, for access checks. + :param tenant_id: The tenant_id to check the quota. + :param resources: A dictionary of the registered resources. + :param values: A dictionary of the values to check against the + quota. + """ + + # Ensure no value is less than zero + unders = [key for key, val in values.items() if val < 0] + if unders: + raise exceptions.InvalidQuotaValue(unders=sorted(unders)) + + # Get the applicable quotas + quotas = self._get_quotas(context, tenant_id, resources, values.keys()) + + # Check the quotas and construct a list of the resources that + # would be put over limit by the desired values + overs = [key for key, val in values.items() + if quotas[key] >= 0 and quotas[key] < val] + if overs: + raise exceptions.OverQuota(overs=sorted(overs)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/routedserviceinsertion_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/routedserviceinsertion_db.py new file mode 100644 index 00000000..25b87ca4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/routedserviceinsertion_db.py @@ -0,0 +1,106 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Kaiwei Fan, VMware, Inc + +import sqlalchemy as sa +from sqlalchemy import event + +from neutron.common import exceptions as qexception +from neutron.db import model_base +from neutron.extensions import routedserviceinsertion as rsi + + +class ServiceRouterBinding(model_base.BASEV2): + resource_id = sa.Column(sa.String(36), + primary_key=True) + resource_type = sa.Column(sa.String(36), + primary_key=True) + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id'), + nullable=False) + + +class AttributeException(qexception.NeutronException): + message = _("Resource type '%(resource_type)s' is longer " + "than %(maxlen)d characters") + + +@event.listens_for(ServiceRouterBinding.resource_type, 'set', retval=True) +def validate_resource_type(target, value, oldvalue, initiator): + """Make sure the resource type fit the resource_type column.""" + maxlen = ServiceRouterBinding.resource_type.property.columns[0].type.length + if len(value) > maxlen: + raise AttributeException(resource_type=value, maxlen=maxlen) + return value + + +class RoutedServiceInsertionDbMixin(object): + """Mixin class to add router service insertion.""" + + def _process_create_resource_router_id(self, context, resource, model): + with context.session.begin(subtransactions=True): + db = ServiceRouterBinding( + resource_id=resource['id'], + resource_type=model.__tablename__, + router_id=resource[rsi.ROUTER_ID]) + context.session.add(db) + return self._make_resource_router_id_dict(db, model) + + def _extend_resource_router_id_dict(self, context, resource, model): + binding = self._get_resource_router_id_binding( + context, resource['resource_id'], model) + resource[rsi.ROUTER_ID] = binding['router_id'] + + def _get_resource_router_id_binding(self, context, model, + resource_id=None, + router_id=None): + query = self._model_query(context, ServiceRouterBinding) + query = query.filter( + ServiceRouterBinding.resource_type == model.__tablename__) + if resource_id: + query = query.filter( + ServiceRouterBinding.resource_id == resource_id) + if router_id: + query = query.filter( + ServiceRouterBinding.router_id == router_id) + return query.first() + + def _get_resource_router_id_bindings(self, context, model, + resource_ids=None, + router_ids=None): + query = self._model_query(context, ServiceRouterBinding) + query = query.filter( + ServiceRouterBinding.resource_type == model.__tablename__) + if resource_ids: + query = query.filter( + ServiceRouterBinding.resource_id.in_(resource_ids)) + if router_ids: + query = query.filter( + ServiceRouterBinding.router_id.in_(router_ids)) + return query.all() + + def _make_resource_router_id_dict(self, resource_router_binding, model, + fields=None): + resource = {'resource_id': resource_router_binding['resource_id'], + 'resource_type': model.__tablename__, + rsi.ROUTER_ID: resource_router_binding[rsi.ROUTER_ID]} + return self._fields(resource, fields) + + def _delete_resource_router_id_binding(self, context, resource_id, model): + with context.session.begin(subtransactions=True): + binding = self._get_resource_router_id_binding( + context, model, resource_id=resource_id) + if binding: + context.session.delete(binding) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/routerservicetype_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/routerservicetype_db.py new file mode 100644 index 00000000..9037a0bb --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/routerservicetype_db.py @@ -0,0 +1,57 @@ +# Copyright 2013 VMware, Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kaiwei Fan, VMware, Inc + +import sqlalchemy as sa + +from neutron.db import model_base +from neutron.extensions import routerservicetype as rst + + +class RouterServiceTypeBinding(model_base.BASEV2): + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', ondelete="CASCADE"), + primary_key=True) + service_type_id = sa.Column(sa.String(36), + nullable=False) + + +class RouterServiceTypeDbMixin(object): + """Mixin class to add router service type.""" + + def _process_create_router_service_type_id(self, context, router): + with context.session.begin(subtransactions=True): + db = RouterServiceTypeBinding( + router_id=router['id'], + service_type_id=router[rst.SERVICE_TYPE_ID]) + context.session.add(db) + return self._make_router_service_type_id_dict(db) + + def _extend_router_service_type_id_dict(self, context, router): + rsbind = self._get_router_service_type_id_binding( + context, router['id']) + if rsbind: + router[rst.SERVICE_TYPE_ID] = rsbind['service_type_id'] + + def _get_router_service_type_id_binding(self, context, router_id): + query = self._model_query(context, RouterServiceTypeBinding) + query = query.filter( + RouterServiceTypeBinding.router_id == router_id) + return query.first() + + def _make_router_service_type_id_dict(self, router_service_type): + res = {'router_id': router_service_type['router_id'], + 'service_type_id': router_service_type[rst.SERVICE_TYPE_ID]} + return self._fields(res, None) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/securitygroups_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/securitygroups_db.py new file mode 100644 index 00000000..c897071b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/securitygroups_db.py @@ -0,0 +1,564 @@ +# Copyright 2012 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
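+
+# Overview: this module provides the security group models (SecurityGroup,
+# SecurityGroupPortBinding, SecurityGroupRule) and SecurityGroupDbMixin,
+# which core plugins mix into their plugin class for security group CRUD.
+# A minimal, purely illustrative composition might look like:
+#
+#     class MyPluginDb(db_base_plugin_v2.NeutronDbPluginV2,
+#                      SecurityGroupDbMixin):
+#         """Hypothetical plugin DB class with security group support."""
+#
+# SecurityGroupServerRpcMixin (securitygroups_rpc_base.py, later in this
+# patch) extends the mixin with agent notifications.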
+ +import netaddr +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc +from sqlalchemy.orm import scoped_session + +from neutron.api.v2 import attributes as attr +from neutron.common import constants +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import uuidutils + + +IP_PROTOCOL_MAP = {constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP, + constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP, + constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP, + constants.PROTO_NAME_ICMP_V6: constants.PROTO_NUM_ICMP_V6} + + +class SecurityGroup(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 neutron security group.""" + + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + + +class SecurityGroupPortBinding(model_base.BASEV2): + """Represents binding between neutron ports and security profiles.""" + + port_id = sa.Column(sa.String(36), + sa.ForeignKey("ports.id", + ondelete='CASCADE'), + primary_key=True) + security_group_id = sa.Column(sa.String(36), + sa.ForeignKey("securitygroups.id"), + primary_key=True) + + # Add a relationship to the Port model in order to instruct SQLAlchemy to + # eagerly load security group bindings + ports = orm.relationship( + models_v2.Port, + backref=orm.backref("security_groups", + lazy='joined', cascade='delete')) + + +class SecurityGroupRule(model_base.BASEV2, models_v2.HasId, + models_v2.HasTenant): + """Represents a v2 neutron security group rule.""" + + security_group_id = sa.Column(sa.String(36), + sa.ForeignKey("securitygroups.id", + ondelete="CASCADE"), + nullable=False) + + remote_group_id = sa.Column(sa.String(36), + sa.ForeignKey("securitygroups.id", + ondelete="CASCADE"), + nullable=True) + + direction = sa.Column(sa.Enum('ingress', 'egress', + name='securitygrouprules_direction')) + ethertype = sa.Column(sa.String(40)) + protocol = sa.Column(sa.String(40)) + port_range_min = sa.Column(sa.Integer) + port_range_max = sa.Column(sa.Integer) + remote_ip_prefix = sa.Column(sa.String(255)) + security_group = orm.relationship( + SecurityGroup, + backref=orm.backref('rules', cascade='all,delete'), + primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id") + source_group = orm.relationship( + SecurityGroup, + backref=orm.backref('source_rules', cascade='all,delete'), + primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id") + + +class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): + """Mixin class to add security group to db_base_plugin_v2.""" + + __native_bulk_support = True + + def create_security_group_bulk(self, context, security_group_rule): + return self._create_bulk('security_group', context, + security_group_rule) + + def create_security_group(self, context, security_group, default_sg=False): + """Create security group. + + If default_sg is true that means we are a default security group for + a given tenant if it does not exist. 
+ """ + s = security_group['security_group'] + tenant_id = self._get_tenant_id_for_create(context, s) + + if not default_sg: + self._ensure_default_security_group(context, tenant_id) + + with context.session.begin(subtransactions=True): + security_group_db = SecurityGroup(id=s.get('id') or ( + uuidutils.generate_uuid()), + description=s['description'], + tenant_id=tenant_id, + name=s['name']) + context.session.add(security_group_db) + for ethertype in ext_sg.sg_supported_ethertypes: + if s.get('name') == 'default': + # Allow intercommunication + ingress_rule = SecurityGroupRule( + id=uuidutils.generate_uuid(), tenant_id=tenant_id, + security_group=security_group_db, + direction='ingress', + ethertype=ethertype, + source_group=security_group_db) + context.session.add(ingress_rule) + + egress_rule = SecurityGroupRule( + id=uuidutils.generate_uuid(), tenant_id=tenant_id, + security_group=security_group_db, + direction='egress', + ethertype=ethertype) + context.session.add(egress_rule) + + return self._make_security_group_dict(security_group_db) + + def get_security_groups(self, context, filters=None, fields=None, + sorts=None, limit=None, + marker=None, page_reverse=False, default_sg=False): + + # If default_sg is True do not call _ensure_default_security_group() + # so this can be done recursively. Context.tenant_id is checked + # because all the unit tests do not explicitly set the context on + # GETS. TODO(arosen) context handling can probably be improved here. + if not default_sg and context.tenant_id: + self._ensure_default_security_group(context, context.tenant_id) + marker_obj = self._get_marker_obj(context, 'security_group', limit, + marker) + return self._get_collection(context, + SecurityGroup, + self._make_security_group_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_security_groups_count(self, context, filters=None): + return self._get_collection_count(context, SecurityGroup, + filters=filters) + + def get_security_group(self, context, id, fields=None, tenant_id=None): + """Tenant id is given to handle the case when creating a security + group rule on behalf of another use. 
+ """ + + if tenant_id: + tmp_context_tenant_id = context.tenant_id + context.tenant_id = tenant_id + + try: + with context.session.begin(subtransactions=True): + ret = self._make_security_group_dict(self._get_security_group( + context, id), fields) + ret['security_group_rules'] = self.get_security_group_rules( + context, {'security_group_id': [id]}) + finally: + if tenant_id: + context.tenant_id = tmp_context_tenant_id + return ret + + def _get_security_group(self, context, id): + try: + query = self._model_query(context, SecurityGroup) + sg = query.filter(SecurityGroup.id == id).one() + + except exc.NoResultFound: + raise ext_sg.SecurityGroupNotFound(id=id) + return sg + + def delete_security_group(self, context, id): + filters = {'security_group_id': [id]} + ports = self._get_port_security_group_bindings(context, filters) + if ports: + raise ext_sg.SecurityGroupInUse(id=id) + # confirm security group exists + sg = self._get_security_group(context, id) + + if sg['name'] == 'default' and not context.is_admin: + raise ext_sg.SecurityGroupCannotRemoveDefault() + with context.session.begin(subtransactions=True): + context.session.delete(sg) + + def update_security_group(self, context, id, security_group): + s = security_group['security_group'] + with context.session.begin(subtransactions=True): + sg = self._get_security_group(context, id) + if sg['name'] == 'default' and 'name' in s: + raise ext_sg.SecurityGroupCannotUpdateDefault() + sg.update(s) + return self._make_security_group_dict(sg) + + def _make_security_group_dict(self, security_group, fields=None): + res = {'id': security_group['id'], + 'name': security_group['name'], + 'tenant_id': security_group['tenant_id'], + 'description': security_group['description']} + res['security_group_rules'] = [self._make_security_group_rule_dict(r) + for r in security_group.rules] + return self._fields(res, fields) + + def _make_security_group_binding_dict(self, security_group, fields=None): + res = {'port_id': security_group['port_id'], + 'security_group_id': security_group['security_group_id']} + return self._fields(res, fields) + + def _create_port_security_group_binding(self, context, port_id, + security_group_id): + with context.session.begin(subtransactions=True): + db = SecurityGroupPortBinding(port_id=port_id, + security_group_id=security_group_id) + context.session.add(db) + + def _get_port_security_group_bindings(self, context, + filters=None, fields=None): + return self._get_collection(context, + SecurityGroupPortBinding, + self._make_security_group_binding_dict, + filters=filters, fields=fields) + + def _delete_port_security_group_bindings(self, context, port_id): + query = self._model_query(context, SecurityGroupPortBinding) + bindings = query.filter( + SecurityGroupPortBinding.port_id == port_id) + with context.session.begin(subtransactions=True): + for binding in bindings: + context.session.delete(binding) + + def create_security_group_rule_bulk(self, context, security_group_rule): + return self._create_bulk('security_group_rule', context, + security_group_rule) + + def create_security_group_rule_bulk_native(self, context, + security_group_rule): + r = security_group_rule['security_group_rules'] + + scoped_session(context.session) + security_group_id = self._validate_security_group_rules( + context, security_group_rule) + with context.session.begin(subtransactions=True): + if not self.get_security_group(context, security_group_id): + raise ext_sg.SecurityGroupNotFound(id=security_group_id) + + self._check_for_duplicate_rules(context, 
r) + ret = [] + for rule_dict in r: + rule = rule_dict['security_group_rule'] + tenant_id = self._get_tenant_id_for_create(context, rule) + db = SecurityGroupRule( + id=uuidutils.generate_uuid(), tenant_id=tenant_id, + security_group_id=rule['security_group_id'], + direction=rule['direction'], + remote_group_id=rule.get('remote_group_id'), + ethertype=rule['ethertype'], + protocol=rule['protocol'], + port_range_min=rule['port_range_min'], + port_range_max=rule['port_range_max'], + remote_ip_prefix=rule.get('remote_ip_prefix')) + context.session.add(db) + ret.append(self._make_security_group_rule_dict(db)) + return ret + + def create_security_group_rule(self, context, security_group_rule): + bulk_rule = {'security_group_rules': [security_group_rule]} + return self.create_security_group_rule_bulk_native(context, + bulk_rule)[0] + + def _get_ip_proto_number(self, protocol): + if protocol is None: + return + return IP_PROTOCOL_MAP.get(protocol, protocol) + + def _validate_port_range(self, rule): + """Check that port_range is valid.""" + if (rule['port_range_min'] is None and + rule['port_range_max'] is None): + return + if not rule['protocol']: + raise ext_sg.SecurityGroupProtocolRequiredWithPorts() + ip_proto = self._get_ip_proto_number(rule['protocol']) + if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]: + if (rule['port_range_min'] is not None and + rule['port_range_min'] <= rule['port_range_max']): + pass + else: + raise ext_sg.SecurityGroupInvalidPortRange() + elif ip_proto == constants.PROTO_NUM_ICMP: + for attr, field in [('port_range_min', 'type'), + ('port_range_max', 'code')]: + if rule[attr] > 255: + raise ext_sg.SecurityGroupInvalidIcmpValue( + field=field, attr=attr, value=rule[attr]) + if (rule['port_range_min'] is None and + rule['port_range_max']): + raise ext_sg.SecurityGroupMissingIcmpType( + value=rule['port_range_max']) + + def _validate_security_group_rules(self, context, security_group_rule): + """Check that rules being installed. + + Check that all rules belong to the same security + group, remote_group_id/security_group_id belong to the same tenant, + and rules are valid. + """ + new_rules = set() + tenant_ids = set() + for rules in security_group_rule['security_group_rules']: + rule = rules.get('security_group_rule') + new_rules.add(rule['security_group_id']) + + self._validate_port_range(rule) + self._validate_ip_prefix(rule) + + if rule['remote_ip_prefix'] and rule['remote_group_id']: + raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix() + + if rule['tenant_id'] not in tenant_ids: + tenant_ids.add(rule['tenant_id']) + remote_group_id = rule.get('remote_group_id') + # Check that remote_group_id exists for tenant + if remote_group_id: + self.get_security_group(context, remote_group_id, + tenant_id=rule['tenant_id']) + if len(new_rules) > 1: + raise ext_sg.SecurityGroupNotSingleGroupRules() + security_group_id = new_rules.pop() + + # Confirm single tenant and that the tenant has permission + # to add rules to this security group. 
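+        # (The loop below enforces the permission part: get_security_group()
+        # is called with each rule's tenant_id, and raises
+        # SecurityGroupNotFound if that tenant cannot access the group.)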
+ if len(tenant_ids) > 1: + raise ext_sg.SecurityGroupRulesNotSingleTenant() + for tenant_id in tenant_ids: + self.get_security_group(context, security_group_id, + tenant_id=tenant_id) + return security_group_id + + def _make_security_group_rule_dict(self, security_group_rule, fields=None): + res = {'id': security_group_rule['id'], + 'tenant_id': security_group_rule['tenant_id'], + 'security_group_id': security_group_rule['security_group_id'], + 'ethertype': security_group_rule['ethertype'], + 'direction': security_group_rule['direction'], + 'protocol': security_group_rule['protocol'], + 'port_range_min': security_group_rule['port_range_min'], + 'port_range_max': security_group_rule['port_range_max'], + 'remote_ip_prefix': security_group_rule['remote_ip_prefix'], + 'remote_group_id': security_group_rule['remote_group_id']} + + return self._fields(res, fields) + + def _make_security_group_rule_filter_dict(self, security_group_rule): + sgr = security_group_rule['security_group_rule'] + res = {'tenant_id': [sgr['tenant_id']], + 'security_group_id': [sgr['security_group_id']], + 'direction': [sgr['direction']]} + + include_if_present = ['protocol', 'port_range_max', 'port_range_min', + 'ethertype', 'remote_ip_prefix', + 'remote_group_id'] + for key in include_if_present: + value = sgr.get(key) + if value: + res[key] = [value] + return res + + def _check_for_duplicate_rules(self, context, security_group_rules): + for i in security_group_rules: + found_self = False + for j in security_group_rules: + if i['security_group_rule'] == j['security_group_rule']: + if found_self: + raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i) + found_self = True + + # Check in database if rule exists + filters = self._make_security_group_rule_filter_dict(i) + db_rules = self.get_security_group_rules(context, filters) + # Note(arosen): the call to get_security_group_rules wildcards + # values in the filter that have a value of [None]. For + # example, filters = {'remote_group_id': [None]} will return + # all security group rules regardless of their value of + # remote_group_id. Therefore it is not possible to do this + # query unless the behavior of _get_collection() + # is changed which cannot be because other methods are already + # relying on this behavor. Therefore, we do the filtering + # below to check for these corner cases. + for db_rule in db_rules: + # need to remove id from db_rule for matching + id = db_rule.pop('id') + if (i['security_group_rule'] == db_rule): + raise ext_sg.SecurityGroupRuleExists(id=id) + + def _validate_ip_prefix(self, rule): + """Check that a valid cidr was specified as remote_ip_prefix + + No need to check that it is in fact an IP address as this is already + validated by attribute validators. + Check that rule ethertype is consistent with remote_ip_prefix ip type. + Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32). 
+ """ + input_prefix = rule['remote_ip_prefix'] + if input_prefix: + addr = netaddr.IPNetwork(input_prefix) + # set input_prefix to always include the netmask: + rule['remote_ip_prefix'] = str(addr) + # check consistency of ethertype with addr version + if rule['ethertype'] != "IPv%d" % (addr.version): + raise ext_sg.SecurityGroupRuleParameterConflict( + ethertype=rule['ethertype'], cidr=input_prefix) + + def get_security_group_rules(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'security_group_rule', + limit, marker) + return self._get_collection(context, + SecurityGroupRule, + self._make_security_group_rule_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_security_group_rules_count(self, context, filters=None): + return self._get_collection_count(context, SecurityGroupRule, + filters=filters) + + def get_security_group_rule(self, context, id, fields=None): + security_group_rule = self._get_security_group_rule(context, id) + return self._make_security_group_rule_dict(security_group_rule, fields) + + def _get_security_group_rule(self, context, id): + try: + query = self._model_query(context, SecurityGroupRule) + sgr = query.filter(SecurityGroupRule.id == id).one() + except exc.NoResultFound: + raise ext_sg.SecurityGroupRuleNotFound(id=id) + return sgr + + def delete_security_group_rule(self, context, id): + with context.session.begin(subtransactions=True): + rule = self._get_security_group_rule(context, id) + context.session.delete(rule) + + def _extend_port_dict_security_group(self, port_res, port_db): + # Security group bindings will be retrieved from the sqlalchemy + # model. As they're loaded eagerly with ports because of the + # joined load they will not cause an extra query. + security_group_ids = [sec_group_mapping['security_group_id'] for + sec_group_mapping in port_db.security_groups] + port_res[ext_sg.SECURITYGROUPS] = security_group_ids + return port_res + + # Register dict extend functions for ports + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attr.PORTS, ['_extend_port_dict_security_group']) + + def _process_port_create_security_group(self, context, port, + security_group_ids): + if attr.is_attr_set(security_group_ids): + for security_group_id in security_group_ids: + self._create_port_security_group_binding(context, port['id'], + security_group_id) + # Convert to list as a set might be passed here and + # this has to be serialized + port[ext_sg.SECURITYGROUPS] = (security_group_ids and + list(security_group_ids) or []) + + def _ensure_default_security_group(self, context, tenant_id): + """Create a default security group if one doesn't exist. + + :returns: the default security group id. + """ + filters = {'name': ['default'], 'tenant_id': [tenant_id]} + default_group = self.get_security_groups(context, filters, + default_sg=True) + if not default_group: + security_group = {'security_group': {'name': 'default', + 'tenant_id': tenant_id, + 'description': 'default'}} + ret = self.create_security_group(context, security_group, True) + return ret['id'] + else: + return default_group[0]['id'] + + def _get_security_groups_on_port(self, context, port): + """Check that all security groups on port belong to tenant. + + :returns: all security groups IDs on port belonging to tenant. 
+ """ + p = port['port'] + if not attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)): + return + if p.get('device_owner') and p['device_owner'].startswith('network:'): + return + + port_sg = p.get(ext_sg.SECURITYGROUPS, []) + valid_groups = set(g['id'] for g in + self.get_security_groups(context, fields=['id'], + filters={'id': port_sg})) + + requested_groups = set(port_sg) + port_sg_missing = requested_groups - valid_groups + if port_sg_missing: + raise ext_sg.SecurityGroupNotFound(id=str(port_sg_missing[0])) + + return requested_groups + + def _ensure_default_security_group_on_port(self, context, port): + # we don't apply security groups for dhcp, router + if (port['port'].get('device_owner') and + port['port']['device_owner'].startswith('network:')): + return + tenant_id = self._get_tenant_id_for_create(context, + port['port']) + default_sg = self._ensure_default_security_group(context, tenant_id) + if attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)): + sgids = port['port'].get(ext_sg.SECURITYGROUPS) + else: + sgids = [default_sg] + port['port'][ext_sg.SECURITYGROUPS] = sgids + + def _check_update_deletes_security_groups(self, port): + """Return True if port has as a security group and it's value + is either [] or not is_attr_set, otherwise return False + """ + if (ext_sg.SECURITYGROUPS in port['port'] and + not (attr.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) + and port['port'][ext_sg.SECURITYGROUPS] != [])): + return True + return False + + def _check_update_has_security_groups(self, port): + """Return True if port has as a security group and False if the + security_group field is is_attr_set or []. + """ + if (ext_sg.SECURITYGROUPS in port['port'] and + (attr.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and + port['port'][ext_sg.SECURITYGROUPS] != [])): + return True + return False diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/securitygroups_rpc_base.py b/icehouse-patches/neutron/dvr-patch/neutron/db/securitygroups_rpc_base.py new file mode 100644 index 00000000..b9db8b39 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/securitygroups_rpc_base.py @@ -0,0 +1,374 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr +from sqlalchemy.orm import exc + +from neutron.common import constants as q_const +from neutron.common import ipv6_utils as ipv6 +from neutron.common import utils +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +IP_MASK = {q_const.IPv4: 32, + q_const.IPv6: 128} + + +DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix', + 'egress': 'dest_ip_prefix'} + + +class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): + + def create_security_group_rule(self, context, security_group_rule): + bulk_rule = {'security_group_rules': [security_group_rule]} + rule = self.create_security_group_rule_bulk_native(context, + bulk_rule)[0] + sgids = [rule['security_group_id']] + self.notifier.security_groups_rule_updated(context, sgids) + return rule + + def create_security_group_rule_bulk(self, context, + security_group_rule): + rules = super(SecurityGroupServerRpcMixin, + self).create_security_group_rule_bulk_native( + context, security_group_rule) + sgids = set([r['security_group_id'] for r in rules]) + self.notifier.security_groups_rule_updated(context, list(sgids)) + return rules + + def delete_security_group_rule(self, context, sgrid): + rule = self.get_security_group_rule(context, sgrid) + super(SecurityGroupServerRpcMixin, + self).delete_security_group_rule(context, sgrid) + self.notifier.security_groups_rule_updated(context, + [rule['security_group_id']]) + + def update_security_group_on_port(self, context, id, port, + original_port, updated_port): + """Update security groups on port. + + This method returns a flag which indicates request notification + is required and does not perform notification itself. + It is because another changes for the port may require notification. + """ + need_notify = False + port_updates = port['port'] + if (ext_sg.SECURITYGROUPS in port_updates and + not utils.compare_elements( + original_port.get(ext_sg.SECURITYGROUPS), + port_updates[ext_sg.SECURITYGROUPS])): + # delete the port binding and read it with the new rules + port_updates[ext_sg.SECURITYGROUPS] = ( + self._get_security_groups_on_port(context, port)) + self._delete_port_security_group_bindings(context, id) + self._process_port_create_security_group( + context, + updated_port, + port_updates[ext_sg.SECURITYGROUPS]) + need_notify = True + else: + updated_port[ext_sg.SECURITYGROUPS] = ( + original_port[ext_sg.SECURITYGROUPS]) + return need_notify + + def is_security_group_member_updated(self, context, + original_port, updated_port): + """Check security group member updated or not. + + This method returns a flag which indicates request notification + is required and does not perform notification itself. + It is because another changes for the port may require notification. + """ + need_notify = False + if (original_port['fixed_ips'] != updated_port['fixed_ips'] or + not utils.compare_elements( + original_port.get(ext_sg.SECURITYGROUPS), + updated_port.get(ext_sg.SECURITYGROUPS))): + need_notify = True + return need_notify + + def notify_security_groups_member_updated(self, context, port): + """Notify update event of security group members. + + The agent setups the iptables rule to allow + ingress packet from the dhcp server (as a part of provider rules), + so we need to notify an update of dhcp server ip + address to the plugin agent. 
+ security_groups_provider_updated() just notifies that an event + occurs and the plugin agent fetches the update provider + rule in the other RPC call (security_group_rules_for_devices). + """ + if port['device_owner'] == q_const.DEVICE_OWNER_DHCP: + self.notifier.security_groups_provider_updated(context) + else: + self.notifier.security_groups_member_updated( + context, port.get(ext_sg.SECURITYGROUPS)) + + +class SecurityGroupServerRpcCallbackMixin(object): + """A mix-in that enable SecurityGroup agent support in plugin + implementations. + """ + + def security_group_rules_for_devices(self, context, **kwargs): + """Return security group rules for each port. + + also convert remote_group_id rule + to source_ip_prefix and dest_ip_prefix rule + + :params devices: list of devices + :returns: port correspond to the devices with security group rules + """ + devices = kwargs.get('devices') + + ports = {} + for device in devices: + port = self.get_port_from_device(device) + if not port: + continue + if port['device_owner'].startswith('network:'): + continue + ports[port['id']] = port + return self._security_group_rules_for_ports(context, ports) + + def _select_rules_for_ports(self, context, ports): + if not ports: + return [] + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id + + sgr_sgid = sg_db.SecurityGroupRule.security_group_id + + query = context.session.query(sg_db.SecurityGroupPortBinding, + sg_db.SecurityGroupRule) + query = query.join(sg_db.SecurityGroupRule, + sgr_sgid == sg_binding_sgid) + query = query.filter(sg_binding_port.in_(ports.keys())) + return query.all() + + def _select_ips_for_remote_group(self, context, remote_group_ids): + ips_by_group = {} + if not remote_group_ids: + return ips_by_group + for remote_group_id in remote_group_ids: + ips_by_group[remote_group_id] = [] + + ip_port = models_v2.IPAllocation.port_id + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id + + query = context.session.query(sg_binding_sgid, + models_v2.Port, + models_v2.IPAllocation.ip_address) + query = query.join(models_v2.IPAllocation, + ip_port == sg_binding_port) + query = query.join(models_v2.Port, + ip_port == models_v2.Port.id) + query = query.filter(sg_binding_sgid.in_(remote_group_ids)) + for security_group_id, port, ip_address in query: + ips_by_group[security_group_id].append(ip_address) + # if there are allowed_address_pairs add them + if getattr(port, 'allowed_address_pairs', None): + for address_pair in port.allowed_address_pairs: + ips_by_group[security_group_id].append( + address_pair['ip_address']) + return ips_by_group + + def _select_remote_group_ids(self, ports): + remote_group_ids = [] + for port in ports.values(): + for rule in port.get('security_group_rules'): + remote_group_id = rule.get('remote_group_id') + if remote_group_id: + remote_group_ids.append(remote_group_id) + return remote_group_ids + + def _select_network_ids(self, ports): + return set((port['network_id'] for port in ports.values())) + + def _select_dhcp_ips_for_network_ids(self, context, network_ids): + if not network_ids: + return {} + query = context.session.query(models_v2.Port, + models_v2.IPAllocation.ip_address) + query = query.join(models_v2.IPAllocation) + query = query.filter(models_v2.Port.network_id.in_(network_ids)) + owner = q_const.DEVICE_OWNER_DHCP + query = query.filter(models_v2.Port.device_owner == owner) + ips = {} + + for network_id in 
network_ids: + ips[network_id] = [] + + for port, ip in query: + ips[port['network_id']].append(ip) + return ips + + def _select_ra_ips_for_network_ids(self, context, network_ids): + """Select IP addresses to allow sending router advertisement from. + + If OpenStack dnsmasq sends RA, get link local address of + gateway and allow RA from this Link Local address. + The gateway port link local address will only be obtained + when router is created before VM instance is booted and + subnet is attached to router. + + If OpenStack doesn't send RA, allow RA from gateway IP. + Currently, the gateway IP needs to be link local to be able + to send RA to VM. + """ + if not network_ids: + return {} + ips = {} + for network_id in network_ids: + ips[network_id] = set([]) + query = context.session.query(models_v2.Subnet) + subnets = query.filter(models_v2.Subnet.network_id.in_(network_ids)) + for subnet in subnets: + gateway_ip = subnet['gateway_ip'] + if subnet['ip_version'] != 6 or not gateway_ip: + continue + # TODO(xuhanp): Figure out how to call the following code + # each time router is created or updated. + if not netaddr.IPAddress(gateway_ip).is_link_local(): + if subnet['ipv6_ra_mode']: + gateway_ip = self._get_lla_gateway_ip_for_subnet(context, + subnet) + else: + # TODO(xuhanp):Figure out how to allow gateway IP from + # existing device to be global address and figure out the + # link local address by other method. + continue + if gateway_ip: + ips[subnet['network_id']].add(gateway_ip) + + return ips + + def _get_lla_gateway_ip_for_subnet(self, context, subnet): + query = context.session.query(models_v2.Port) + query = query.join(models_v2.IPAllocation) + query = query.filter( + models_v2.IPAllocation.subnet_id == subnet['id']) + query = query.filter( + models_v2.IPAllocation.ip_address == subnet['gateway_ip']) + query = query.filter(models_v2.Port.device_owner == + q_const.DEVICE_OWNER_ROUTER_INTF) + try: + gateway_port = query.one() + except (exc.NoResultFound, exc.MultipleResultsFound): + LOG.warn(_('No valid gateway port on subnet %s is ' + 'found for IPv6 RA'), subnet['id']) + return + mac_address = gateway_port['mac_address'] + lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( + q_const.IPV6_LLA_PREFIX, + mac_address)) + return lla_ip + + def _convert_remote_group_id_to_ip_prefix(self, context, ports): + remote_group_ids = self._select_remote_group_ids(ports) + ips = self._select_ips_for_remote_group(context, remote_group_ids) + for port in ports.values(): + updated_rule = [] + for rule in port.get('security_group_rules'): + remote_group_id = rule.get('remote_group_id') + direction = rule.get('direction') + direction_ip_prefix = DIRECTION_IP_PREFIX[direction] + if not remote_group_id: + updated_rule.append(rule) + continue + + port['security_group_source_groups'].append(remote_group_id) + base_rule = rule + for ip in ips[remote_group_id]: + if ip in port.get('fixed_ips', []): + continue + ip_rule = base_rule.copy() + version = netaddr.IPNetwork(ip).version + ethertype = 'IPv%s' % version + if base_rule['ethertype'] != ethertype: + continue + ip_rule[direction_ip_prefix] = str( + netaddr.IPNetwork(ip).cidr) + updated_rule.append(ip_rule) + port['security_group_rules'] = updated_rule + return ports + + def _add_ingress_dhcp_rule(self, port, ips): + dhcp_ips = ips.get(port['network_id']) + for dhcp_ip in dhcp_ips: + if not netaddr.IPAddress(dhcp_ip).version == 4: + return + + dhcp_rule = {'direction': 'ingress', + 'ethertype': q_const.IPv4, + 'protocol': 'udp', + 'port_range_min': 68, + 
'port_range_max': 68, + 'source_port_range_min': 67, + 'source_port_range_max': 67} + dhcp_rule['source_ip_prefix'] = "%s/%s" % (dhcp_ip, + IP_MASK[q_const.IPv4]) + port['security_group_rules'].append(dhcp_rule) + + def _add_ingress_ra_rule(self, port, ips): + ra_ips = ips.get(port['network_id']) + for ra_ip in ra_ips: + if not netaddr.IPAddress(ra_ip).version == 6: + return + + ra_rule = {'direction': 'ingress', + 'ethertype': q_const.IPv6, + 'protocol': q_const.PROTO_NAME_ICMP_V6, + 'source_ip_prefix': ra_ip, + 'source_port_range_min': q_const.ICMPV6_TYPE_RA} + port['security_group_rules'].append(ra_rule) + + def _apply_provider_rule(self, context, ports): + network_ids = self._select_network_ids(ports) + ips_dhcp = self._select_dhcp_ips_for_network_ids(context, network_ids) + ips_ra = self._select_ra_ips_for_network_ids(context, network_ids) + for port in ports.values(): + self._add_ingress_ra_rule(port, ips_ra) + self._add_ingress_dhcp_rule(port, ips_dhcp) + + def _security_group_rules_for_ports(self, context, ports): + rules_in_db = self._select_rules_for_ports(context, ports) + for (binding, rule_in_db) in rules_in_db: + port_id = binding['port_id'] + port = ports[port_id] + direction = rule_in_db['direction'] + rule_dict = { + 'security_group_id': rule_in_db['security_group_id'], + 'direction': direction, + 'ethertype': rule_in_db['ethertype'], + } + for key in ('protocol', 'port_range_min', 'port_range_max', + 'remote_ip_prefix', 'remote_group_id'): + if rule_in_db.get(key): + if key == 'remote_ip_prefix': + direction_ip_prefix = DIRECTION_IP_PREFIX[direction] + rule_dict[direction_ip_prefix] = rule_in_db[key] + continue + rule_dict[key] = rule_in_db[key] + port['security_group_rules'].append(rule_dict) + self._apply_provider_rule(context, ports) + return self._convert_remote_group_id_to_ip_prefix(context, ports) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/servicetype_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/servicetype_db.py new file mode 100644 index 00000000..3e9ad15e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/servicetype_db.py @@ -0,0 +1,99 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Salvatore Orlando, VMware +# + +import sqlalchemy as sa + +from neutron.db import api as db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.services import provider_configuration as pconf + +LOG = logging.getLogger(__name__) + + +class ProviderResourceAssociation(model_base.BASEV2): + provider_name = sa.Column(sa.String(255), + nullable=False, primary_key=True) + # should be manualy deleted on resource deletion + resource_id = sa.Column(sa.String(36), nullable=False, primary_key=True, + unique=True) + + +class ServiceTypeManager(object): + """Manage service type objects in Neutron.""" + + _instance = None + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + self._initialize_db() + self._load_conf() + + def _initialize_db(self): + db.configure_db() + db.register_models(models_v2.model_base.BASEV2) + + def _load_conf(self): + self.conf = pconf.ProviderConfiguration( + pconf.parse_service_provider_opt()) + + def get_service_providers(self, context, filters=None, fields=None): + return self.conf.get_service_providers(filters, fields) + + def get_default_service_provider(self, context, service_type): + """Return the default provider for a given service type.""" + filters = {'service_type': [service_type], + 'default': [True]} + providers = self.get_service_providers(context, filters=filters) + # By construction we expect at most a single item in provider + if not providers: + raise pconf.DefaultServiceProviderNotFound( + service_type=service_type + ) + return providers[0] + + def add_resource_association(self, context, service_type, provider_name, + resource_id): + r = self.conf.get_service_providers( + filters={'service_type': [service_type], 'name': [provider_name]}) + if not r: + raise pconf.ServiceProviderNotFound(provider=provider_name, + service_type=service_type) + + with context.session.begin(subtransactions=True): + # we don't actually need service type for association. + # resource_id is unique and belongs to specific service + # which knows its type + assoc = ProviderResourceAssociation(provider_name=provider_name, + resource_id=resource_id) + context.session.add(assoc) + + def del_resource_associations(self, context, resource_ids): + if not resource_ids: + return + with context.session.begin(subtransactions=True): + (context.session.query(ProviderResourceAssociation). + filter( + ProviderResourceAssociation.resource_id.in_(resource_ids)). + delete(synchronize_session='fetch')) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/sqlalchemyutils.py b/icehouse-patches/neutron/dvr-patch/neutron/db/sqlalchemyutils.py new file mode 100644 index 00000000..adf03443 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/sqlalchemyutils.py @@ -0,0 +1,107 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
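+
+# paginate_query() below adds ORDER BY plus marker-based pagination to a
+# query. A minimal, hypothetical call (assuming a models_v2.Port query and a
+# previously fetched marker row last_port) might look like:
+#
+#     query = paginate_query(query, models_v2.Port, limit=10,
+#                            sorts=[('id', True)], marker_obj=last_port)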
+ +from six import moves +import sqlalchemy +from sqlalchemy.orm.properties import RelationshipProperty + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def paginate_query(query, model, limit, sorts, marker_obj=None): + """Returns a query with sorting / pagination criteria added. + + Pagination works by requiring a unique sort key, specified by sorts. + (If sort keys is not unique, then we risk looping through values.) + We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort key, this would be easy: sort_key > X. + With a compound-values sort key, (k1, k2, k3) we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + The reason of didn't use OFFSET clause was it don't scale, please refer + discussion at https://lists.launchpad.net/openstack/msg02547.html + + We also have to cope with different sort directions. + + Typically, the id of the last row is used as the client-facing pagination + marker, then the actual marker object must be fetched from the db and + passed in to us as marker. + + :param query: the query object to which we should add paging/sorting + :param model: the ORM model class + :param limit: maximum number of items to return + :param sorts: array of attributes and direction by which results should + be sorted + :param marker: the last item of the previous page; we returns the next + results after this value. + :rtype: sqlalchemy.orm.query.Query + :return: The query with sorting/pagination added. + """ + if not sorts: + return query + + # A primary key must be specified in sort keys + assert not (limit and + len(set(dict(sorts).keys()) & + set(model.__table__.primary_key.columns.keys())) == 0) + + # Add sorting + for sort_key, sort_direction in sorts: + sort_dir_func = sqlalchemy.asc if sort_direction else sqlalchemy.desc + try: + sort_key_attr = getattr(model, sort_key) + except AttributeError: + # Extension attribute doesn't support for sorting. 
Because it + # existed in attr_info, it will be catched at here + msg = _("%s is invalid attribute for sort_key") % sort_key + raise n_exc.BadRequest(resource=model.__tablename__, msg=msg) + if isinstance(sort_key_attr.property, RelationshipProperty): + msg = _("The attribute '%(attr)s' is reference to other " + "resource, can't used by sort " + "'%(resource)s'") % {'attr': sort_key, + 'resource': model.__tablename__} + raise n_exc.BadRequest(resource=model.__tablename__, msg=msg) + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if marker_obj: + marker_values = [getattr(marker_obj, sort[0]) for sort in sorts] + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i, sort in enumerate(sorts): + crit_attrs = [(getattr(model, sorts[j][0]) == marker_values[j]) + for j in moves.xrange(i)] + model_attr = getattr(model, sort[0]) + if sort[1]: + crit_attrs.append((model_attr > marker_values[i])) + else: + crit_attrs.append((model_attr < marker_values[i])) + + criteria = sqlalchemy.sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sqlalchemy.sql.or_(*criteria_list) + query = query.filter(f) + + if limit: + query = query.limit(limit) + + return query diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/vpn/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/db/vpn/__init__.py new file mode 100644 index 00000000..060b986b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/vpn/__init__.py @@ -0,0 +1,16 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/db/vpn/vpn_db.py b/icehouse-patches/neutron/dvr-patch/neutron/db/vpn/vpn_db.py new file mode 100644 index 00000000..f3d11fec --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/db/vpn/vpn_db.py @@ -0,0 +1,689 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard. 
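+
+# Overview: this module defines the VPNaaS data model (IPsecPeerCidr,
+# IPsecPolicy, IKEPolicy, IPsecSiteConnection, VPNService) together with
+# VPNPluginDb, the SQLAlchemy-backed implementation of vpnaas.VPNPluginBase.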
+ +import netaddr +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.common import constants as n_constants +from neutron.db import api as qdbapi +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import l3_agentschedulers_db as l3_agent_db +from neutron.db import l3_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import vpnaas +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants +from neutron.plugins.common import utils + +LOG = logging.getLogger(__name__) + +IP_MIN_MTU = {4: 68, 6: 1280} + + +class IPsecPeerCidr(model_base.BASEV2): + """Internal representation of a IPsec Peer Cidrs.""" + + cidr = sa.Column(sa.String(32), nullable=False, primary_key=True) + ipsec_site_connection_id = sa.Column( + sa.String(36), + sa.ForeignKey('ipsec_site_connections.id', + ondelete="CASCADE"), + primary_key=True) + + +class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 IPsecPolicy Object.""" + __tablename__ = 'ipsecpolicies' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp", + name="ipsec_transform_protocols"), + nullable=False) + auth_algorithm = sa.Column(sa.Enum("sha1", + name="vpn_auth_algorithms"), + nullable=False) + encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128", + "aes-256", "aes-192", + name="vpn_encrypt_algorithms"), + nullable=False) + encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport", + name="ipsec_encapsulations"), + nullable=False) + lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes", + name="vpn_lifetime_units"), + nullable=False) + lifetime_value = sa.Column(sa.Integer, nullable=False) + pfs = sa.Column(sa.Enum("group2", "group5", "group14", + name="vpn_pfs"), nullable=False) + + +class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 IKEPolicy Object.""" + __tablename__ = 'ikepolicies' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + auth_algorithm = sa.Column(sa.Enum("sha1", + name="vpn_auth_algorithms"), + nullable=False) + encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128", + "aes-256", "aes-192", + name="vpn_encrypt_algorithms"), + nullable=False) + phase1_negotiation_mode = sa.Column(sa.Enum("main", + name="ike_phase1_mode"), + nullable=False) + lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes", + name="vpn_lifetime_units"), + nullable=False) + lifetime_value = sa.Column(sa.Integer, nullable=False) + ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"), + nullable=False) + pfs = sa.Column(sa.Enum("group2", "group5", "group14", + name="vpn_pfs"), nullable=False) + + +class IPsecSiteConnection(model_base.BASEV2, + models_v2.HasId, models_v2.HasTenant): + """Represents a IPsecSiteConnection Object.""" + __tablename__ = 'ipsec_site_connections' + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + peer_address = sa.Column(sa.String(255), nullable=False) + peer_id = sa.Column(sa.String(255), nullable=False) + route_mode = sa.Column(sa.String(8), nullable=False) + mtu = sa.Column(sa.Integer, nullable=False) + initiator = sa.Column(sa.Enum("bi-directional", "response-only", + name="vpn_initiators"), nullable=False) + 
auth_mode = sa.Column(sa.String(16), nullable=False) + psk = sa.Column(sa.String(255), nullable=False) + dpd_action = sa.Column(sa.Enum("hold", "clear", + "restart", "disabled", + "restart-by-peer", name="vpn_dpd_actions"), + nullable=False) + dpd_interval = sa.Column(sa.Integer, nullable=False) + dpd_timeout = sa.Column(sa.Integer, nullable=False) + status = sa.Column(sa.String(16), nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + vpnservice_id = sa.Column(sa.String(36), + sa.ForeignKey('vpnservices.id'), + nullable=False) + ipsecpolicy_id = sa.Column(sa.String(36), + sa.ForeignKey('ipsecpolicies.id'), + nullable=False) + ikepolicy_id = sa.Column(sa.String(36), + sa.ForeignKey('ikepolicies.id'), + nullable=False) + ipsecpolicy = orm.relationship( + IPsecPolicy, backref='ipsec_site_connection') + ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection') + peer_cidrs = orm.relationship(IPsecPeerCidr, + backref='ipsec_site_connection', + lazy='joined', + cascade='all, delete, delete-orphan') + + +class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a v2 VPNService Object.""" + name = sa.Column(sa.String(255)) + description = sa.Column(sa.String(255)) + status = sa.Column(sa.String(16), nullable=False) + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'), + nullable=False) + router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'), + nullable=False) + subnet = orm.relationship(models_v2.Subnet) + router = orm.relationship(l3_db.Router) + ipsec_site_connections = orm.relationship( + IPsecSiteConnection, + backref='vpnservice', + cascade="all, delete-orphan") + + +class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin): + """VPN plugin database class using SQLAlchemy models.""" + + def __init__(self): + """Do the initialization for the vpn service plugin here.""" + qdbapi.register_models() + + def update_status(self, context, model, v_id, status): + with context.session.begin(subtransactions=True): + v_db = self._get_resource(context, model, v_id) + v_db.update({'status': status}) + + def _get_resource(self, context, model, v_id): + try: + r = self._get_by_id(context, model, v_id) + except exc.NoResultFound: + with excutils.save_and_reraise_exception(reraise=False) as ctx: + if issubclass(model, IPsecSiteConnection): + raise vpnaas.IPsecSiteConnectionNotFound( + ipsec_site_conn_id=v_id + ) + elif issubclass(model, IKEPolicy): + raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id) + elif issubclass(model, IPsecPolicy): + raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id) + elif issubclass(model, VPNService): + raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id) + ctx.reraise = True + return r + + def assert_update_allowed(self, obj): + status = getattr(obj, 'status', None) + _id = getattr(obj, 'id', None) + if utils.in_pending_status(status): + raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status) + + def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None): + + res = {'id': ipsec_site_conn['id'], + 'tenant_id': ipsec_site_conn['tenant_id'], + 'name': ipsec_site_conn['name'], + 'description': ipsec_site_conn['description'], + 'peer_address': ipsec_site_conn['peer_address'], + 'peer_id': ipsec_site_conn['peer_id'], + 'route_mode': ipsec_site_conn['route_mode'], + 'mtu': ipsec_site_conn['mtu'], + 'auth_mode': ipsec_site_conn['auth_mode'], + 'psk': ipsec_site_conn['psk'], + 'initiator': 
ipsec_site_conn['initiator'], + 'dpd': { + 'action': ipsec_site_conn['dpd_action'], + 'interval': ipsec_site_conn['dpd_interval'], + 'timeout': ipsec_site_conn['dpd_timeout'] + }, + 'admin_state_up': ipsec_site_conn['admin_state_up'], + 'status': ipsec_site_conn['status'], + 'vpnservice_id': ipsec_site_conn['vpnservice_id'], + 'ikepolicy_id': ipsec_site_conn['ikepolicy_id'], + 'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'], + 'peer_cidrs': [pcidr['cidr'] + for pcidr in ipsec_site_conn['peer_cidrs']] + } + + return self._fields(res, fields) + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + ipsec_sitecon = ipsec_site_connection['ipsec_site_connection'] + dpd = ipsec_sitecon['dpd'] + ipsec_sitecon['dpd_action'] = dpd.get('action', 'hold') + ipsec_sitecon['dpd_interval'] = dpd.get('interval', 30) + ipsec_sitecon['dpd_timeout'] = dpd.get('timeout', 120) + tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon) + self._check_dpd(ipsec_sitecon) + with context.session.begin(subtransactions=True): + #Check permissions + self._get_resource(context, + VPNService, + ipsec_sitecon['vpnservice_id']) + self._get_resource(context, + IKEPolicy, + ipsec_sitecon['ikepolicy_id']) + self._get_resource(context, + IPsecPolicy, + ipsec_sitecon['ipsecpolicy_id']) + self._check_mtu(context, + ipsec_sitecon['mtu'], + ipsec_sitecon['vpnservice_id']) + ipsec_site_conn_db = IPsecSiteConnection( + id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=ipsec_sitecon['name'], + description=ipsec_sitecon['description'], + peer_address=ipsec_sitecon['peer_address'], + peer_id=ipsec_sitecon['peer_id'], + route_mode='static', + mtu=ipsec_sitecon['mtu'], + auth_mode='psk', + psk=ipsec_sitecon['psk'], + initiator=ipsec_sitecon['initiator'], + dpd_action=ipsec_sitecon['dpd_action'], + dpd_interval=ipsec_sitecon['dpd_interval'], + dpd_timeout=ipsec_sitecon['dpd_timeout'], + admin_state_up=ipsec_sitecon['admin_state_up'], + status=constants.PENDING_CREATE, + vpnservice_id=ipsec_sitecon['vpnservice_id'], + ikepolicy_id=ipsec_sitecon['ikepolicy_id'], + ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id'] + ) + context.session.add(ipsec_site_conn_db) + for cidr in ipsec_sitecon['peer_cidrs']: + peer_cidr_db = IPsecPeerCidr( + cidr=cidr, + ipsec_site_connection_id=ipsec_site_conn_db['id'] + ) + context.session.add(peer_cidr_db) + return self._make_ipsec_site_connection_dict(ipsec_site_conn_db) + + def _check_dpd(self, ipsec_sitecon): + if ipsec_sitecon['dpd_timeout'] <= ipsec_sitecon['dpd_interval']: + raise vpnaas.IPsecSiteConnectionDpdIntervalValueError( + attr='dpd_timeout') + + def _check_mtu(self, context, mtu, vpnservice_id): + vpn_service_db = self._get_vpnservice(context, vpnservice_id) + subnet = vpn_service_db.subnet['cidr'] + version = netaddr.IPNetwork(subnet).version + if mtu < IP_MIN_MTU[version]: + raise vpnaas.IPsecSiteConnectionMtuError(mtu=mtu, version=version) + + def update_ipsec_site_connection( + self, context, + ipsec_site_conn_id, ipsec_site_connection): + conn = ipsec_site_connection['ipsec_site_connection'] + changed_peer_cidrs = False + with context.session.begin(subtransactions=True): + ipsec_site_conn_db = self._get_resource( + context, + IPsecSiteConnection, + ipsec_site_conn_id) + dpd = conn.get('dpd', {}) + if dpd.get('action'): + conn['dpd_action'] = dpd.get('action') + if dpd.get('interval') or dpd.get('timeout'): + conn['dpd_interval'] = dpd.get( + 'interval', ipsec_site_conn_db.dpd_interval) + conn['dpd_timeout'] = dpd.get( + 'timeout', 
ipsec_site_conn_db.dpd_timeout) + self._check_dpd(conn) + + if 'mtu' in conn: + self._check_mtu(context, + conn['mtu'], + ipsec_site_conn_db.vpnservice_id) + + self.assert_update_allowed(ipsec_site_conn_db) + + if "peer_cidrs" in conn: + changed_peer_cidrs = True + old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs'] + old_peer_cidr_dict = dict( + (peer_cidr['cidr'], peer_cidr) + for peer_cidr in old_peer_cidr_list) + new_peer_cidr_set = set(conn["peer_cidrs"]) + old_peer_cidr_set = set(old_peer_cidr_dict) + + new_peer_cidrs = list(new_peer_cidr_set) + for peer_cidr in old_peer_cidr_set - new_peer_cidr_set: + context.session.delete(old_peer_cidr_dict[peer_cidr]) + for peer_cidr in new_peer_cidr_set - old_peer_cidr_set: + pcidr = IPsecPeerCidr( + cidr=peer_cidr, + ipsec_site_connection_id=ipsec_site_conn_id) + context.session.add(pcidr) + del conn["peer_cidrs"] + if conn: + ipsec_site_conn_db.update(conn) + result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db) + if changed_peer_cidrs: + result['peer_cidrs'] = new_peer_cidrs + return result + + def delete_ipsec_site_connection(self, context, ipsec_site_conn_id): + with context.session.begin(subtransactions=True): + ipsec_site_conn_db = self._get_resource( + context, IPsecSiteConnection, ipsec_site_conn_id + ) + context.session.delete(ipsec_site_conn_db) + + def _get_ipsec_site_connection( + self, context, ipsec_site_conn_id): + return self._get_resource( + context, IPsecSiteConnection, ipsec_site_conn_id) + + def get_ipsec_site_connection(self, context, + ipsec_site_conn_id, fields=None): + ipsec_site_conn_db = self._get_ipsec_site_connection( + context, ipsec_site_conn_id) + return self._make_ipsec_site_connection_dict( + ipsec_site_conn_db, fields) + + def get_ipsec_site_connections(self, context, filters=None, fields=None): + return self._get_collection(context, IPsecSiteConnection, + self._make_ipsec_site_connection_dict, + filters=filters, fields=fields) + + def update_ipsec_site_conn_status(self, context, conn_id, new_status): + with context.session.begin(): + self._update_connection_status(context, conn_id, new_status, True) + + def _update_connection_status(self, context, conn_id, new_status, + updated_pending): + """Update the connection status, if changed. + + If the connection is not in a pending state, unconditionally update + the status. Likewise, if in a pending state, and have an indication + that the status has changed, then update the database. 
+ """ + try: + conn_db = self._get_ipsec_site_connection(context, conn_id) + except vpnaas.IPsecSiteConnectionNotFound: + return + if not utils.in_pending_status(conn_db.status) or updated_pending: + conn_db.status = new_status + + def _make_ikepolicy_dict(self, ikepolicy, fields=None): + res = {'id': ikepolicy['id'], + 'tenant_id': ikepolicy['tenant_id'], + 'name': ikepolicy['name'], + 'description': ikepolicy['description'], + 'auth_algorithm': ikepolicy['auth_algorithm'], + 'encryption_algorithm': ikepolicy['encryption_algorithm'], + 'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'], + 'lifetime': { + 'units': ikepolicy['lifetime_units'], + 'value': ikepolicy['lifetime_value'], + }, + 'ike_version': ikepolicy['ike_version'], + 'pfs': ikepolicy['pfs'] + } + + return self._fields(res, fields) + + def create_ikepolicy(self, context, ikepolicy): + ike = ikepolicy['ikepolicy'] + tenant_id = self._get_tenant_id_for_create(context, ike) + lifetime_info = ike.get('lifetime', []) + lifetime_units = lifetime_info.get('units', 'seconds') + lifetime_value = lifetime_info.get('value', 3600) + + with context.session.begin(subtransactions=True): + ike_db = IKEPolicy( + id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=ike['name'], + description=ike['description'], + auth_algorithm=ike['auth_algorithm'], + encryption_algorithm=ike['encryption_algorithm'], + phase1_negotiation_mode=ike['phase1_negotiation_mode'], + lifetime_units=lifetime_units, + lifetime_value=lifetime_value, + ike_version=ike['ike_version'], + pfs=ike['pfs'] + ) + + context.session.add(ike_db) + return self._make_ikepolicy_dict(ike_db) + + def update_ikepolicy(self, context, ikepolicy_id, ikepolicy): + ike = ikepolicy['ikepolicy'] + with context.session.begin(subtransactions=True): + ikepolicy = context.session.query(IPsecSiteConnection).filter_by( + ikepolicy_id=ikepolicy_id).first() + if ikepolicy: + raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id) + ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id) + if ike: + lifetime_info = ike.get('lifetime') + if lifetime_info: + if lifetime_info.get('units'): + ike['lifetime_units'] = lifetime_info['units'] + if lifetime_info.get('value'): + ike['lifetime_value'] = lifetime_info['value'] + ike_db.update(ike) + return self._make_ikepolicy_dict(ike_db) + + def delete_ikepolicy(self, context, ikepolicy_id): + with context.session.begin(subtransactions=True): + ikepolicy = context.session.query(IPsecSiteConnection).filter_by( + ikepolicy_id=ikepolicy_id).first() + if ikepolicy: + raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id) + ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id) + context.session.delete(ike_db) + + def get_ikepolicy(self, context, ikepolicy_id, fields=None): + ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id) + return self._make_ikepolicy_dict(ike_db, fields) + + def get_ikepolicies(self, context, filters=None, fields=None): + return self._get_collection(context, IKEPolicy, + self._make_ikepolicy_dict, + filters=filters, fields=fields) + + def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None): + + res = {'id': ipsecpolicy['id'], + 'tenant_id': ipsecpolicy['tenant_id'], + 'name': ipsecpolicy['name'], + 'description': ipsecpolicy['description'], + 'transform_protocol': ipsecpolicy['transform_protocol'], + 'auth_algorithm': ipsecpolicy['auth_algorithm'], + 'encryption_algorithm': ipsecpolicy['encryption_algorithm'], + 'encapsulation_mode': ipsecpolicy['encapsulation_mode'], + 'lifetime': { + 'units': 
ipsecpolicy['lifetime_units'], + 'value': ipsecpolicy['lifetime_value'], + }, + 'pfs': ipsecpolicy['pfs'] + } + + return self._fields(res, fields) + + def create_ipsecpolicy(self, context, ipsecpolicy): + ipsecp = ipsecpolicy['ipsecpolicy'] + tenant_id = self._get_tenant_id_for_create(context, ipsecp) + lifetime_info = ipsecp['lifetime'] + lifetime_units = lifetime_info.get('units', 'seconds') + lifetime_value = lifetime_info.get('value', 3600) + + with context.session.begin(subtransactions=True): + ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=ipsecp['name'], + description=ipsecp['description'], + transform_protocol=ipsecp['transform_' + 'protocol'], + auth_algorithm=ipsecp['auth_algorithm'], + encryption_algorithm=ipsecp['encryption_' + 'algorithm'], + encapsulation_mode=ipsecp['encapsulation_' + 'mode'], + lifetime_units=lifetime_units, + lifetime_value=lifetime_value, + pfs=ipsecp['pfs']) + context.session.add(ipsecp_db) + return self._make_ipsecpolicy_dict(ipsecp_db) + + def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy): + ipsecp = ipsecpolicy['ipsecpolicy'] + with context.session.begin(subtransactions=True): + ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by( + ipsecpolicy_id=ipsecpolicy_id).first() + if ipsecpolicy: + raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id) + ipsecp_db = self._get_resource(context, + IPsecPolicy, + ipsecpolicy_id) + if ipsecp: + lifetime_info = ipsecp.get('lifetime') + if lifetime_info: + if lifetime_info.get('units'): + ipsecp['lifetime_units'] = lifetime_info['units'] + if lifetime_info.get('value'): + ipsecp['lifetime_value'] = lifetime_info['value'] + ipsecp_db.update(ipsecp) + return self._make_ipsecpolicy_dict(ipsecp_db) + + def delete_ipsecpolicy(self, context, ipsecpolicy_id): + with context.session.begin(subtransactions=True): + ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by( + ipsecpolicy_id=ipsecpolicy_id).first() + if ipsecpolicy: + raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id) + ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id) + context.session.delete(ipsec_db) + + def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None): + ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id) + return self._make_ipsecpolicy_dict(ipsec_db, fields) + + def get_ipsecpolicies(self, context, filters=None, fields=None): + return self._get_collection(context, IPsecPolicy, + self._make_ipsecpolicy_dict, + filters=filters, fields=fields) + + def _make_vpnservice_dict(self, vpnservice, fields=None): + res = {'id': vpnservice['id'], + 'name': vpnservice['name'], + 'description': vpnservice['description'], + 'tenant_id': vpnservice['tenant_id'], + 'subnet_id': vpnservice['subnet_id'], + 'router_id': vpnservice['router_id'], + 'admin_state_up': vpnservice['admin_state_up'], + 'status': vpnservice['status']} + return self._fields(res, fields) + + def _check_router(self, context, router_id): + l3_plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + router = l3_plugin.get_router(context, router_id) + if not router.get(l3_db.EXTERNAL_GW_INFO): + raise vpnaas.RouterIsNotExternal(router_id=router_id) + + def _check_subnet_id(self, context, router_id, subnet_id): + core_plugin = manager.NeutronManager.get_plugin() + ports = core_plugin.get_ports( + context, + filters={ + 'fixed_ips': {'subnet_id': [subnet_id]}, + 'device_id': [router_id]}) + if not ports: + raise 
vpnaas.SubnetIsNotConnectedToRouter( + subnet_id=subnet_id, + router_id=router_id) + + def create_vpnservice(self, context, vpnservice): + vpns = vpnservice['vpnservice'] + tenant_id = self._get_tenant_id_for_create(context, vpns) + self._check_router(context, vpns['router_id']) + self._check_subnet_id(context, vpns['router_id'], vpns['subnet_id']) + with context.session.begin(subtransactions=True): + vpnservice_db = VPNService(id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=vpns['name'], + description=vpns['description'], + subnet_id=vpns['subnet_id'], + router_id=vpns['router_id'], + admin_state_up=vpns['admin_state_up'], + status=constants.PENDING_CREATE) + context.session.add(vpnservice_db) + return self._make_vpnservice_dict(vpnservice_db) + + def update_vpnservice(self, context, vpnservice_id, vpnservice): + vpns = vpnservice['vpnservice'] + with context.session.begin(subtransactions=True): + vpns_db = self._get_resource(context, VPNService, vpnservice_id) + self.assert_update_allowed(vpns_db) + if vpns: + vpns_db.update(vpns) + return self._make_vpnservice_dict(vpns_db) + + def delete_vpnservice(self, context, vpnservice_id): + with context.session.begin(subtransactions=True): + if context.session.query(IPsecSiteConnection).filter_by( + vpnservice_id=vpnservice_id + ).first(): + raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id) + vpns_db = self._get_resource(context, VPNService, vpnservice_id) + context.session.delete(vpns_db) + + def _get_vpnservice(self, context, vpnservice_id): + return self._get_resource(context, VPNService, vpnservice_id) + + def get_vpnservice(self, context, vpnservice_id, fields=None): + vpns_db = self._get_resource(context, VPNService, vpnservice_id) + return self._make_vpnservice_dict(vpns_db, fields) + + def get_vpnservices(self, context, filters=None, fields=None): + return self._get_collection(context, VPNService, + self._make_vpnservice_dict, + filters=filters, fields=fields) + + def check_router_in_use(self, context, router_id): + vpnservices = self.get_vpnservices( + context, filters={'router_id': [router_id]}) + if vpnservices: + raise vpnaas.RouterInUseByVPNService( + router_id=router_id, + vpnservice_id=vpnservices[0]['id']) + + +class VPNPluginRpcDbMixin(): + def _get_agent_hosting_vpn_services(self, context, host): + + plugin = manager.NeutronManager.get_plugin() + agent = plugin._get_agent_by_type_and_host( + context, n_constants.AGENT_TYPE_L3, host) + if not agent.admin_state_up: + return [] + query = context.session.query(VPNService) + query = query.join(IPsecSiteConnection) + query = query.join(IKEPolicy) + query = query.join(IPsecPolicy) + query = query.join(IPsecPeerCidr) + query = query.join(l3_agent_db.RouterL3AgentBinding, + l3_agent_db.RouterL3AgentBinding.router_id == + VPNService.router_id) + query = query.filter( + l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id) + return query + + def update_status_by_agent(self, context, service_status_info_list): + """Updating vpnservice and vpnconnection status. + + :param context: context variable + :param service_status_info_list: list of status + The structure is + [{id: vpnservice_id, + status: ACTIVE|DOWN|ERROR, + updated_pending_status: True|False + ipsec_site_connections: { + ipsec_site_connection_id: { + status: ACTIVE|DOWN|ERROR, + updated_pending_status: True|False + } + }] + The agent will set updated_pending_status as True, + when agent update any pending status. 
+ """ + with context.session.begin(subtransactions=True): + for vpnservice in service_status_info_list: + try: + vpnservice_db = self._get_vpnservice( + context, vpnservice['id']) + except vpnaas.VPNServiceNotFound: + LOG.warn(_('vpnservice %s in db is already deleted'), + vpnservice['id']) + continue + + if (not utils.in_pending_status(vpnservice_db.status) + or vpnservice['updated_pending_status']): + vpnservice_db.status = vpnservice['status'] + for conn_id, conn in vpnservice[ + 'ipsec_site_connections'].items(): + self._update_connection_status( + context, conn_id, conn['status'], + conn['updated_pending_status']) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/debug/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/debug/__init__.py new file mode 100644 index 00000000..404a8253 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/debug/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/debug/commands.py b/icehouse-patches/neutron/dvr-patch/neutron/debug/commands.py new file mode 100644 index 00000000..775b0fe7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/debug/commands.py @@ -0,0 +1,155 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
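# --- Editor's note (illustrative sketch, not part of the patch) -------------
# Shape of the status report consumed by
# VPNPluginRpcDbMixin.update_status_by_agent() above; the UUIDs are made up
# and the field values are only examples.
example_status_report = [{
    'id': '0d0f3f43-2f06-4c3b-9b4f-3f0d3f430001',   # vpnservice id
    'status': 'ACTIVE',                              # ACTIVE | DOWN | ERROR
    'updated_pending_status': True,                  # agent cleared a PENDING_* state
    'ipsec_site_connections': {
        '7a5f0a14-8a3d-4a57-8a9e-7a5f0a140002': {
            'status': 'DOWN',
            'updated_pending_status': False,
        },
    },
}]
# ----------------------------------------------------------------------------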
+ +from cliff import lister +from neutronclient.common import utils +from neutronclient.neutron import v2_0 as client +from neutronclient.neutron.v2_0 import port + +from neutron.openstack.common import log as logging + + +class ProbeCommand(client.NeutronCommand): + log = logging.getLogger(__name__ + '.ProbeCommand') + + def get_debug_agent(self): + return self.app.debug_agent + + def run(self, parsed_args): + self.log.debug('run(%s)', parsed_args) + self.log.info(_('Unimplemented commands')) + + +class CreateProbe(ProbeCommand): + """Create probe port and interface, then plug it in.""" + + log = logging.getLogger(__name__ + '.CreateProbe') + + def get_parser(self, prog_name): + parser = super(CreateProbe, self).get_parser(prog_name) + parser.add_argument( + 'id', metavar='network_id', + help=_('ID of network to probe')) + parser.add_argument( + '--device-owner', + default='network', choices=['network', 'compute'], + help=_('Owner type of the device: network/compute')) + return parser + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + probe_port = debug_agent.create_probe(parsed_args.id, + parsed_args.device_owner) + self.log.info(_('Probe created : %s '), probe_port.id) + + +class DeleteProbe(ProbeCommand): + """Delete probe - delete port then uplug.""" + + log = logging.getLogger(__name__ + '.DeleteProbe') + + def get_parser(self, prog_name): + parser = super(DeleteProbe, self).get_parser(prog_name) + parser.add_argument( + 'id', metavar='port_id', + help=_('ID of probe port to delete')) + return parser + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + debug_agent.delete_probe(parsed_args.id) + self.log.info(_('Probe %s deleted'), parsed_args.id) + + +class ListProbe(client.NeutronCommand, lister.Lister): + """List probes.""" + + log = logging.getLogger(__name__ + '.ListProbe') + _formatters = {'fixed_ips': port._format_fixed_ips, } + + def get_debug_agent(self): + return self.app.debug_agent + + def get_data(self, parsed_args): + + debug_agent = self.get_debug_agent() + info = debug_agent.list_probes() + columns = len(info) > 0 and sorted(info[0].keys()) or [] + return (columns, (utils.get_item_properties( + s, columns, formatters=self._formatters, ) + for s in info), ) + + +class ClearProbe(ProbeCommand): + """Clear All probes.""" + + log = logging.getLogger(__name__ + '.ClearProbe') + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + debug_agent.clear_probe() + self.log.info(_('All Probes deleted ')) + + +class ExecProbe(ProbeCommand): + """Exec commands on the namespace of the probe.""" + + log = logging.getLogger(__name__ + '.ExecProbe') + + def get_parser(self, prog_name): + parser = super(ExecProbe, self).get_parser(prog_name) + parser.add_argument( + 'id', metavar='port_id', + help=_('ID of probe port to execute command')) + parser.add_argument( + 'command', metavar='command', + nargs='?', + default=None, + help=_('Command to execute')) + return parser + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + result = debug_agent.exec_command(parsed_args.id, parsed_args.command) + self.app.stdout.write(result + '\n') + + +class PingAll(ProbeCommand): + """Ping all fixed_ip.""" + + log = logging.getLogger(__name__ + '.ExecProbe') + + def get_parser(self, prog_name): + parser = super(PingAll, self).get_parser(prog_name) + 
parser.add_argument( + '--timeout', metavar='', + default=10, + help=_('Ping timeout')) + parser.add_argument( + '--id', metavar='network_id', + default=None, + help=_('ID of network')) + return parser + + def run(self, parsed_args): + self.log.debug('run(%s)' % parsed_args) + debug_agent = self.get_debug_agent() + result = debug_agent.ping_all(parsed_args.id, + timeout=parsed_args.timeout) + self.app.stdout.write(result + '\n') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/debug/debug_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/debug/debug_agent.py new file mode 100644 index 00000000..cbcbbe30 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/debug/debug_agent.py @@ -0,0 +1,196 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import shlex +import socket + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import dhcp +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +DEVICE_OWNER_NETWORK_PROBE = 'network:probe' + +DEVICE_OWNER_COMPUTE_PROBE = 'compute:probe' + + +class NeutronDebugAgent(): + + OPTS = [ + # Needed for drivers + cfg.StrOpt('external_network_bridge', default='br-ex', + help=_("Name of bridge used for external network " + "traffic.")), + ] + + def __init__(self, conf, client, driver): + self.conf = conf + self.root_helper = config.get_root_helper(conf) + self.client = client + self.driver = driver + + def _get_namespace(self, port): + return "qprobe-%s" % port.id + + def create_probe(self, network_id, device_owner='network'): + network = self._get_network(network_id) + bridge = None + if network.external: + bridge = self.conf.external_network_bridge + + port = self._create_port(network, device_owner) + interface_name = self.driver.get_device_name(port) + namespace = None + if self.conf.use_namespaces: + namespace = self._get_namespace(port) + + if ip_lib.device_exists(interface_name, self.root_helper, namespace): + LOG.debug(_('Reusing existing device: %s.'), interface_name) + else: + self.driver.plug(network.id, + port.id, + interface_name, + port.mac_address, + bridge=bridge, + namespace=namespace) + ip_cidrs = [] + for fixed_ip in port.fixed_ips: + subnet = fixed_ip.subnet + net = netaddr.IPNetwork(subnet.cidr) + ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) + ip_cidrs.append(ip_cidr) + self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace) + return port + + def _get_subnet(self, subnet_id): + subnet_dict = self.client.show_subnet(subnet_id)['subnet'] + return dhcp.DictModel(subnet_dict) + + def _get_network(self, network_id): + network_dict = self.client.show_network(network_id)['network'] + network = dhcp.DictModel(network_dict) + network.external = network_dict.get('router:external') + obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets] + 
network.subnets = obj_subnet + return network + + def clear_probe(self): + ports = self.client.list_ports( + device_id=socket.gethostname(), + device_owner=[DEVICE_OWNER_NETWORK_PROBE, + DEVICE_OWNER_COMPUTE_PROBE]) + info = ports['ports'] + for port in info: + self.delete_probe(port['id']) + + def delete_probe(self, port_id): + port = dhcp.DictModel(self.client.show_port(port_id)['port']) + network = self._get_network(port.network_id) + bridge = None + if network.external: + bridge = self.conf.external_network_bridge + ip = ip_lib.IPWrapper(self.root_helper) + namespace = self._get_namespace(port) + if self.conf.use_namespaces and ip.netns.exists(namespace): + self.driver.unplug(self.driver.get_device_name(port), + bridge=bridge, + namespace=namespace) + try: + ip.netns.delete(namespace) + except Exception: + LOG.warn(_('Failed to delete namespace %s'), namespace) + else: + self.driver.unplug(self.driver.get_device_name(port), + bridge=bridge) + self.client.delete_port(port.id) + + def list_probes(self): + ports = self.client.list_ports( + device_owner=[DEVICE_OWNER_NETWORK_PROBE, + DEVICE_OWNER_COMPUTE_PROBE]) + info = ports['ports'] + for port in info: + port['device_name'] = self.driver.get_device_name( + dhcp.DictModel(port)) + return info + + def exec_command(self, port_id, command=None): + port = dhcp.DictModel(self.client.show_port(port_id)['port']) + ip = ip_lib.IPWrapper(self.root_helper) + namespace = self._get_namespace(port) + if self.conf.use_namespaces: + if not command: + return "sudo ip netns exec %s" % self._get_namespace(port) + namespace = ip.ensure_namespace(namespace) + return namespace.netns.execute(shlex.split(command)) + else: + return utils.execute(shlex.split(command)) + + def ensure_probe(self, network_id): + ports = self.client.list_ports(network_id=network_id, + device_id=socket.gethostname(), + device_owner=DEVICE_OWNER_NETWORK_PROBE) + info = ports.get('ports', []) + if info: + return dhcp.DictModel(info[0]) + else: + return self.create_probe(network_id) + + def ping_all(self, network_id=None, timeout=1): + if network_id: + ports = self.client.list_ports(network_id=network_id)['ports'] + else: + ports = self.client.list_ports()['ports'] + result = "" + for port in ports: + probe = self.ensure_probe(port['network_id']) + if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE: + continue + for fixed_ip in port['fixed_ips']: + address = fixed_ip['ip_address'] + subnet = self._get_subnet(fixed_ip['subnet_id']) + if subnet.ip_version == 4: + ping_command = 'ping' + else: + ping_command = 'ping6' + result += self.exec_command(probe.id, + '%s -c 1 -w %s %s' % (ping_command, + timeout, + address)) + return result + + def _create_port(self, network, device_owner): + host = self.conf.host + body = {'port': {'admin_state_up': True, + 'network_id': network.id, + 'device_id': '%s' % socket.gethostname(), + 'device_owner': '%s:probe' % device_owner, + 'tenant_id': network.tenant_id, + 'binding:host_id': host, + 'fixed_ips': [dict(subnet_id=s.id) + for s in network.subnets]}} + port_dict = self.client.create_port(body)['port'] + port = dhcp.DictModel(port_dict) + port.network = network + for fixed_ip in port.fixed_ips: + fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id) + return port diff --git a/icehouse-patches/neutron/dvr-patch/neutron/debug/shell.py b/icehouse-patches/neutron/dvr-patch/neutron/debug/shell.py new file mode 100644 index 00000000..fb37bde7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/debug/shell.py @@ -0,0 +1,88 @@ +# Copyright 2012, 
Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import interface +from neutron.debug import debug_agent +from neutron.openstack.common import importutils +from neutronclient.common import exceptions as exc +from neutronclient.common import utils +from neutronclient import shell + +COMMAND_V2 = { + 'probe-create': utils.import_class( + 'neutron.debug.commands.CreateProbe'), + 'probe-delete': utils.import_class( + 'neutron.debug.commands.DeleteProbe'), + 'probe-list': utils.import_class( + 'neutron.debug.commands.ListProbe'), + 'probe-clear': utils.import_class( + 'neutron.debug.commands.ClearProbe'), + 'probe-exec': utils.import_class( + 'neutron.debug.commands.ExecProbe'), + 'ping-all': utils.import_class( + 'neutron.debug.commands.PingAll'), + #TODO(nati) ping, netcat , nmap, bench +} +COMMANDS = {'2.0': COMMAND_V2} + + +class NeutronDebugShell(shell.NeutronShell): + def __init__(self, api_version): + super(NeutronDebugShell, self).__init__(api_version) + for k, v in COMMANDS[api_version].items(): + self.command_manager.add_command(k, v) + + def build_option_parser(self, description, version): + parser = super(NeutronDebugShell, self).build_option_parser( + description, version) + default = ( + shell.env('NEUTRON_TEST_CONFIG_FILE') or + shell.env('QUANTUM_TEST_CONFIG_FILE') + ) + parser.add_argument( + '--config-file', + default=default, + help=_('Config file for interface driver ' + '(You may also use l3_agent.ini)')) + return parser + + def initialize_app(self, argv): + super(NeutronDebugShell, self).initialize_app(argv) + if not self.options.config_file: + raise exc.CommandError( + _("You must provide a config file for bridge -" + " either --config-file or env[NEUTRON_TEST_CONFIG_FILE]")) + client = self.client_manager.neutron + cfg.CONF.register_opts(interface.OPTS) + cfg.CONF.register_opts(debug_agent.NeutronDebugAgent.OPTS) + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_use_namespaces_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + cfg.CONF(['--config-file', self.options.config_file]) + config.setup_logging(cfg.CONF) + driver = importutils.import_object(cfg.CONF.interface_driver, cfg.CONF) + self.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF, + client, + driver) + + +def main(argv=None): + return NeutronDebugShell(shell.NEUTRON_API_VERSION).run( + argv or sys.argv[1:]) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/agent.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/agent.py new file mode 100644 index 00000000..0dc0acd0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/agent.py @@ -0,0 +1,163 @@ +# Copyright (c) 2013 OpenStack 
Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import exceptions +from neutron import manager + + +# Attribute Map +RESOURCE_NAME = 'agent' +RESOURCE_ATTRIBUTE_MAP = { + RESOURCE_NAME + 's': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'agent_type': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'binary': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'topic': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'host': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'admin_state_up': {'allow_post': False, 'allow_put': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'created_at': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'started_at': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'heartbeat_timestamp': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'alive': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'configurations': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'description': {'allow_post': False, 'allow_put': True, + 'is_visible': True, + 'validate': {'type:string': None}}, + }, +} + + +class AgentNotFound(exceptions.NotFound): + message = _("Agent %(id)s could not be found") + + +class AgentNotFoundByTypeHost(exceptions.NotFound): + message = _("Agent with agent_type=%(agent_type)s and host=%(host)s " + "could not be found") + + +class MultipleAgentFoundByTypeHost(exceptions.Conflict): + message = _("Multiple agents with agent_type=%(agent_type)s and " + "host=%(host)s found") + + +class Agent(object): + """Agent management extension.""" + + @classmethod + def get_name(cls): + return "agent" + + @classmethod + def get_alias(cls): + return "agent" + + @classmethod + def get_description(cls): + return "The agent management extension." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/agent/api/v2.0" + + @classmethod + def get_updated(cls): + return "2013-02-03T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] + attr.PLURALS.update(dict(my_plurals)) + plugin = manager.NeutronManager.get_plugin() + params = RESOURCE_ATTRIBUTE_MAP.get(RESOURCE_NAME + 's') + controller = base.create_resource(RESOURCE_NAME + 's', + RESOURCE_NAME, + plugin, params + ) + + ex = extensions.ResourceExtension(RESOURCE_NAME + 's', + controller) + + return [ex] + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +class AgentPluginBase(object): + """REST API to operate the Agent. + + All of method must be in an admin context. 
+ """ + + def create_agent(self, context, agent): + """Create agent. + + This operation is not allow in REST API. + @raise exceptions.BadRequest: + """ + raise exceptions.BadRequest + + @abc.abstractmethod + def delete_agent(self, context, id): + """Delete agent. + + Agents register themselves on reporting state. + But if a agent does not report its status + for a long time (for example, it is dead for ever. ), + admin can remove it. Agents must be disabled before + being removed. + """ + pass + + @abc.abstractmethod + def update_agent(self, context, agent): + """Disable or Enable the agent. + + Discription also can be updated. Some agents cannot be disabled, such + as plugins, services. An error code should be reported in this case. + @raise exceptions.BadRequest: + """ + pass + + @abc.abstractmethod + def get_agents(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_agent(self, context, id, fields=None): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/allowedaddresspairs.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/allowedaddresspairs.py new file mode 100644 index 00000000..a9328aaa --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/allowedaddresspairs.py @@ -0,0 +1,116 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
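# --- Editor's note (illustrative sketch, not part of the patch) -------------
# Example payloads for the allowed_address_pairs validator defined below.
# The first list would pass _validate_allowed_address_pairs(); the second
# would be rejected because 'ip_address' is mandatory.  Addresses are
# illustrative only.
acceptable_pairs = [
    {'ip_address': '10.0.0.5'},                # MAC defaults to the port's MAC
    {'mac_address': 'fa:16:3e:aa:bb:cc',
     'ip_address': '10.0.1.0/24'},             # CIDRs are validated as subnets
]
rejected_pairs = [
    {'mac_address': 'fa:16:3e:aa:bb:cc'},      # missing ip_address
]
# ----------------------------------------------------------------------------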
+ +import webob.exc + +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as nexception + + +class AllowedAddressPairsMissingIP(nexception.InvalidInput): + message = _("AllowedAddressPair must contain ip_address") + + +class AddressPairAndPortSecurityRequired(nexception.Conflict): + message = _("Port Security must be enabled in order to have allowed " + "address pairs on a port.") + + +class DuplicateAddressPairInRequest(nexception.InvalidInput): + message = _("Request contains duplicate address pair: " + "mac_address %(mac_address)s ip_address %(ip_address)s.") + + +def _validate_allowed_address_pairs(address_pairs, valid_values=None): + unique_check = {} + for address_pair in address_pairs: + # mac_address is optional, if not set we use the mac on the port + if 'mac_address' in address_pair: + msg = attr._validate_mac_address(address_pair['mac_address']) + if msg: + raise webob.exc.HTTPBadRequest(msg) + if 'ip_address' not in address_pair: + raise AllowedAddressPairsMissingIP() + + mac = address_pair.get('mac_address') + ip_address = address_pair['ip_address'] + if (mac, ip_address) not in unique_check: + unique_check[(mac, ip_address)] = None + else: + raise DuplicateAddressPairInRequest(mac_address=mac, + ip_address=ip_address) + + invalid_attrs = set(address_pair.keys()) - set(['mac_address', + 'ip_address']) + if invalid_attrs: + msg = (_("Unrecognized attribute(s) '%s'") % + ', '.join(set(address_pair.keys()) - + set(['mac_address', 'ip_address']))) + raise webob.exc.HTTPBadRequest(msg) + + if '/' in ip_address: + msg = attr._validate_subnet(ip_address) + else: + msg = attr._validate_ip_address(ip_address) + if msg: + raise webob.exc.HTTPBadRequest(msg) + +attr.validators['type:validate_allowed_address_pairs'] = ( + _validate_allowed_address_pairs) + +ADDRESS_PAIRS = 'allowed_address_pairs' +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': { + ADDRESS_PAIRS: {'allow_post': True, 'allow_put': True, + 'convert_list_to': + attr.convert_kvp_list_to_dict, + 'validate': {'type:validate_allowed_address_pairs': + None}, + 'enforce_policy': True, + 'default': attr.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + } +} + + +class Allowedaddresspairs(object): + """Extension class supporting allowed address pairs.""" + + @classmethod + def get_name(cls): + return "Allowed Address Pairs" + + @classmethod + def get_alias(cls): + return "allowed-address-pairs" + + @classmethod + def get_description(cls): + return "Provides allowed address pairs" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/allowedaddresspairs/api/v2.0" + + @classmethod + def get_updated(cls): + return "2013-07-23T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + attr.PLURALS.update({'allowed_address_pairs': + 'allowed_address_pair'}) + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/dhcpagentscheduler.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/dhcpagentscheduler.py new file mode 100644 index 00000000..d86ba614 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/dhcpagentscheduler.py @@ -0,0 +1,152 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from neutron.api import extensions +from neutron.api.v2 import base +from neutron.api.v2 import resource +from neutron.common import constants +from neutron.common import exceptions +from neutron.extensions import agent +from neutron import manager +from neutron import policy +from neutron import wsgi + +DHCP_NET = 'dhcp-network' +DHCP_NETS = DHCP_NET + 's' +DHCP_AGENT = 'dhcp-agent' +DHCP_AGENTS = DHCP_AGENT + 's' + + +class NetworkSchedulerController(wsgi.Controller): + def index(self, request, **kwargs): + plugin = manager.NeutronManager.get_plugin() + policy.enforce(request.context, + "get_%s" % DHCP_NETS, + {}) + return plugin.list_networks_on_dhcp_agent( + request.context, kwargs['agent_id']) + + def create(self, request, body, **kwargs): + plugin = manager.NeutronManager.get_plugin() + policy.enforce(request.context, + "create_%s" % DHCP_NET, + {}) + return plugin.add_network_to_dhcp_agent( + request.context, kwargs['agent_id'], body['network_id']) + + def delete(self, request, id, **kwargs): + plugin = manager.NeutronManager.get_plugin() + policy.enforce(request.context, + "delete_%s" % DHCP_NET, + {}) + return plugin.remove_network_from_dhcp_agent( + request.context, kwargs['agent_id'], id) + + +class DhcpAgentsHostingNetworkController(wsgi.Controller): + def index(self, request, **kwargs): + plugin = manager.NeutronManager.get_plugin() + policy.enforce(request.context, + "get_%s" % DHCP_AGENTS, + {}) + return plugin.list_dhcp_agents_hosting_network( + request.context, kwargs['network_id']) + + +class Dhcpagentscheduler(extensions.ExtensionDescriptor): + """Extension class supporting dhcp agent scheduler. 
+ """ + + @classmethod + def get_name(cls): + return "DHCP Agent Scheduler" + + @classmethod + def get_alias(cls): + return constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS + + @classmethod + def get_description(cls): + return "Schedule networks among dhcp agents" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/dhcp_agent_scheduler/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-02-07T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + parent = dict(member_name="agent", + collection_name="agents") + controller = resource.Resource(NetworkSchedulerController(), + base.FAULT_MAP) + exts.append(extensions.ResourceExtension( + DHCP_NETS, controller, parent)) + + parent = dict(member_name="network", + collection_name="networks") + + controller = resource.Resource(DhcpAgentsHostingNetworkController(), + base.FAULT_MAP) + exts.append(extensions.ResourceExtension( + DHCP_AGENTS, controller, parent)) + return exts + + def get_extended_resources(self, version): + return {} + + +class InvalidDHCPAgent(agent.AgentNotFound): + message = _("Agent %(id)s is not a valid DHCP Agent or has been disabled") + + +class NetworkHostedByDHCPAgent(exceptions.Conflict): + message = _("The network %(network_id)s has been already hosted" + " by the DHCP Agent %(agent_id)s.") + + +class NetworkNotHostedByDhcpAgent(exceptions.Conflict): + message = _("The network %(network_id)s is not hosted" + " by the DHCP agent %(agent_id)s.") + + +class DhcpAgentSchedulerPluginBase(object): + """REST API to operate the DHCP agent scheduler. + + All of method must be in an admin context. + """ + + @abc.abstractmethod + def add_network_to_dhcp_agent(self, context, id, network_id): + pass + + @abc.abstractmethod + def remove_network_from_dhcp_agent(self, context, id, network_id): + pass + + @abc.abstractmethod + def list_networks_on_dhcp_agent(self, context, id): + pass + + @abc.abstractmethod + def list_dhcp_agents_hosting_network(self, context, network_id): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/dvr.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/dvr.py new file mode 100644 index 00000000..799201ab --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/dvr.py @@ -0,0 +1,97 @@ +# Copyright (c) 2014 OpenStack Foundation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
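# --- Editor's note (illustrative sketch, not part of the patch) -------------
# Rough REST layout produced by Dhcpagentscheduler.get_resources() above,
# assuming the usual /v2.0 prefix; the verb-to-plugin-method mapping follows
# the two controllers defined in that extension.
#
#   GET    /v2.0/agents/{agent_id}/dhcp-networks       -> list_networks_on_dhcp_agent
#   POST   /v2.0/agents/{agent_id}/dhcp-networks       -> add_network_to_dhcp_agent
#   DELETE /v2.0/agents/{agent_id}/dhcp-networks/{id}  -> remove_network_from_dhcp_agent
#   GET    /v2.0/networks/{network_id}/dhcp-agents     -> list_dhcp_agents_hosting_network
# ----------------------------------------------------------------------------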
+ +import abc + +import six + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as qexception + +DISTRIBUTED = 'distributed' +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + DISTRIBUTED: {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'convert_to': attributes.convert_to_boolean_if_not_none, + 'enforce_policy': True}, + } +} + + +class DVRMacAddressNotFound(qexception.NotFound): + message = _("Distributed Virtual Router Mac Address for " + "host %(host)s does not exist.") + + +class MacAddressGenerationFailure(qexception.ServiceUnavailable): + message = _("Unable to generate unique dvr mac for host %(host)s.") + + +class Dvr(object): + """Extension class supporting distributed virtual router.""" + + @classmethod + def get_name(cls): + return "Distributed Virtual Router" + + @classmethod + def get_alias(cls): + return constants.L3_DISTRIBUTED_EXT_ALIAS + + @classmethod + def get_description(cls): + return "Enables configuration of Distributed Virtual Routers." + + @classmethod + def get_namespace(cls): + return ("http://docs.openstack.org/ext/" + "%s/api/v1.0" % constants.L3_DISTRIBUTED_EXT_ALIAS) + + @classmethod + def get_updated(cls): + return "2014-06-1T10:00:00-00:00" + + def get_required_extensions(self): + return ["router"] + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + return [] + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} + + +@six.add_metaclass(abc.ABCMeta) +class DVRMacAddressPluginBase(object): + + @abc.abstractmethod + def delete_dvr_mac_address(self, context, host): + pass + + @abc.abstractmethod + def get_dvr_mac_address_list(self, context): + pass + + @abc.abstractmethod + def get_dvr_mac_address_by_host(self, context, host): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/external_net.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/external_net.py new file mode 100644 index 00000000..6e50e93c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/external_net.py @@ -0,0 +1,68 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as qexception +from neutron.extensions import l3 + + +class ExternalNetworkInUse(qexception.InUse): + message = _("External network %(net_id)s cannot be updated to be made " + "non-external, since it has existing gateway ports") + + +# For backward compatibility the 'router' prefix is kept. 
+EXTERNAL = 'router:external' +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': {EXTERNAL: {'allow_post': True, + 'allow_put': True, + 'default': attr.ATTR_NOT_SPECIFIED, + 'is_visible': True, + 'convert_to': attr.convert_to_boolean, + 'enforce_policy': True, + 'required_by_policy': True}}} + + +class External_net(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Neutron external network" + + @classmethod + def get_alias(cls): + return "external-net" + + @classmethod + def get_description(cls): + return _("Adds external network attribute to network resource.") + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/external_net/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-01-14T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} + + def get_alias_namespace_compatibility_map(self): + return {l3.L3.get_alias(): l3.L3.get_namespace()} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/extraroute.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/extraroute.py new file mode 100644 index 00000000..7c63baa2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/extraroute.py @@ -0,0 +1,74 @@ +# Copyright 2013, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as qexception + + +# Extra Routes Exceptions +class InvalidRoutes(qexception.InvalidInput): + message = _("Invalid format for routes: %(routes)s, %(reason)s") + + +class RouterInterfaceInUseByRoute(qexception.InUse): + message = _("Router interface for subnet %(subnet_id)s on router " + "%(router_id)s cannot be deleted, as it is required " + "by one or more routes.") + + +class RoutesExhausted(qexception.BadRequest): + message = _("Unable to complete operation for %(router_id)s. 
" + "The number of routes exceeds the maximum %(quota)s.") + +# Attribute Map +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + 'routes': {'allow_post': False, 'allow_put': True, + 'validate': {'type:hostroutes': None}, + 'convert_to': attr.convert_none_to_empty_list, + 'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED}, + } +} + + +class Extraroute(): + + @classmethod + def get_name(cls): + return "Neutron Extra Route" + + @classmethod + def get_alias(cls): + return "extraroute" + + @classmethod + def get_description(cls): + return "Extra routes configuration for L3 router" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/extraroutes/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-02-01T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + attr.PLURALS.update({'routes': 'route'}) + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/firewall.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/firewall.py new file mode 100644 index 00000000..bbb5d163 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/firewall.py @@ -0,0 +1,431 @@ +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. 
+ +import abc + +from oslo.config import cfg +import six + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import resource_helper +from neutron.common import exceptions as qexception +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services import service_base + + +LOG = logging.getLogger(__name__) + + +# Firewall Exceptions +class FirewallNotFound(qexception.NotFound): + message = _("Firewall %(firewall_id)s could not be found.") + + +class FirewallInUse(qexception.InUse): + message = _("Firewall %(firewall_id)s is still active.") + + +class FirewallInPendingState(qexception.Conflict): + message = _("Operation cannot be performed since associated Firewall " + "%(firewall_id)s is in %(pending_state)s.") + + +class FirewallPolicyNotFound(qexception.NotFound): + message = _("Firewall Policy %(firewall_policy_id)s could not be found.") + + +class FirewallPolicyInUse(qexception.InUse): + message = _("Firewall Policy %(firewall_policy_id)s is being used.") + + +class FirewallRuleNotFound(qexception.NotFound): + message = _("Firewall Rule %(firewall_rule_id)s could not be found.") + + +class FirewallRuleInUse(qexception.InUse): + message = _("Firewall Rule %(firewall_rule_id)s is being used.") + + +class FirewallRuleNotAssociatedWithPolicy(qexception.InvalidInput): + message = _("Firewall Rule %(firewall_rule_id)s is not associated " + " with Firewall Policy %(firewall_policy_id)s.") + + +class FirewallRuleInvalidProtocol(qexception.InvalidInput): + message = _("Firewall Rule protocol %(protocol)s is not supported. " + "Only protocol values %(values)s and their integer " + "representation (0 to 255) are supported.") + + +class FirewallRuleInvalidAction(qexception.InvalidInput): + message = _("Firewall rule action %(action)s is not supported. " + "Only action values %(values)s are supported.") + + +class FirewallRuleInvalidICMPParameter(qexception.InvalidInput): + message = _("%(param)s are not allowed when protocol " + "is set to ICMP.") + + +class FirewallInvalidPortValue(qexception.InvalidInput): + message = _("Invalid value for port %(port)s.") + + +class FirewallRuleInfoMissing(qexception.InvalidInput): + message = _("Missing rule info argument for insert/remove " + "rule operation.") + + +class FirewallInternalDriverError(qexception.NeutronException): + """Fwaas exception for all driver errors. 
+ + On any failure or exception in the driver, driver should log it and + raise this exception to the agent + """ + message = _("%(driver)s: Internal driver error.") + + +fw_valid_protocol_values = [None, constants.TCP, constants.UDP, constants.ICMP] +fw_valid_action_values = [constants.FWAAS_ALLOW, constants.FWAAS_DENY] + + +def convert_protocol(value): + if value is None: + return + if value.isdigit(): + val = int(value) + if 0 <= val <= 255: + return val + else: + raise FirewallRuleInvalidProtocol(protocol=value, + values= + fw_valid_protocol_values) + elif value.lower() in fw_valid_protocol_values: + return value.lower() + else: + raise FirewallRuleInvalidProtocol(protocol=value, + values= + fw_valid_protocol_values) + + +def convert_action_to_case_insensitive(value): + if value is None: + return + else: + return value.lower() + + +def convert_port_to_string(value): + if value is None: + return + else: + return str(value) + + +def _validate_port_range(data, key_specs=None): + if data is None: + return + data = str(data) + ports = data.split(':') + for p in ports: + try: + val = int(p) + except (ValueError, TypeError): + msg = _("Port '%s' is not a valid number") % p + LOG.debug(msg) + return msg + if val <= 0 or val > 65535: + msg = _("Invalid port '%s'") % p + LOG.debug(msg) + return msg + + +def _validate_ip_or_subnet_or_none(data, valid_values=None): + if data is None: + return None + msg_ip = attr._validate_ip_address(data, valid_values) + if not msg_ip: + return + msg_subnet = attr._validate_subnet(data, valid_values) + if not msg_subnet: + return + return _("%(msg_ip)s and %(msg_subnet)s") % {'msg_ip': msg_ip, + 'msg_subnet': msg_subnet} + + +attr.validators['type:port_range'] = _validate_port_range +attr.validators['type:ip_or_subnet_or_none'] = _validate_ip_or_subnet_or_none + + +RESOURCE_ATTRIBUTE_MAP = { + 'firewall_rules': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'firewall_policy_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'is_visible': True}, + 'shared': {'allow_post': True, 'allow_put': True, + 'default': False, 'convert_to': attr.convert_to_boolean, + 'is_visible': True, 'required_by_policy': True, + 'enforce_policy': True}, + 'protocol': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None, + 'convert_to': convert_protocol, + 'validate': {'type:values': fw_valid_protocol_values}}, + 'ip_version': {'allow_post': True, 'allow_put': True, + 'default': 4, 'convert_to': attr.convert_to_int, + 'validate': {'type:values': [4, 6]}, + 'is_visible': True}, + 'source_ip_address': {'allow_post': True, 'allow_put': True, + 'validate': {'type:ip_or_subnet_or_none': None}, + 'is_visible': True, 'default': None}, + 'destination_ip_address': {'allow_post': True, 'allow_put': True, + 'validate': {'type:ip_or_subnet_or_none': + None}, + 'is_visible': True, 'default': None}, + 'source_port': {'allow_post': True, 'allow_put': True, + 'validate': {'type:port_range': None}, + 'convert_to': convert_port_to_string, + 'default': None, 'is_visible': True}, + 
'destination_port': {'allow_post': True, 'allow_put': True, + 'validate': {'type:port_range': None}, + 'convert_to': convert_port_to_string, + 'default': None, 'is_visible': True}, + 'position': {'allow_post': False, 'allow_put': False, + 'default': None, 'is_visible': True}, + 'action': {'allow_post': True, 'allow_put': True, + 'convert_to': convert_action_to_case_insensitive, + 'validate': {'type:values': fw_valid_action_values}, + 'is_visible': True, 'default': 'deny'}, + 'enabled': {'allow_post': True, 'allow_put': True, + 'default': True, 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + }, + 'firewall_policies': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'shared': {'allow_post': True, 'allow_put': True, + 'default': False, 'convert_to': attr.convert_to_boolean, + 'is_visible': True, 'required_by_policy': True, + 'enforce_policy': True}, + 'firewall_rules': {'allow_post': True, 'allow_put': True, + 'validate': {'type:uuid_list': None}, + 'convert_to': attr.convert_none_to_empty_list, + 'default': None, 'is_visible': True}, + 'audited': {'allow_post': True, 'allow_put': True, + 'default': False, 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + }, + 'firewalls': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'shared': {'allow_post': True, 'allow_put': True, + 'default': False, 'convert_to': attr.convert_to_boolean, + 'is_visible': False, 'required_by_policy': True, + 'enforce_policy': True}, + 'firewall_policy_id': {'allow_post': True, 'allow_put': True, + 'validate': {'type:uuid_or_none': None}, + 'is_visible': True}, + }, +} + +firewall_quota_opts = [ + cfg.IntOpt('quota_firewall', + default=1, + help=_('Number of firewalls allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_firewall_policy', + default=1, + help=_('Number of firewall policies allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_firewall_rule', + default=-1, + help=_('Number of firewall rules allowed per tenant. 
' + 'A negative value means unlimited.')), +] +cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS') + + +class Firewall(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Firewall service" + + @classmethod + def get_alias(cls): + return "fwaas" + + @classmethod + def get_description(cls): + return "Extension for Firewall service" + + @classmethod + def get_namespace(cls): + return "http://wiki.openstack.org/Neutron/FWaaS/API_1.0" + + @classmethod + def get_updated(cls): + return "2013-02-25T10:00:00-00:00" + + @classmethod + def get_resources(cls): + special_mappings = {'firewall_policies': 'firewall_policy'} + plural_mappings = resource_helper.build_plural_mappings( + special_mappings, RESOURCE_ATTRIBUTE_MAP) + attr.PLURALS.update(plural_mappings) + action_map = {'firewall_policy': {'insert_rule': 'PUT', + 'remove_rule': 'PUT'}} + return resource_helper.build_resource_info(plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + constants.FIREWALL, + action_map=action_map) + + @classmethod + def get_plugin_interface(cls): + return FirewallPluginBase + + def update_attributes_map(self, attributes): + super(Firewall, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +@six.add_metaclass(abc.ABCMeta) +class FirewallPluginBase(service_base.ServicePluginBase): + + def get_plugin_name(self): + return constants.FIREWALL + + def get_plugin_type(self): + return constants.FIREWALL + + def get_plugin_description(self): + return 'Firewall service plugin' + + @abc.abstractmethod + def get_firewalls(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_firewall(self, context, id, fields=None): + pass + + @abc.abstractmethod + def create_firewall(self, context, firewall): + pass + + @abc.abstractmethod + def update_firewall(self, context, id, firewall): + pass + + @abc.abstractmethod + def delete_firewall(self, context, id): + pass + + @abc.abstractmethod + def get_firewall_rules(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_firewall_rule(self, context, id, fields=None): + pass + + @abc.abstractmethod + def create_firewall_rule(self, context, firewall_rule): + pass + + @abc.abstractmethod + def update_firewall_rule(self, context, id, firewall_rule): + pass + + @abc.abstractmethod + def delete_firewall_rule(self, context, id): + pass + + @abc.abstractmethod + def get_firewall_policy(self, context, id, fields=None): + pass + + @abc.abstractmethod + def get_firewall_policies(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def create_firewall_policy(self, context, firewall_policy): + pass + + @abc.abstractmethod + def update_firewall_policy(self, context, id, firewall_policy): + pass + + @abc.abstractmethod + def delete_firewall_policy(self, context, id): + pass + + @abc.abstractmethod + def insert_rule(self, context, id, rule_info): + pass + + @abc.abstractmethod + def remove_rule(self, context, id, rule_info): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/flavor.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/flavor.py new file mode 100644 index 00000000..c5937d93 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/flavor.py @@ -0,0 +1,67 @@ +# Copyright 2012 Nachi Ueno, NTT MCL, Inc. +# All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +FLAVOR_NETWORK = 'flavor:network' +FLAVOR_ROUTER = 'flavor:router' + +FLAVOR_ATTRIBUTE = { + 'networks': { + FLAVOR_NETWORK: {'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED} + }, + 'routers': { + FLAVOR_ROUTER: {'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED} + } +} + + +class Flavor(extensions.ExtensionDescriptor): + @classmethod + def get_name(cls): + return "Flavor support for network and router" + + @classmethod + def get_alias(cls): + return "flavor" + + @classmethod + def get_description(cls): + return "Flavor" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/flavor/api/v1.0" + + @classmethod + def get_updated(cls): + return "2012-07-20T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return FLAVOR_ATTRIBUTE + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/l3.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/l3.py new file mode 100644 index 00000000..b02c9337 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/l3.py @@ -0,0 +1,254 @@ +# Copyright 2012 VMware, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
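+# This module implements the "router" (L3) API extension: the 'routers' and
+# 'floatingips' resources in RESOURCE_ATTRIBUTE_MAP, their quota options and
+# the RouterPluginBase abstract interface implemented by the L3 service
+# plugin.  For example, the floating IP attributes declared below allow a
+# request such as the following (UUIDs are placeholders):
+#
+#     POST /v2.0/floatingips
+#     {"floatingip": {"floating_network_id": "<external-net-uuid>",
+#                     "port_id": "<port-uuid>"}}
+#
+# floating_ip_address, router_id and status are read-only here and are
+# filled in by the plugin.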
+ +import abc + +from oslo.config import cfg + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import resource_helper +from neutron.common import exceptions as qexception +from neutron.plugins.common import constants + + +# L3 Exceptions +class RouterNotFound(qexception.NotFound): + message = _("Router %(router_id)s could not be found") + + +class RouterInUse(qexception.InUse): + message = _("Router %(router_id)s still has ports") + + +class RouterInterfaceNotFound(qexception.NotFound): + message = _("Router %(router_id)s does not have " + "an interface with id %(port_id)s") + + +class RouterInterfaceNotFoundForSubnet(qexception.NotFound): + message = _("Router %(router_id)s has no interface " + "on subnet %(subnet_id)s") + + +class RouterInterfaceInUseByFloatingIP(qexception.InUse): + message = _("Router interface for subnet %(subnet_id)s on router " + "%(router_id)s cannot be deleted, as it is required " + "by one or more floating IPs.") + + +class FloatingIPNotFound(qexception.NotFound): + message = _("Floating IP %(floatingip_id)s could not be found") + + +class ExternalGatewayForFloatingIPNotFound(qexception.NotFound): + message = _("External network %(external_network_id)s is not reachable " + "from subnet %(subnet_id)s. Therefore, cannot associate " + "Port %(port_id)s with a Floating IP.") + + +class FloatingIPPortAlreadyAssociated(qexception.InUse): + message = _("Cannot associate floating IP %(floating_ip_address)s " + "(%(fip_id)s) with port %(port_id)s " + "using fixed IP %(fixed_ip)s, as that fixed IP already " + "has a floating IP on external network %(net_id)s.") + + +class L3PortInUse(qexception.InUse): + message = _("Port %(port_id)s has owner %(device_owner)s and therefore" + " cannot be deleted directly via the port API.") + + +class RouterExternalGatewayInUseByFloatingIp(qexception.InUse): + message = _("Gateway cannot be updated for router %(router_id)s, since a " + "gateway to external network %(net_id)s is required by one or " + "more floating IPs.") + +ROUTERS = 'routers' +EXTERNAL_GW_INFO = 'external_gateway_info' + +RESOURCE_ATTRIBUTE_MAP = { + ROUTERS: { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None, + 'enforce_policy': True} + }, + 'floatingips': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'floating_ip_address': {'allow_post': False, 'allow_put': False, + 'validate': {'type:ip_address_or_none': None}, + 'is_visible': True}, + 'floating_network_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'router_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'is_visible': True, 'default': None}, + 'port_id': {'allow_post': True, 'allow_put': True, + 'validate': 
{'type:uuid_or_none': None}, + 'is_visible': True, 'default': None, + 'required_by_policy': True}, + 'fixed_ip_address': {'allow_post': True, 'allow_put': True, + 'validate': {'type:ip_address_or_none': None}, + 'is_visible': True, 'default': None}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + }, +} + +l3_quota_opts = [ + cfg.IntOpt('quota_router', + default=10, + help=_('Number of routers allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_floatingip', + default=50, + help=_('Number of floating IPs allowed per tenant. ' + 'A negative value means unlimited.')), +] +cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS') + + +class L3(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Neutron L3 Router" + + @classmethod + def get_alias(cls): + return "router" + + @classmethod + def get_description(cls): + return ("Router abstraction for basic L3 forwarding" + " between L2 Neutron networks and access to external" + " networks via a NAT gateway.") + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/router/api/v1.0" + + @classmethod + def get_updated(cls): + return "2012-07-20T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + plural_mappings = resource_helper.build_plural_mappings( + {}, RESOURCE_ATTRIBUTE_MAP) + attr.PLURALS.update(plural_mappings) + action_map = {'router': {'add_router_interface': 'PUT', + 'remove_router_interface': 'PUT'}} + return resource_helper.build_resource_info(plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + constants.L3_ROUTER_NAT, + action_map=action_map, + register_quota=True) + + def update_attributes_map(self, attributes): + super(L3, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +class RouterPluginBase(object): + + @abc.abstractmethod + def create_router(self, context, router): + pass + + @abc.abstractmethod + def update_router(self, context, id, router): + pass + + @abc.abstractmethod + def get_router(self, context, id, fields=None): + pass + + @abc.abstractmethod + def delete_router(self, context, id): + pass + + @abc.abstractmethod + def get_routers(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + pass + + @abc.abstractmethod + def add_router_interface(self, context, router_id, interface_info): + pass + + @abc.abstractmethod + def remove_router_interface(self, context, router_id, interface_info): + pass + + @abc.abstractmethod + def create_floatingip(self, context, floatingip): + pass + + @abc.abstractmethod + def update_floatingip(self, context, id, floatingip): + pass + + @abc.abstractmethod + def get_floatingip(self, context, id, fields=None): + pass + + @abc.abstractmethod + def delete_floatingip(self, context, id): + pass + + @abc.abstractmethod + def get_floatingips(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + pass + + def get_routers_count(self, context, filters=None): + raise NotImplementedError() + + def get_floatingips_count(self, context, filters=None): + raise NotImplementedError() diff --git 
a/icehouse-patches/neutron/dvr-patch/neutron/extensions/l3agentscheduler.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/l3agentscheduler.py new file mode 100644 index 00000000..ad5aaa1c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/l3agentscheduler.py @@ -0,0 +1,213 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import webob.exc + +from neutron.api import extensions +from neutron.api.v2 import base +from neutron.api.v2 import resource +from neutron.common import constants +from neutron.common import exceptions +from neutron.extensions import agent +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants +from neutron import policy +from neutron import wsgi + + +LOG = logging.getLogger(__name__) + + +L3_ROUTER = 'l3-router' +L3_ROUTERS = L3_ROUTER + 's' +L3_AGENT = 'l3-agent' +L3_AGENTS = L3_AGENT + 's' + + +class RouterSchedulerController(wsgi.Controller): + def get_plugin(self): + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if not plugin: + LOG.error(_('No plugin for L3 routing registered to handle ' + 'router scheduling')) + msg = _('The resource could not be found.') + raise webob.exc.HTTPNotFound(msg) + return plugin + + def index(self, request, **kwargs): + plugin = self.get_plugin() + policy.enforce(request.context, + "get_%s" % L3_ROUTERS, + {}) + return plugin.list_routers_on_l3_agent( + request.context, kwargs['agent_id']) + + def create(self, request, body, **kwargs): + plugin = self.get_plugin() + policy.enforce(request.context, + "create_%s" % L3_ROUTER, + {}) + return plugin.add_router_to_l3_agent( + request.context, + kwargs['agent_id'], + body['router_id']) + + def delete(self, request, id, **kwargs): + plugin = self.get_plugin() + policy.enforce(request.context, + "delete_%s" % L3_ROUTER, + {}) + return plugin.remove_router_from_l3_agent( + request.context, kwargs['agent_id'], id) + + +class L3AgentsHostingRouterController(wsgi.Controller): + def get_plugin(self): + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if not plugin: + LOG.error(_('No plugin for L3 routing registered to handle ' + 'router scheduling')) + msg = _('The resource could not be found.') + raise webob.exc.HTTPNotFound(msg) + return plugin + + def index(self, request, **kwargs): + plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + policy.enforce(request.context, + "get_%s" % L3_AGENTS, + {}) + return plugin.list_l3_agents_hosting_router( + request.context, kwargs['router_id']) + + +class L3agentscheduler(extensions.ExtensionDescriptor): + """Extension class supporting l3 agent scheduler. 
+ """ + + @classmethod + def get_name(cls): + return "L3 Agent Scheduler" + + @classmethod + def get_alias(cls): + return constants.L3_AGENT_SCHEDULER_EXT_ALIAS + + @classmethod + def get_description(cls): + return "Schedule routers among l3 agents" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/l3_agent_scheduler/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-02-07T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + parent = dict(member_name="agent", + collection_name="agents") + + controller = resource.Resource(RouterSchedulerController(), + base.FAULT_MAP) + exts.append(extensions.ResourceExtension( + L3_ROUTERS, controller, parent)) + + parent = dict(member_name="router", + collection_name="routers") + + controller = resource.Resource(L3AgentsHostingRouterController(), + base.FAULT_MAP) + exts.append(extensions.ResourceExtension( + L3_AGENTS, controller, parent)) + return exts + + def get_extended_resources(self, version): + return {} + + +class InvalidL3Agent(agent.AgentNotFound): + message = _("Agent %(id)s is not a L3 Agent or has been disabled") + + +class RouterHostedByL3Agent(exceptions.Conflict): + message = _("The router %(router_id)s has been already hosted" + " by the L3 Agent %(agent_id)s.") + + +class RouterSchedulingFailed(exceptions.Conflict): + message = _("Failed scheduling router %(router_id)s to" + " the L3 Agent %(agent_id)s.") + + +class RouterReschedulingFailed(exceptions.Conflict): + message = _("Failed rescheduling router %(router_id)s: " + "no eligible l3 agent found.") + + +class RouterNotHostedByL3Agent(exceptions.Conflict): + message = _("The router %(router_id)s is not hosted " + "by L3 agent %(agent_id)s.") + + +class DistributedRouterNotHostedByL3Agent(exceptions.Conflict): + message = _("The Distributed router %(router_id)s can't be hosted " + "on Centralised L3 agent %(agent_id)s.") + + +class RouterNotHostedByDistributedL3Agent(exceptions.Conflict): + message = _("The Non-Distributed router %(router_id)s can't be hosted " + "on Distributed L3 agent %(agent_id)s.") + + +class RemoveFloatingIPforRouter(exceptions.Conflict): + message = _("Disassociate the floating-ip before removing " + "router %(router_id)s from the L3 agent %(agent_id)s.") + + +class NoSnatEnabledL3Agent(exceptions.Conflict): + message = _("No snat enabled l3-agents for the router %(router_id)s.") + + +class L3AgentSchedulerPluginBase(object): + """REST API to operate the l3 agent scheduler. + + All of method must be in an admin context. + """ + + @abc.abstractmethod + def add_router_to_l3_agent(self, context, id, router_id): + pass + + @abc.abstractmethod + def remove_router_from_l3_agent(self, context, id, router_id): + pass + + @abc.abstractmethod + def list_routers_on_l3_agent(self, context, id): + pass + + @abc.abstractmethod + def list_l3_agents_hosting_router(self, context, router_id): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/lbaas_agentscheduler.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/lbaas_agentscheduler.py new file mode 100644 index 00000000..a821cb6e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/lbaas_agentscheduler.py @@ -0,0 +1,137 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from neutron.api import extensions +from neutron.api.v2 import base +from neutron.api.v2 import resource +from neutron.common import constants +from neutron.extensions import agent +from neutron.extensions import loadbalancer +from neutron import manager +from neutron.plugins.common import constants as plugin_const +from neutron import policy +from neutron import wsgi + +LOADBALANCER_POOL = 'loadbalancer-pool' +LOADBALANCER_POOLS = LOADBALANCER_POOL + 's' +LOADBALANCER_AGENT = 'loadbalancer-agent' + + +class PoolSchedulerController(wsgi.Controller): + def index(self, request, **kwargs): + lbaas_plugin = manager.NeutronManager.get_service_plugins().get( + plugin_const.LOADBALANCER) + if not lbaas_plugin: + return {'pools': []} + + policy.enforce(request.context, + "get_%s" % LOADBALANCER_POOLS, + {}, + plugin=lbaas_plugin) + return lbaas_plugin.list_pools_on_lbaas_agent( + request.context, kwargs['agent_id']) + + +class LbaasAgentHostingPoolController(wsgi.Controller): + def index(self, request, **kwargs): + lbaas_plugin = manager.NeutronManager.get_service_plugins().get( + plugin_const.LOADBALANCER) + if not lbaas_plugin: + return + + policy.enforce(request.context, + "get_%s" % LOADBALANCER_AGENT, + {}, + plugin=lbaas_plugin) + return lbaas_plugin.get_lbaas_agent_hosting_pool( + request.context, kwargs['pool_id']) + + +class Lbaas_agentscheduler(extensions.ExtensionDescriptor): + """Extension class supporting LBaaS agent scheduler. + """ + + @classmethod + def get_name(cls): + return "Loadbalancer Agent Scheduler" + + @classmethod + def get_alias(cls): + return constants.LBAAS_AGENT_SCHEDULER_EXT_ALIAS + + @classmethod + def get_description(cls): + return "Schedule pools among lbaas agents" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/lbaas_agent_scheduler/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-02-07T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + parent = dict(member_name="agent", + collection_name="agents") + + controller = resource.Resource(PoolSchedulerController(), + base.FAULT_MAP) + exts.append(extensions.ResourceExtension( + LOADBALANCER_POOLS, controller, parent)) + + parent = dict(member_name="pool", + collection_name="pools") + + controller = resource.Resource(LbaasAgentHostingPoolController(), + base.FAULT_MAP) + exts.append(extensions.ResourceExtension( + LOADBALANCER_AGENT, controller, parent, + path_prefix=plugin_const. + COMMON_PREFIXES[plugin_const.LOADBALANCER])) + return exts + + def get_extended_resources(self, version): + return {} + + +class NoEligibleLbaasAgent(loadbalancer.NoEligibleBackend): + message = _("No eligible loadbalancer agent found " + "for pool %(pool_id)s.") + + +class NoActiveLbaasAgent(agent.AgentNotFound): + message = _("No active loadbalancer agent found " + "for pool %(pool_id)s.") + + +class LbaasAgentSchedulerPluginBase(object): + """REST API to operate the lbaas agent scheduler. + + All of method must be in an admin context. 
+ """ + + @abc.abstractmethod + def list_pools_on_lbaas_agent(self, context, id): + pass + + @abc.abstractmethod + def get_lbaas_agent_hosting_pool(self, context, pool_id): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/loadbalancer.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/loadbalancer.py new file mode 100644 index 00000000..ae91b651 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/loadbalancer.py @@ -0,0 +1,506 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from oslo.config import cfg +import six + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.api.v2 import resource_helper +from neutron.common import exceptions as qexception +from neutron import manager +from neutron.plugins.common import constants +from neutron.services import service_base + + +# Loadbalancer Exceptions +class DelayOrTimeoutInvalid(qexception.BadRequest): + message = _("Delay must be greater than or equal to timeout") + + +class NoEligibleBackend(qexception.NotFound): + message = _("No eligible backend for pool %(pool_id)s") + + +class VipNotFound(qexception.NotFound): + message = _("Vip %(vip_id)s could not be found") + + +class VipExists(qexception.NeutronException): + message = _("Another Vip already exists for pool %(pool_id)s") + + +class PoolNotFound(qexception.NotFound): + message = _("Pool %(pool_id)s could not be found") + + +class MemberNotFound(qexception.NotFound): + message = _("Member %(member_id)s could not be found") + + +class HealthMonitorNotFound(qexception.NotFound): + message = _("Health_monitor %(monitor_id)s could not be found") + + +class PoolMonitorAssociationNotFound(qexception.NotFound): + message = _("Monitor %(monitor_id)s is not associated " + "with Pool %(pool_id)s") + + +class PoolMonitorAssociationExists(qexception.Conflict): + message = _('health_monitor %(monitor_id)s is already associated ' + 'with pool %(pool_id)s') + + +class StateInvalid(qexception.NeutronException): + message = _("Invalid state %(state)s of Loadbalancer resource %(id)s") + + +class PoolInUse(qexception.InUse): + message = _("Pool %(pool_id)s is still in use") + + +class HealthMonitorInUse(qexception.InUse): + message = _("Health monitor %(monitor_id)s still has associations with " + "pools") + + +class PoolStatsNotFound(qexception.NotFound): + message = _("Statistics of Pool %(pool_id)s could not be found") + + +class ProtocolMismatch(qexception.BadRequest): + message = _("Protocol %(vip_proto)s does not match " + "pool protocol %(pool_proto)s") + + +class MemberExists(qexception.NeutronException): + message = _("Member with address %(address)s and port %(port)s " + "already present in pool %(pool)s") + + +RESOURCE_ATTRIBUTE_MAP = { + 'vips': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 
'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': '', + 'is_visible': True}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'subnet_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'address': {'allow_post': True, 'allow_put': False, + 'default': attr.ATTR_NOT_SPECIFIED, + 'validate': {'type:ip_address_or_none': None}, + 'is_visible': True}, + 'port_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'protocol_port': {'allow_post': True, 'allow_put': False, + 'validate': {'type:range': [0, 65535]}, + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'protocol': {'allow_post': True, 'allow_put': False, + 'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']}, + 'is_visible': True}, + 'pool_id': {'allow_post': True, 'allow_put': True, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'session_persistence': {'allow_post': True, 'allow_put': True, + 'convert_to': attr.convert_none_to_empty_dict, + 'default': {}, + 'validate': { + 'type:dict_or_empty': { + 'type': {'type:values': ['APP_COOKIE', + 'HTTP_COOKIE', + 'SOURCE_IP'], + 'required': True}, + 'cookie_name': {'type:string': None, + 'required': False}}}, + 'is_visible': True}, + 'connection_limit': {'allow_post': True, 'allow_put': True, + 'default': -1, + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'status_description': {'allow_post': False, 'allow_put': False, + 'is_visible': True} + }, + 'pools': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'vip_id': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': '', + 'is_visible': True}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'subnet_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'protocol': {'allow_post': True, 'allow_put': False, + 'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']}, + 'is_visible': True}, + 'provider': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED}, + 'lb_method': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'members': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'health_monitors': {'allow_post': True, 'allow_put': True, + 'default': None, + 'validate': {'type:uuid_list': None}, + 'convert_to': attr.convert_to_list, + 'is_visible': True}, + 'health_monitors_status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'admin_state_up': {'allow_post': True, 'allow_put': 
True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'status_description': {'allow_post': False, 'allow_put': False, + 'is_visible': True} + }, + 'members': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'pool_id': {'allow_post': True, 'allow_put': True, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'address': {'allow_post': True, 'allow_put': False, + 'validate': {'type:ip_address': None}, + 'is_visible': True}, + 'protocol_port': {'allow_post': True, 'allow_put': False, + 'validate': {'type:range': [0, 65535]}, + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'weight': {'allow_post': True, 'allow_put': True, + 'default': 1, + 'validate': {'type:range': [0, 256]}, + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'status_description': {'allow_post': False, 'allow_put': False, + 'is_visible': True} + }, + 'health_monitors': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'type': {'allow_post': True, 'allow_put': False, + 'validate': {'type:values': ['PING', 'TCP', 'HTTP', 'HTTPS']}, + 'is_visible': True}, + 'delay': {'allow_post': True, 'allow_put': True, + 'validate': {'type:non_negative': None}, + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'timeout': {'allow_post': True, 'allow_put': True, + 'validate': {'type:non_negative': None}, + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'max_retries': {'allow_post': True, 'allow_put': True, + 'validate': {'type:range': [1, 10]}, + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'http_method': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': 'GET', + 'is_visible': True}, + 'url_path': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': '/', + 'is_visible': True}, + 'expected_codes': {'allow_post': True, 'allow_put': True, + 'validate': { + 'type:regex': + '^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'}, + 'default': '200', + 'is_visible': True}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'status_description': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'pools': {'allow_post': False, 'allow_put': False, + 'is_visible': True} + } +} + +SUB_RESOURCE_ATTRIBUTE_MAP = { + 'health_monitors': { + 'parent': {'collection_name': 'pools', + 'member_name': 'pool'}, + 'parameters': {'id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': 
True}, + } + } +} + +lbaas_quota_opts = [ + cfg.IntOpt('quota_vip', + default=10, + help=_('Number of vips allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_pool', + default=10, + help=_('Number of pools allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_member', + default=-1, + help=_('Number of pool members allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_health_monitor', + default=-1, + help=_('Number of health monitors allowed per tenant. ' + 'A negative value means unlimited.')) +] +cfg.CONF.register_opts(lbaas_quota_opts, 'QUOTAS') + + +class Loadbalancer(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "LoadBalancing service" + + @classmethod + def get_alias(cls): + return "lbaas" + + @classmethod + def get_description(cls): + return "Extension for LoadBalancing service" + + @classmethod + def get_namespace(cls): + return "http://wiki.openstack.org/neutron/LBaaS/API_1.0" + + @classmethod + def get_updated(cls): + return "2012-10-07T10:00:00-00:00" + + @classmethod + def get_resources(cls): + plural_mappings = resource_helper.build_plural_mappings( + {}, RESOURCE_ATTRIBUTE_MAP) + plural_mappings['health_monitors_status'] = 'health_monitor_status' + attr.PLURALS.update(plural_mappings) + action_map = {'pool': {'stats': 'GET'}} + resources = resource_helper.build_resource_info(plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + constants.LOADBALANCER, + action_map=action_map, + register_quota=True) + plugin = manager.NeutronManager.get_service_plugins()[ + constants.LOADBALANCER] + for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: + # Special handling needed for sub-resources with 'y' ending + # (e.g. proxies -> proxy) + resource_name = collection_name[:-1] + parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') + params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( + 'parameters') + + controller = base.create_resource(collection_name, resource_name, + plugin, params, + allow_bulk=True, + parent=parent) + + resource = extensions.ResourceExtension( + collection_name, + controller, parent, + path_prefix=constants.COMMON_PREFIXES[constants.LOADBALANCER], + attr_map=params) + resources.append(resource) + + return resources + + @classmethod + def get_plugin_interface(cls): + return LoadBalancerPluginBase + + def update_attributes_map(self, attributes): + super(Loadbalancer, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +@six.add_metaclass(abc.ABCMeta) +class LoadBalancerPluginBase(service_base.ServicePluginBase): + + def get_plugin_name(self): + return constants.LOADBALANCER + + def get_plugin_type(self): + return constants.LOADBALANCER + + def get_plugin_description(self): + return 'LoadBalancer service plugin' + + @abc.abstractmethod + def get_vips(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_vip(self, context, id, fields=None): + pass + + @abc.abstractmethod + def create_vip(self, context, vip): + pass + + @abc.abstractmethod + def update_vip(self, context, id, vip): + pass + + @abc.abstractmethod + def delete_vip(self, context, id): + pass + + @abc.abstractmethod + def get_pools(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_pool(self, context, id, fields=None): + pass + + @abc.abstractmethod + def 
create_pool(self, context, pool): + pass + + @abc.abstractmethod + def update_pool(self, context, id, pool): + pass + + @abc.abstractmethod + def delete_pool(self, context, id): + pass + + @abc.abstractmethod + def stats(self, context, pool_id): + pass + + @abc.abstractmethod + def create_pool_health_monitor(self, context, health_monitor, pool_id): + pass + + @abc.abstractmethod + def get_pool_health_monitor(self, context, id, pool_id, fields=None): + pass + + @abc.abstractmethod + def delete_pool_health_monitor(self, context, id, pool_id): + pass + + @abc.abstractmethod + def get_members(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_member(self, context, id, fields=None): + pass + + @abc.abstractmethod + def create_member(self, context, member): + pass + + @abc.abstractmethod + def update_member(self, context, id, member): + pass + + @abc.abstractmethod + def delete_member(self, context, id): + pass + + @abc.abstractmethod + def get_health_monitors(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_health_monitor(self, context, id, fields=None): + pass + + @abc.abstractmethod + def create_health_monitor(self, context, health_monitor): + pass + + @abc.abstractmethod + def update_health_monitor(self, context, id, health_monitor): + pass + + @abc.abstractmethod + def delete_health_monitor(self, context, id): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/multiprovidernet.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/multiprovidernet.py new file mode 100644 index 00000000..79fcb9e4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/multiprovidernet.py @@ -0,0 +1,114 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
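+# This module implements the "multi-provider" API extension: it adds a
+# 'segments' attribute to the network resource so one network can map to
+# several provider segments.  _convert_and_validate_segments() below rejects
+# duplicate segments and any key other than the provider:* triple; an
+# illustrative value (names and IDs are placeholders):
+#
+#     "segments": [{"provider:network_type": "vlan",
+#                   "provider:physical_network": "physnet1",
+#                   "provider:segmentation_id": 100},
+#                  {"provider:network_type": "gre",
+#                   "provider:segmentation_id": 7}]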
+ +import webob.exc + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as qexception +from neutron.extensions import providernet as pnet + +SEGMENTS = 'segments' + + +class SegmentsSetInConjunctionWithProviders(qexception.InvalidInput): + message = _("Segments and provider values cannot both be set.") + + +class SegmentsContainDuplicateEntry(qexception.InvalidInput): + message = _("Duplicate segment entry in request.") + + +def _convert_and_validate_segments(segments, valid_values=None): + unique = set() + for segment in segments: + unique.add(tuple(segment.iteritems())) + network_type = segment.get(pnet.NETWORK_TYPE, + attr.ATTR_NOT_SPECIFIED) + segment[pnet.NETWORK_TYPE] = network_type + physical_network = segment.get(pnet.PHYSICAL_NETWORK, + attr.ATTR_NOT_SPECIFIED) + segment[pnet.PHYSICAL_NETWORK] = physical_network + segmentation_id = segment.get(pnet.SEGMENTATION_ID) + if segmentation_id: + segment[pnet.SEGMENTATION_ID] = attr.convert_to_int( + segmentation_id) + else: + segment[pnet.SEGMENTATION_ID] = attr.ATTR_NOT_SPECIFIED + if len(segment.keys()) != 3: + msg = (_("Unrecognized attribute(s) '%s'") % + ', '.join(set(segment.keys()) - + set([pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID]))) + raise webob.exc.HTTPBadRequest(msg) + if len(unique) != len(segments): + raise SegmentsContainDuplicateEntry() + + +attr.validators['type:convert_segments'] = ( + _convert_and_validate_segments) + + +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': { + SEGMENTS: {'allow_post': True, 'allow_put': True, + 'validate': {'type:convert_segments': None}, + 'convert_list_to': attr.convert_kvp_list_to_dict, + 'default': attr.ATTR_NOT_SPECIFIED, + 'enforce_policy': True, + 'is_visible': True}, + } +} + + +class Multiprovidernet(extensions.ExtensionDescriptor): + """Extension class supporting multiple provider networks. + + This class is used by neutron's extension framework to make + metadata about the multiple provider network extension available to + clients. No new resources are defined by this extension. Instead, + the existing network resource's request and response messages are + extended with 'segments' attribute. + + With admin rights, network dictionaries returned will also include + 'segments' attribute. + """ + + @classmethod + def get_name(cls): + return "Multi Provider Network" + + @classmethod + def get_alias(cls): + return "multi-provider" + + @classmethod + def get_description(cls): + return ("Expose mapping of virtual networks to multiple physical " + "networks") + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/multi-provider/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-06-27T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/portbindings.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/portbindings.py new file mode 100644 index 00000000..7e5c76dd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/portbindings.py @@ -0,0 +1,133 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api import extensions +from neutron.api.v2 import attributes + +# The type of vnic that this port should be attached to +VNIC_TYPE = 'binding:vnic_type' +# The service will return the vif type for the specific port. +VIF_TYPE = 'binding:vif_type' +# The service may return a dictionary containing additional +# information needed by the interface driver. The set of items +# returned may depend on the value of VIF_TYPE. +VIF_DETAILS = 'binding:vif_details' +# In some cases different implementations may be run on different hosts. +# The host on which the port will be allocated. +HOST_ID = 'binding:host_id' +# The profile will be a dictionary that enables the application running +# on the specific host to pass and receive vif port specific information to +# the plugin. +PROFILE = 'binding:profile' + +# The keys below are used in the VIF_DETAILS attribute to convey +# information to the VIF driver. + +# TODO(rkukura): Replace CAP_PORT_FILTER, which nova no longer +# understands, with the new set of VIF security details to be used in +# the VIF_DETAILS attribute. +# +# - port_filter : Boolean value indicating Neutron provides port filtering +# features such as security group and anti MAC/IP spoofing +# - ovs_hybrid_plug: Boolean used to inform Nova that the hybrid plugging +# strategy for OVS should be used +CAP_PORT_FILTER = 'port_filter' +OVS_HYBRID_PLUG = 'ovs_hybrid_plug' + +VIF_TYPE_UNBOUND = 'unbound' +VIF_TYPE_BINDING_FAILED = 'binding_failed' +VIF_TYPE_IOVISOR = 'iovisor' +VIF_TYPE_OVS = 'ovs' +VIF_TYPE_IVS = 'ivs' +VIF_TYPE_BRIDGE = 'bridge' +VIF_TYPE_802_QBG = '802.1qbg' +VIF_TYPE_802_QBH = '802.1qbh' +VIF_TYPE_HYPERV = 'hyperv' +VIF_TYPE_MIDONET = 'midonet' +VIF_TYPE_MLNX_DIRECT = 'mlnx_direct' +VIF_TYPE_MLNX_HOSTDEV = 'hostdev' +VIF_TYPE_OTHER = 'other' +VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS, + VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG, + VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET, + VIF_TYPE_MLNX_DIRECT, VIF_TYPE_MLNX_HOSTDEV, VIF_TYPE_OTHER] + +VNIC_NORMAL = 'normal' +VNIC_DIRECT = 'direct' +VNIC_MACVTAP = 'macvtap' +VNIC_TYPES = [VNIC_NORMAL, VNIC_DIRECT, VNIC_MACVTAP] + +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': { + VIF_TYPE: {'allow_post': False, 'allow_put': False, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'enforce_policy': True, + 'is_visible': True}, + VIF_DETAILS: {'allow_post': False, 'allow_put': False, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'enforce_policy': True, + 'is_visible': True}, + VNIC_TYPE: {'allow_post': True, 'allow_put': True, + 'default': VNIC_NORMAL, + 'is_visible': True, + 'validate': {'type:values': VNIC_TYPES}, + 'enforce_policy': True}, + HOST_ID: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True, + 'enforce_policy': True}, + PROFILE: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'enforce_policy': True, + 'validate': {'type:dict_or_none': None}, + 'is_visible': True}, + } +} + + +class Portbindings(extensions.ExtensionDescriptor): + """Extension class supporting port bindings. 
+ + This class is used by neutron's extension framework to make + metadata about the port bindings available to external applications. + + With admin rights one will be able to update and read the values. + """ + + @classmethod + def get_name(cls): + return "Port Binding" + + @classmethod + def get_alias(cls): + return "binding" + + @classmethod + def get_description(cls): + return "Expose port bindings of a virtual port to external application" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/binding/api/v1.0" + + @classmethod + def get_updated(cls): + return "2014-02-03T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/providernet.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/providernet.py new file mode 100644 index 00000000..944de104 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/providernet.py @@ -0,0 +1,95 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.common import exceptions as n_exc + + +NETWORK_TYPE = 'provider:network_type' +PHYSICAL_NETWORK = 'provider:physical_network' +SEGMENTATION_ID = 'provider:segmentation_id' + +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': { + NETWORK_TYPE: {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'enforce_policy': True, + 'is_visible': True}, + PHYSICAL_NETWORK: {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'enforce_policy': True, + 'is_visible': True}, + SEGMENTATION_ID: {'allow_post': True, 'allow_put': True, + 'convert_to': int, + 'enforce_policy': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + } +} + + +def _raise_if_updates_provider_attributes(attrs): + """Raise exception if provider attributes are present. + + This method is used for plugins that do not support + updating provider networks. + """ + immutable = (NETWORK_TYPE, PHYSICAL_NETWORK, SEGMENTATION_ID) + if any(attributes.is_attr_set(attrs.get(a)) for a in immutable): + msg = _("Plugin does not support updating provider attributes") + raise n_exc.InvalidInput(error_message=msg) + + +class Providernet(extensions.ExtensionDescriptor): + """Extension class supporting provider networks. + + This class is used by neutron's extension framework to make + metadata about the provider network extension available to + clients. No new resources are defined by this extension. Instead, + the existing network resource's request and response messages are + extended with attributes in the provider namespace. + + With admin rights, network dictionaries returned will also include + provider attributes. 
+ """ + + @classmethod + def get_name(cls): + return "Provider Network" + + @classmethod + def get_alias(cls): + return "provider" + + @classmethod + def get_description(cls): + return "Expose mapping of virtual networks to physical networks" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/provider/api/v1.0" + + @classmethod + def get_updated(cls): + return "2012-09-07T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/quotasv2.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/quotasv2.py new file mode 100644 index 00000000..4fa9bf28 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/quotasv2.py @@ -0,0 +1,152 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from oslo.config import cfg +import webob + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron.api.v2 import resource +from neutron.common import exceptions as n_exc +from neutron import manager +from neutron.openstack.common import importutils +from neutron import quota +from neutron import wsgi + + +RESOURCE_NAME = 'quota' +RESOURCE_COLLECTION = RESOURCE_NAME + "s" +QUOTAS = quota.QUOTAS +DB_QUOTA_DRIVER = 'neutron.db.quota_db.DbQuotaDriver' +EXTENDED_ATTRIBUTES_2_0 = { + RESOURCE_COLLECTION: {} +} + + +class QuotaSetsController(wsgi.Controller): + + def __init__(self, plugin): + self._resource_name = RESOURCE_NAME + self._plugin = plugin + self._driver = importutils.import_class( + cfg.CONF.QUOTAS.quota_driver + ) + self._update_extended_attributes = True + + def _update_attributes(self): + for quota_resource in QUOTAS.resources.iterkeys(): + attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION] + attr_dict[quota_resource] = { + 'allow_post': False, + 'allow_put': True, + 'convert_to': attributes.convert_to_int, + 'validate': {'type:range': [-1, sys.maxsize]}, + 'is_visible': True} + self._update_extended_attributes = False + + def _get_quotas(self, request, tenant_id): + return self._driver.get_tenant_quotas( + request.context, QUOTAS.resources, tenant_id) + + def create(self, request, body=None): + msg = _('POST requests are not supported on this resource.') + raise webob.exc.HTTPNotImplemented(msg) + + def index(self, request): + context = request.context + self._check_admin(context) + return {self._resource_name + "s": + self._driver.get_all_quotas(context, QUOTAS.resources)} + + def tenant(self, request): + """Retrieve the tenant info in context.""" + context = request.context + if not context.tenant_id: + raise n_exc.QuotaMissingTenant() + return {'tenant': {'tenant_id': context.tenant_id}} + + def show(self, request, id): + if id != request.context.tenant_id: + self._check_admin(request.context, + reason=_("Only admin is authorized " + "to access quotas for 
another tenant")) + return {self._resource_name: self._get_quotas(request, id)} + + def _check_admin(self, context, + reason=_("Only admin can view or configure quota")): + if not context.is_admin: + raise n_exc.AdminRequired(reason=reason) + + def delete(self, request, id): + self._check_admin(request.context) + self._driver.delete_tenant_quota(request.context, id) + + def update(self, request, id, body=None): + self._check_admin(request.context) + if self._update_extended_attributes: + self._update_attributes() + body = base.Controller.prepare_request_body( + request.context, body, False, self._resource_name, + EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]) + for key, value in body[self._resource_name].items(): + self._driver.update_quota_limit(request.context, id, key, value) + return {self._resource_name: self._get_quotas(request, id)} + + +class Quotasv2(extensions.ExtensionDescriptor): + """Quotas management support.""" + + @classmethod + def get_name(cls): + return "Quota management support" + + @classmethod + def get_alias(cls): + return RESOURCE_COLLECTION + + @classmethod + def get_description(cls): + description = 'Expose functions for quotas management' + if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER: + description += ' per tenant' + return description + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/network/ext/quotas-sets/api/v2.0" + + @classmethod + def get_updated(cls): + return "2012-07-29T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + controller = resource.Resource( + QuotaSetsController(manager.NeutronManager.get_plugin()), + faults=base.FAULT_MAP) + return [extensions.ResourceExtension( + Quotasv2.get_alias(), + controller, + collection_actions={'tenant': 'GET'})] + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/routedserviceinsertion.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/routedserviceinsertion.py new file mode 100644 index 00000000..06ff9e25 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/routedserviceinsertion.py @@ -0,0 +1,71 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Kaiwei Fan, VMware, Inc + + +ROUTER_ID = 'router_id' +EXTENDED_ATTRIBUTES_2_0 = { + 'vips': { + ROUTER_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'default': None, 'is_visible': True}, + }, + 'pools': { + ROUTER_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'default': None, 'is_visible': True}, + }, + 'health_monitors': { + ROUTER_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'default': None, 'is_visible': True}, + }, + + 'firewalls': { + ROUTER_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'default': None, 'is_visible': True}, + } +} + + +class Routedserviceinsertion(object): + """Extension class supporting routed service type.""" + + @classmethod + def get_name(cls): + return "Routed Service Insertion" + + @classmethod + def get_alias(cls): + return "routed-service-insertion" + + @classmethod + def get_description(cls): + return "Provides routed service type" + + @classmethod + def get_namespace(cls): + return "" + + @classmethod + def get_updated(cls): + return "2013-01-29T00:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/routerservicetype.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/routerservicetype.py new file mode 100644 index 00000000..6168adf3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/routerservicetype.py @@ -0,0 +1,55 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kaiwei Fan, VMware, Inc + + +SERVICE_TYPE_ID = 'service_type_id' +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + SERVICE_TYPE_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid_or_none': None}, + 'default': None, 'is_visible': True}, + } +} + + +class Routerservicetype(object): + """Extension class supporting router service type.""" + + @classmethod + def get_name(cls): + return "Router Service Type" + + @classmethod + def get_alias(cls): + return "router-service-type" + + @classmethod + def get_description(cls): + return "Provides router service type" + + @classmethod + def get_namespace(cls): + return "" + + @classmethod + def get_updated(cls): + return "2013-01-29T00:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/securitygroup.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/securitygroup.py new file mode 100644 index 00000000..5ebb9de6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/securitygroup.py @@ -0,0 +1,354 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import netaddr + +from oslo.config import cfg +import six + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import constants as const +from neutron.common import exceptions as qexception +from neutron import manager +from neutron.openstack.common import uuidutils +from neutron import quota + + +# Security group Exceptions +class SecurityGroupInvalidPortRange(qexception.InvalidInput): + message = _("For TCP/UDP protocols, port_range_min must be " + "<= port_range_max") + + +class SecurityGroupInvalidPortValue(qexception.InvalidInput): + message = _("Invalid value for port %(port)s") + + +class SecurityGroupInvalidIcmpValue(qexception.InvalidInput): + message = _("Invalid value for ICMP %(field)s (%(attr)s) " + "%(value)s. It must be 0 to 255.") + + +class SecurityGroupMissingIcmpType(qexception.InvalidInput): + message = _("ICMP code (port-range-max) %(value)s is provided" + " but ICMP type (port-range-min) is missing.") + + +class SecurityGroupInUse(qexception.InUse): + message = _("Security Group %(id)s in use.") + + +class SecurityGroupCannotRemoveDefault(qexception.InUse): + message = _("Removing default security group not allowed.") + + +class SecurityGroupCannotUpdateDefault(qexception.InUse): + message = _("Updating default security group not allowed.") + + +class SecurityGroupDefaultAlreadyExists(qexception.InUse): + message = _("Default security group already exists.") + + +class SecurityGroupRuleInvalidProtocol(qexception.InvalidInput): + message = _("Security group rule protocol %(protocol)s not supported. " + "Only protocol values %(values)s and their integer " + "representation (0 to 255) are supported.") + + +class SecurityGroupRulesNotSingleTenant(qexception.InvalidInput): + message = _("Multiple tenant_ids in bulk security group rule create" + " not allowed") + + +class SecurityGroupRemoteGroupAndRemoteIpPrefix(qexception.InvalidInput): + message = _("Only remote_ip_prefix or remote_group_id may " + "be provided.") + + +class SecurityGroupProtocolRequiredWithPorts(qexception.InvalidInput): + message = _("Must also specifiy protocol if port range is given.") + + +class SecurityGroupNotSingleGroupRules(qexception.InvalidInput): + message = _("Only allowed to update rules for " + "one security profile at a time") + + +class SecurityGroupNotFound(qexception.NotFound): + message = _("Security group %(id)s does not exist") + + +class SecurityGroupRuleNotFound(qexception.NotFound): + message = _("Security group rule %(id)s does not exist") + + +class DuplicateSecurityGroupRuleInPost(qexception.InUse): + message = _("Duplicate Security Group Rule in POST.") + + +class SecurityGroupRuleExists(qexception.InUse): + message = _("Security group rule already exists. 
Group id is %(id)s.") + + +class SecurityGroupRuleParameterConflict(qexception.InvalidInput): + message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s") + + +def convert_protocol(value): + if value is None: + return + try: + val = int(value) + if val >= 0 and val <= 255: + return value + raise SecurityGroupRuleInvalidProtocol( + protocol=value, values=sg_supported_protocols) + except (ValueError, TypeError): + if value.lower() in sg_supported_protocols: + return value.lower() + raise SecurityGroupRuleInvalidProtocol( + protocol=value, values=sg_supported_protocols) + except AttributeError: + raise SecurityGroupRuleInvalidProtocol( + protocol=value, values=sg_supported_protocols) + + +def convert_ethertype_to_case_insensitive(value): + if isinstance(value, basestring): + for ethertype in sg_supported_ethertypes: + if ethertype.lower() == value.lower(): + return ethertype + + +def convert_validate_port_value(port): + if port is None: + return port + try: + val = int(port) + except (ValueError, TypeError): + raise SecurityGroupInvalidPortValue(port=port) + + if val >= 0 and val <= 65535: + return val + else: + raise SecurityGroupInvalidPortValue(port=port) + + +def convert_to_uuid_list_or_none(value_list): + if value_list is None: + return + for sg_id in value_list: + if not uuidutils.is_uuid_like(sg_id): + msg = _("'%s' is not an integer or uuid") % sg_id + raise qexception.InvalidInput(error_message=msg) + return value_list + + +def convert_ip_prefix_to_cidr(ip_prefix): + if not ip_prefix: + return + try: + cidr = netaddr.IPNetwork(ip_prefix) + return str(cidr) + except (ValueError, TypeError, netaddr.AddrFormatError): + raise qexception.InvalidCIDR(input=ip_prefix) + + +def _validate_name_not_default(data, valid_values=None): + if data == "default": + raise SecurityGroupDefaultAlreadyExists() + + +attr.validators['type:name_not_default'] = _validate_name_not_default + +sg_supported_protocols = [None, const.PROTO_NAME_TCP, + const.PROTO_NAME_UDP, const.PROTO_NAME_ICMP] +sg_supported_ethertypes = ['IPv4', 'IPv6'] + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'security_groups': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': '', + 'validate': {'type:name_not_default': None}}, + 'description': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + 'security_group_rules': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + }, + 'security_group_rules': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'security_group_id': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'required_by_policy': True}, + 'remote_group_id': {'allow_post': True, 'allow_put': False, + 'default': None, 'is_visible': True}, + 'direction': {'allow_post': True, 'allow_put': True, + 'is_visible': True, + 'validate': {'type:values': ['ingress', 'egress']}}, + 'protocol': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': None, + 'convert_to': convert_protocol}, + 'port_range_min': {'allow_post': True, 'allow_put': False, + 'convert_to': convert_validate_port_value, + 'default': None, 'is_visible': True}, + 'port_range_max': {'allow_post': True, 'allow_put': False, + 
'convert_to': convert_validate_port_value, + 'default': None, 'is_visible': True}, + 'ethertype': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': 'IPv4', + 'convert_to': convert_ethertype_to_case_insensitive, + 'validate': {'type:values': sg_supported_ethertypes}}, + 'remote_ip_prefix': {'allow_post': True, 'allow_put': False, + 'default': None, 'is_visible': True, + 'convert_to': convert_ip_prefix_to_cidr}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + } +} + + +SECURITYGROUPS = 'security_groups' +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': {SECURITYGROUPS: {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'convert_to': convert_to_uuid_list_or_none, + 'default': attr.ATTR_NOT_SPECIFIED}}} +security_group_quota_opts = [ + cfg.IntOpt('quota_security_group', + default=10, + help=_('Number of security groups allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_security_group_rule', + default=100, + help=_('Number of security rules allowed per tenant. ' + 'A negative value means unlimited.')), +] +cfg.CONF.register_opts(security_group_quota_opts, 'QUOTAS') + + +class Securitygroup(extensions.ExtensionDescriptor): + """Security group extension.""" + + @classmethod + def get_name(cls): + return "security-group" + + @classmethod + def get_alias(cls): + return "security-group" + + @classmethod + def get_description(cls): + return "The security groups extension." + + @classmethod + def get_namespace(cls): + # todo + return "http://docs.openstack.org/ext/securitygroups/api/v2.0" + + @classmethod + def get_updated(cls): + return "2012-10-05T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] + attr.PLURALS.update(dict(my_plurals)) + exts = [] + plugin = manager.NeutronManager.get_plugin() + for resource_name in ['security_group', 'security_group_rule']: + collection_name = resource_name.replace('_', '-') + "s" + params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) + quota.QUOTAS.register_resource_by_name(resource_name) + controller = base.create_resource(collection_name, + resource_name, + plugin, params, allow_bulk=True, + allow_pagination=True, + allow_sorting=True) + + ex = extensions.ResourceExtension(collection_name, + controller, + attr_map=params) + exts.append(ex) + + return exts + + def get_extended_resources(self, version): + if version == "2.0": + return dict(EXTENDED_ATTRIBUTES_2_0.items() + + RESOURCE_ATTRIBUTE_MAP.items()) + else: + return {} + + +@six.add_metaclass(abc.ABCMeta) +class SecurityGroupPluginBase(object): + + @abc.abstractmethod + def create_security_group(self, context, security_group): + pass + + @abc.abstractmethod + def update_security_group(self, context, id, security_group): + pass + + @abc.abstractmethod + def delete_security_group(self, context, id): + pass + + @abc.abstractmethod + def get_security_groups(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + pass + + @abc.abstractmethod + def get_security_group(self, context, id, fields=None): + pass + + @abc.abstractmethod + def create_security_group_rule(self, context, security_group_rule): + pass + + @abc.abstractmethod + def delete_security_group_rule(self, context, id): + pass + + @abc.abstractmethod + def get_security_group_rules(self, context, filters=None, fields=None, + sorts=None, limit=None, 
marker=None, + page_reverse=False): + pass + + @abc.abstractmethod + def get_security_group_rule(self, context, id, fields=None): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/servicetype.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/servicetype.py new file mode 100644 index 00000000..25633775 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/servicetype.py @@ -0,0 +1,91 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Salvatore Orlando, VMware +# + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron.db import servicetype_db +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +RESOURCE_NAME = "service_provider" +COLLECTION_NAME = "%ss" % RESOURCE_NAME +SERVICE_ATTR = 'service_type' +PLUGIN_ATTR = 'plugin' +DRIVER_ATTR = 'driver' +EXT_ALIAS = 'service-type' + +# Attribute Map for Service Provider Resource +# Allow read-only access +RESOURCE_ATTRIBUTE_MAP = { + COLLECTION_NAME: { + 'service_type': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'name': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'default': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + } +} + + +class Servicetype(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return _("Neutron Service Type Management") + + @classmethod + def get_alias(cls): + return EXT_ALIAS + + @classmethod + def get_description(cls): + return _("API for retrieving service providers for " + "Neutron advanced services") + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/service-type/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-01-20T00:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Extended Resource for service type management.""" + my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] + attributes.PLURALS.update(dict(my_plurals)) + attr_map = RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME] + collection_name = COLLECTION_NAME.replace('_', '-') + controller = base.create_resource( + collection_name, + RESOURCE_NAME, + servicetype_db.ServiceTypeManager.get_instance(), + attr_map) + return [extensions.ResourceExtension(collection_name, + controller, + attr_map=attr_map)] + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/extensions/vpnaas.py b/icehouse-patches/neutron/dvr-patch/neutron/extensions/vpnaas.py new file mode 100644 index 00000000..f6bdcd76 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/extensions/vpnaas.py @@ -0,0 +1,482 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard. + +import abc + +import six + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import resource_helper +from neutron.common import exceptions as qexception +from neutron.plugins.common import constants +from neutron.services import service_base + + +class VPNServiceNotFound(qexception.NotFound): + message = _("VPNService %(vpnservice_id)s could not be found") + + +class IPsecSiteConnectionNotFound(qexception.NotFound): + message = _("ipsec_site_connection %(ipsecsite_conn_id)s not found") + + +class IPsecSiteConnectionDpdIntervalValueError(qexception.InvalidInput): + message = _("ipsec_site_connection %(attr)s is " + "equal to or less than dpd_interval") + + +class IPsecSiteConnectionMtuError(qexception.InvalidInput): + message = _("ipsec_site_connection MTU %(mtu)d is too small " + "for ipv%(version)s") + + +class IKEPolicyNotFound(qexception.NotFound): + message = _("IKEPolicy %(ikepolicy_id)s could not be found") + + +class IPsecPolicyNotFound(qexception.NotFound): + message = _("IPsecPolicy %(ipsecpolicy_id)s could not be found") + + +class IKEPolicyInUse(qexception.InUse): + message = _("IKEPolicy %(ikepolicy_id)s is in use by existing " + "IPsecSiteConnection and can't be updated or deleted") + + +class VPNServiceInUse(qexception.InUse): + message = _("VPNService %(vpnservice_id)s is still in use") + + +class RouterInUseByVPNService(qexception.InUse): + message = _("Router %(router_id)s is used by VPNService %(vpnservice_id)s") + + +class VPNStateInvalidToUpdate(qexception.BadRequest): + message = _("Invalid state %(state)s of vpnaas resource %(id)s" + " for updating") + + +class IPsecPolicyInUse(qexception.InUse): + message = _("IPsecPolicy %(ipsecpolicy_id)s is in use by existing " + "IPsecSiteConnection and can't be updated or deleted") + + +class DeviceDriverImportError(qexception.NeutronException): + message = _("Can not load driver :%(device_driver)s") + + +class SubnetIsNotConnectedToRouter(qexception.BadRequest): + message = _("Subnet %(subnet_id)s is not " + "connected to Router %(router_id)s") + + +class RouterIsNotExternal(qexception.BadRequest): + message = _("Router %(router_id)s has no external network gateway set") + + +vpn_supported_initiators = ['bi-directional', 'response-only'] +vpn_supported_encryption_algorithms = ['3des', 'aes-128', + 'aes-192', 'aes-256'] +vpn_dpd_supported_actions = [ + 'hold', 'clear', 'restart', 'restart-by-peer', 'disabled' +] +vpn_supported_transform_protocols = ['esp', 'ah', 'ah-esp'] +vpn_supported_encapsulation_mode = ['tunnel', 'transport'] +#TODO(nati) add kilobytes when we support it +vpn_supported_lifetime_units = ['seconds'] +vpn_supported_pfs = ['group2', 'group5', 'group14'] +vpn_supported_ike_versions = ['v1', 'v2'] +vpn_supported_auth_mode = ['psk'] +vpn_supported_auth_algorithms = ['sha1'] +vpn_supported_phase1_negotiation_mode = ['main'] + +vpn_lifetime_limits = (60, 
attr.UNLIMITED) +positive_int = (0, attr.UNLIMITED) + +RESOURCE_ATTRIBUTE_MAP = { + + 'vpnservices': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'subnet_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'router_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True} + }, + + 'ipsec_site_connections': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'peer_address': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'peer_id': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'peer_cidrs': {'allow_post': True, 'allow_put': True, + 'convert_to': attr.convert_to_list, + 'validate': {'type:subnet_list': None}, + 'is_visible': True}, + 'route_mode': {'allow_post': False, 'allow_put': False, + 'default': 'static', + 'is_visible': True}, + 'mtu': {'allow_post': True, 'allow_put': True, + 'default': '1500', + 'validate': {'type:range': positive_int}, + 'convert_to': attr.convert_to_int, + 'is_visible': True}, + 'initiator': {'allow_post': True, 'allow_put': True, + 'default': 'bi-directional', + 'validate': {'type:values': vpn_supported_initiators}, + 'is_visible': True}, + 'auth_mode': {'allow_post': False, 'allow_put': False, + 'default': 'psk', + 'validate': {'type:values': vpn_supported_auth_mode}, + 'is_visible': True}, + 'psk': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'dpd': {'allow_post': True, 'allow_put': True, + 'convert_to': attr.convert_none_to_empty_dict, + 'is_visible': True, + 'default': {}, + 'validate': { + 'type:dict_or_empty': { + 'actions': { + 'type:values': vpn_dpd_supported_actions, + }, + 'interval': { + 'type:range': positive_int + }, + 'timeout': { + 'type:range': positive_int + }}}}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'vpnservice_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'ikepolicy_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, 
+ 'ipsecpolicy_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True} + }, + + 'ipsecpolicies': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'transform_protocol': { + 'allow_post': True, + 'allow_put': True, + 'default': 'esp', + 'validate': { + 'type:values': vpn_supported_transform_protocols}, + 'is_visible': True}, + 'auth_algorithm': { + 'allow_post': True, + 'allow_put': True, + 'default': 'sha1', + 'validate': { + 'type:values': vpn_supported_auth_algorithms + }, + 'is_visible': True}, + 'encryption_algorithm': { + 'allow_post': True, + 'allow_put': True, + 'default': 'aes-128', + 'validate': { + 'type:values': vpn_supported_encryption_algorithms + }, + 'is_visible': True}, + 'encapsulation_mode': { + 'allow_post': True, + 'allow_put': True, + 'default': 'tunnel', + 'validate': { + 'type:values': vpn_supported_encapsulation_mode + }, + 'is_visible': True}, + 'lifetime': {'allow_post': True, 'allow_put': True, + 'convert_to': attr.convert_none_to_empty_dict, + 'default': {}, + 'validate': { + 'type:dict_or_empty': { + 'units': { + 'type:values': vpn_supported_lifetime_units, + }, + 'value': { + 'type:range': vpn_lifetime_limits + }}}, + 'is_visible': True}, + 'pfs': {'allow_post': True, 'allow_put': True, + 'default': 'group5', + 'validate': {'type:values': vpn_supported_pfs}, + 'is_visible': True} + }, + + 'ikepolicies': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'auth_algorithm': {'allow_post': True, 'allow_put': True, + 'default': 'sha1', + 'validate': { + 'type:values': vpn_supported_auth_algorithms}, + 'is_visible': True}, + 'encryption_algorithm': { + 'allow_post': True, 'allow_put': True, + 'default': 'aes-128', + 'validate': {'type:values': vpn_supported_encryption_algorithms}, + 'is_visible': True}, + 'phase1_negotiation_mode': { + 'allow_post': True, 'allow_put': True, + 'default': 'main', + 'validate': { + 'type:values': vpn_supported_phase1_negotiation_mode + }, + 'is_visible': True}, + 'lifetime': {'allow_post': True, 'allow_put': True, + 'convert_to': attr.convert_none_to_empty_dict, + 'default': {}, + 'validate': { + 'type:dict_or_empty': { + 'units': { + 'type:values': vpn_supported_lifetime_units, + }, + 'value': { + 'type:range': vpn_lifetime_limits, + }}}, + 'is_visible': True}, + 'ike_version': {'allow_post': True, 'allow_put': True, + 'default': 'v1', + 'validate': { + 'type:values': vpn_supported_ike_versions}, + 'is_visible': True}, + 'pfs': {'allow_post': True, 'allow_put': True, + 'default': 'group5', + 'validate': {'type:values': 
vpn_supported_pfs}, + 'is_visible': True} + } +} + + +class Vpnaas(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "VPN service" + + @classmethod + def get_alias(cls): + return "vpnaas" + + @classmethod + def get_description(cls): + return "Extension for VPN service" + + @classmethod + def get_namespace(cls): + return "https://wiki.openstack.org/Neutron/VPNaaS" + + @classmethod + def get_updated(cls): + return "2013-05-29T10:00:00-00:00" + + @classmethod + def get_resources(cls): + special_mappings = {'ikepolicies': 'ikepolicy', + 'ipsecpolicies': 'ipsecpolicy'} + plural_mappings = resource_helper.build_plural_mappings( + special_mappings, RESOURCE_ATTRIBUTE_MAP) + plural_mappings['peer_cidrs'] = 'peer_cidr' + attr.PLURALS.update(plural_mappings) + return resource_helper.build_resource_info(plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + constants.VPN, + register_quota=True, + translate_name=True) + + @classmethod + def get_plugin_interface(cls): + return VPNPluginBase + + def update_attributes_map(self, attributes): + super(Vpnaas, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +@six.add_metaclass(abc.ABCMeta) +class VPNPluginBase(service_base.ServicePluginBase): + + def get_plugin_name(self): + return constants.VPN + + def get_plugin_type(self): + return constants.VPN + + def get_plugin_description(self): + return 'VPN service plugin' + + @abc.abstractmethod + def get_vpnservices(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_vpnservice(self, context, vpnservice_id, fields=None): + pass + + @abc.abstractmethod + def create_vpnservice(self, context, vpnservice): + pass + + @abc.abstractmethod + def update_vpnservice(self, context, vpnservice_id, vpnservice): + pass + + @abc.abstractmethod + def delete_vpnservice(self, context, vpnservice_id): + pass + + @abc.abstractmethod + def get_ipsec_site_connections(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_ipsec_site_connection(self, context, + ipsecsite_conn_id, fields=None): + pass + + @abc.abstractmethod + def create_ipsec_site_connection(self, context, ipsec_site_connection): + pass + + @abc.abstractmethod + def update_ipsec_site_connection(self, context, + ipsecsite_conn_id, ipsec_site_connection): + pass + + @abc.abstractmethod + def delete_ipsec_site_connection(self, context, ipsecsite_conn_id): + pass + + @abc.abstractmethod + def get_ikepolicy(self, context, ikepolicy_id, fields=None): + pass + + @abc.abstractmethod + def get_ikepolicies(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def create_ikepolicy(self, context, ikepolicy): + pass + + @abc.abstractmethod + def update_ikepolicy(self, context, ikepolicy_id, ikepolicy): + pass + + @abc.abstractmethod + def delete_ikepolicy(self, context, ikepolicy_id): + pass + + @abc.abstractmethod + def get_ipsecpolicies(self, context, filters=None, fields=None): + pass + + @abc.abstractmethod + def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None): + pass + + @abc.abstractmethod + def create_ipsecpolicy(self, context, ipsecpolicy): + pass + + @abc.abstractmethod + def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy): + pass + + @abc.abstractmethod + def delete_ipsecpolicy(self, context, ipsecpolicy_id): + pass diff --git 
a/icehouse-patches/neutron/dvr-patch/neutron/hacking/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/hacking/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/icehouse-patches/neutron/dvr-patch/neutron/hacking/checks.py b/icehouse-patches/neutron/dvr-patch/neutron/hacking/checks.py new file mode 100644 index 00000000..899d7624 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/hacking/checks.py @@ -0,0 +1,50 @@ +# Copyright (c) 2014 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + +import pep8 + +""" +Guidelines for writing new hacking checks + + - Use only for Neutron specific tests. OpenStack general tests + should be submitted to the common 'hacking' module. + - Pick numbers in the range N3xx. Find the current test with + the highest allocated number and then pick the next value. + - Keep the test method code in the source file ordered based + on the N3xx value. + - List the new rule in the top level HACKING.rst file + - Add test cases for each new rule to + neutron/tests/unit/test_hacking.py + +""" + +log_translation = re.compile( + r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")") + + +def validate_log_translations(logical_line, physical_line, filename): + # Translations are not required in the test directory + if "neutron/tests" in filename: + return + if pep8.noqa(physical_line): + return + msg = "N320: Log messages require translations!" + if log_translation.match(logical_line): + yield (0, msg) + + +def factory(register): + register(validate_log_translations) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/hooks.py b/icehouse-patches/neutron/dvr-patch/neutron/hooks.py new file mode 100644 index 00000000..e01e343f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/hooks.py @@ -0,0 +1,27 @@ +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
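+# ---------------------------------------------------------------------------
+# Editor's note (illustrative only, not part of the original patch): per the
+# guidelines in neutron/hacking/checks.py above, a new Neutron-specific check
+# would take the next free N3xx number, live in that module in N3xx order, be
+# documented in HACKING.rst, get unit tests, and be registered in factory().
+# A minimal sketch, where the function name and the N321 number are assumed
+# for illustration:
+#
+#   def check_no_print_statements(logical_line):
+#       if logical_line.startswith("print "):
+#           yield (0, "N321: print statements are not allowed")
+#
+#   def factory(register):
+#       register(validate_log_translations)
+#       register(check_no_print_statements)
+# ---------------------------------------------------------------------------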
+ +import sys + + +def setup_hook(config): + """Filter config parsed from a setup.cfg to inject our defaults.""" + metadata = config['metadata'] + requires = metadata.get('requires_dist', '').split('\n') + if sys.platform == 'win32': + requires.append('pywin32') + requires.append('wmi') + metadata['requires_dist'] = "\n".join(requires) + config['metadata'] = metadata diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-error.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..48d2edcb --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,170 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: German (http://www.transifex.com/projects/p/neutron/language/" +"de/)\n" +"Language: de\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Ursprüngliche Ausnahme wird gelöscht: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Unerwartete Ausnahme %d mal(e) aufgetreten... Neuversuch." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "" + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Nicht behandelte Ausnahme" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "Datenbankausnahme eingeschlossen." + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Ausnahme bei Nachrichtenbehandlung" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Ausnahme bei Zeichenfolgeformatoperation" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Ausnahme %s wird an Aufrufenden zurückgegeben" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." 
+msgstr "" +"AMQP-Server auf %(hostname)s:%(port)d ist nicht erreichbar: %(err_str)s. " +"Erneuter Versuch in %(sleep_time)d Sekunden." + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" +"Fehler beim Deklarieren von Consumer für Topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Nachricht aus Warteschlange wurde nicht verarbeitet: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" +"Fehler beim Veröffentlichen von Nachricht zu Topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Nachricht wurde nicht verarbeitet und wird übersprungen." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"Verbindung zu AMQP-Server kann nicht hergestellt werden: %(e)s. %(delay)s " +"Sekunden Ruhemodus" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Fehler beim Verarbeiten der Nachricht. Wird übersprungen." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON-Serialisierung fehlgeschlagen." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC-Nachricht hat keine Methode enthalten." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Erstellung von Topicsocketdatei fehlgeschlagen." + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Lokaler topicbezogener Rückstandspuffer für Topic %(topic)s voll. Nachricht " +"wird gelöscht." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "das benötigte IPC-Verzeichnis existiert nicht unter %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Zugriff verweigert zum IPC Verzeichnis %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"ZeroMQ-Empfängerdämon konnte nicht erstellt werden. Socket ist " +"möglicherweise bereits belegt." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ-Umschlagsversion nicht unterstützt oder unbekannt." diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..338a6e23 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,131 @@ +# Translations template for neutron. 
+# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +# Carsten Duch , 2014 +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-06-14 19:31+0000\n" +"Last-Translator: Carsten Duch \n" +"Language-Team: German (http://www.transifex.com/projects/p/neutron/language/" +"de/)\n" +"Language: de\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s abgefangen. Vorgang wird beendet" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" +"Übergeordneter Prozess wurde unerwartet abgebrochen. Vorgang wird beendet" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Verzweigung zu schnell; im Ruhemodus" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Untergeordnetes Element %d gestartet" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Starten von %d Workers" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Untergeordnetes Element %(pid)d durch Signal %(sig)d abgebrochen" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Untergeordnete %(pid)s mit Status %(code)d beendet" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s abgefangen, untergeordnete Elemente werden gestoppt" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "Warten aufgerufen nach dem der Thread abgebrochen wurde. Bereinige." 
+ +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Warten auf Beenden von %d untergeordneten Elementen" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Lösche doppelte Zeile mit der ID %(id)s aus der Tabelle %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" +"Wiederherstellung der Verbindung zu AMQP-Server auf %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Verbunden mit AMQP-Server auf %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Verbunden mit AMQP-Server auf %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registrieren von Reaktor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "Eingangsreaktor registriert" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Socketverwendung" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Erstellen von Proxy für Topic: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Überspringen von Topicregistrierung. Bereits registriert." + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "MatchMaker nicht registriert: %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-warning.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-warning.po new file mode 100644 index 00000000..71ff0eb1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/de/LC_MESSAGES/neutron-log-warning.po @@ -0,0 +1,57 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: German (http://www.transifex.com/projects/p/neutron/language/" +"de/)\n" +"Language: de\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/service.py:363 +#, python-format +msgid "pid %d not in child list" +msgstr "PID %d nicht in Liste untergeordneter Elemente" + +#: neutron/openstack/common/db/sqlalchemy/session.py:506 +#, python-format +msgid "Database server has gone away: %s" +msgstr "Datenbankserver ist nicht mehr vorhanden: %s" + +#: neutron/openstack/common/db/sqlalchemy/session.py:559 +msgid "Unable to detect effective SQL mode" +msgstr "Unfähig geltenden SQL Modus zu erkennen." 
+ +#: neutron/openstack/common/db/sqlalchemy/session.py:567 +#, python-format +msgid "" +"MySQL SQL mode is '%s', consider enabling TRADITIONAL or STRICT_ALL_TABLES" +msgstr "" +"MySQL SQL Modus ist '%s', erwägen Sie TRADITIONAL oder STRICT_ALL_TABLES zu " +"aktivieren" + +#: neutron/openstack/common/db/sqlalchemy/session.py:673 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "SQL-Verbindung fehlgeschlagen. Noch %s weitere Versuche übrig." + +#: neutron/openstack/common/db/sqlalchemy/utils.py:97 +msgid "Id not in sort_keys; is sort_keys unique?" +msgstr "ID ist nicht in 'sort_keys' enthalten; ist 'sort_keys' eindeutig?" + +#: neutron/openstack/common/rpc/matchmaker_ring.py:75 +#: neutron/openstack/common/rpc/matchmaker_ring.py:93 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "Keine schlüsseldefinierenden Hosts für Topic '%s', siehe Ringdatei" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/en_AU/LC_MESSAGES/neutron-log-error.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_AU/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..e8e05cae --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_AU/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,163 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (Australia) (http://www.transifex.com/projects/p/" +"neutron/language/en_AU/)\n" +"Language: en_AU\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Original exception being dropped: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Unexpected exception occurred %d time(s)... retrying." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "Exception during rpc cleanup." + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Unhandled exception" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." 
+ +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exception during message handling" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Exception in string format operation" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Failed to process message ... skipping it." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Failed to process message ... will requeue." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Failed to declare consumer for topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Failed to consume message from queue: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Failed to publish message to topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Failed to process message... skipping it." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Error processing message. Skipping it." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON serialization failed." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "ZeroMQ socket could not be closed." + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC message did not include method." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Topic socket file creation failed." + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
+ +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "Required IPC directory does not exist at %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permission denied to IPC directory at %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "Could not create ZeroMQ receiver daemon. Socket may already be in use." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope version unsupported or unknown." diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/en_AU/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_AU/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..1c16092b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_AU/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (Australia) (http://www.transifex.com/projects/p/" +"neutron/language/en_AU/)\n" +"Language: en_AU\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "Caught %s, exiting" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Parent process has died unexpectedly, exiting" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Forking too fast, sleeping" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Started child %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d killed by signal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s exited with status %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Caught %s, stopping children" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Waiting on %d children to exit" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnecting to AMQP server on %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connected to AMQP server on %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connected to AMQP server on %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registering reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "In reactor registered" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consuming socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creating proxy for topic: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Skipping topic registration. Already registered." + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker unregistered: %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/en_GB/LC_MESSAGES/neutron-log-error.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_GB/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..75ea2a0b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_GB/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,163 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" +"neutron/language/en_GB/)\n" +"Language: en_GB\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Original exception being dropped: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Unexpected exception occurred %d time(s)... retrying." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "Exception during rpc cleanup." 
+ +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Unhandled exception" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exception during message handling" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Exception in string format operation" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Failed to process message ... skipping it." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Failed to process message ... will requeue." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Failed to declare consumer for topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Failed to consume message from queue: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Failed to publish message to topic '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Failed to process message... skipping it." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Error processing message. Skipping it." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON serialization failed." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC message did not include method." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Topic socket file creation failed." 
+ +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "Required IPC directory does not exist at %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permission denied to IPC directory at %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "Could not create ZeroMQ receiver daemon. Socket may already be in use." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope version unsupported or unknown." diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/en_GB/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_GB/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..9586f9ea --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_GB/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" +"neutron/language/en_GB/)\n" +"Language: en_GB\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "Caught %s, exiting" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Parent process has died unexpectedly, exiting" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Forking too fast, sleeping" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Started child %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d killed by signal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s exited with status %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Caught %s, stopping children" + +#: 
neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Waiting on %d children to exit" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnecting to AMQP server on %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connected to AMQP server on %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connected to AMQP server on %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registering reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "In reactor registered" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consuming socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creating proxy for topic: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Skipping topic registration. Already registered." + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker unregistered: %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/en_US/LC_MESSAGES/neutron.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_US/LC_MESSAGES/neutron.po new file mode 100644 index 00000000..fdd4c807 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/en_US/LC_MESSAGES/neutron.po @@ -0,0 +1,16173 @@ +# English (United States) translations for neutron. +# Copyright (C) 2013 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-30 06:06+0000\n" +"PO-Revision-Date: 2013-01-28 21:54+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: English (United States) " +"(http://www.transifex.com/projects/p/openstack/language/en_US/)\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/auth.py:35 +msgid "X_USER_ID is not found in request" +msgstr "" + +#: neutron/context.py:81 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: neutron/context.py:109 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: neutron/manager.py:71 +#, python-format +msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." +msgstr "" + +#: neutron/manager.py:83 +msgid "Neutron core_plugin not configured!" 
+msgstr "" + +#: neutron/manager.py:112 +#, python-format +msgid "Loading core plugin: %s" +msgstr "" + +#: neutron/manager.py:137 +#, python-format +msgid "Error loading plugin by name, %s" +msgstr "" + +#: neutron/manager.py:138 +#, python-format +msgid "Error loading plugin by class, %s" +msgstr "" + +#: neutron/manager.py:139 +msgid "Plugin not found." +msgstr "" + +#: neutron/manager.py:144 +msgid "Loading services supported by the core plugin" +msgstr "" + +#: neutron/manager.py:152 +#, python-format +msgid "Service %s is supported by the core plugin" +msgstr "" + +#: neutron/manager.py:165 +#, python-format +msgid "Loading service plugins: %s" +msgstr "" + +#: neutron/manager.py:170 +#, python-format +msgid "Loading Plugin: %s" +msgstr "" + +#: neutron/manager.py:178 +#, python-format +msgid "Multiple plugins for service %s were configured" +msgstr "" + +#: neutron/manager.py:190 +#, python-format +msgid "Successfully loaded %(type)s plugin. Description: %(desc)s" +msgstr "" + +#: neutron/policy.py:86 +#, python-format +msgid "Loading policies from file: %s" +msgstr "" + +#: neutron/policy.py:93 +#, python-format +msgid "" +"Found deprecated policy rule:%s. Please consider upgrading your policy " +"configuration file" +msgstr "" + +#: neutron/policy.py:105 +#, python-format +msgid "" +"Inserting policy:%(new_policy)s in place of deprecated " +"policy:%(old_policy)s" +msgstr "" + +#: neutron/policy.py:113 +#, python-format +msgid "" +"Backward compatibility unavailable for deprecated policy %s. The policy " +"will not be enforced" +msgstr "" + +#: neutron/policy.py:135 +#, python-format +msgid "Unable to find data type descriptor for attribute %s" +msgstr "" + +#: neutron/policy.py:140 +#, python-format +msgid "" +"Attribute type descriptor is not a dict. Unable to generate any sub-attr " +"policy rule for %s." +msgstr "" + +#: neutron/policy.py:213 +#, python-format +msgid "" +"Unable to identify a target field from:%s.match should be in the form " +"%%()s" +msgstr "" + +#: neutron/policy.py:239 +#, python-format +msgid "Unable to find ':' as separator in %s." +msgstr "" + +#: neutron/policy.py:243 +#, python-format +msgid "Unable to find resource name in %s" +msgstr "" + +#: neutron/policy.py:252 +#, python-format +msgid "" +"Unable to verify match:%(match)s as the parent resource: %(res)s was not " +"found" +msgstr "" + +#: neutron/policy.py:278 +#, python-format +msgid "Policy check error while calling %s!" +msgstr "" + +#: neutron/policy.py:309 +#, python-format +msgid "Unable to find requested field: %(field)s in target: %(target_dict)s" +msgstr "" + +#: neutron/policy.py:367 +#, python-format +msgid "Failed policy check for '%s'" +msgstr "" + +#: neutron/quota.py:34 +msgid "Resource name(s) that are supported in quota features" +msgstr "" + +#: neutron/quota.py:38 +msgid "" +"Default number of resource allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/quota.py:42 +msgid "Number of networks allowed per tenant.A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:46 +msgid "Number of subnets allowed per tenant, A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:50 +msgid "Number of ports allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:54 +msgid "Default driver to use for quota checks" +msgstr "" + +#: neutron/quota.py:148 neutron/quota.py:153 +msgid "Access to this resource was denied." 
+msgstr "" + +#: neutron/quota.py:226 +msgid "" +"ConfDriver is used as quota_driver because the loaded plugin does not " +"support 'quotas' table." +msgstr "" + +#: neutron/quota.py:231 +#, python-format +msgid "Loaded quota_driver: %s." +msgstr "" + +#: neutron/quota.py:240 +#, python-format +msgid "%s is already registered." +msgstr "" + +#: neutron/service.py:40 +msgid "Seconds between running periodic tasks" +msgstr "" + +#: neutron/service.py:43 +msgid "Number of separate worker processes for service" +msgstr "" + +#: neutron/service.py:46 +msgid "Number of RPC worker processes for service" +msgstr "" + +#: neutron/service.py:49 +msgid "" +"Range of seconds to randomly delay when starting the periodic task " +"scheduler to reduce stampeding. (Disable by setting to 0)" +msgstr "" + +#: neutron/service.py:105 neutron/service.py:163 +msgid "Unrecoverable error: please check log for details." +msgstr "" + +#: neutron/service.py:144 +msgid "Active plugin doesn't implement start_rpc_listeners" +msgstr "" + +#: neutron/service.py:146 +#, python-format +msgid "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." +msgstr "" + +#: neutron/service.py:170 +msgid "No known API applications configured." +msgstr "" + +#: neutron/service.py:177 +#, python-format +msgid "Neutron service started, listening on %(host)s:%(port)s" +msgstr "" + +#: neutron/service.py:278 +msgid "Exception occurs when timer stops" +msgstr "" + +#: neutron/service.py:288 +msgid "Exception occurs when waiting for timer" +msgstr "" + +#: neutron/wsgi.py:51 +msgid "Number of backlog requests to configure the socket with" +msgstr "" + +#: neutron/wsgi.py:55 +msgid "" +"Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not " +"supported on OS X." +msgstr "" + +#: neutron/wsgi.py:59 +msgid "Number of seconds to keep retrying to listen" +msgstr "" + +#: neutron/wsgi.py:62 +msgid "Max header line to accommodate large tokens" +msgstr "" + +#: neutron/wsgi.py:65 +msgid "Enable SSL on the API server" +msgstr "" + +#: neutron/wsgi.py:67 +msgid "CA certificate file to use to verify connecting clients" +msgstr "" + +#: neutron/wsgi.py:70 +msgid "Certificate file to use when starting the server securely" +msgstr "" + +#: neutron/wsgi.py:73 +msgid "Private key file to use when starting the server securely" +msgstr "" + +#: neutron/wsgi.py:132 +#, python-format +msgid "Unable to listen on %(host)s:%(port)s" +msgstr "" + +#: neutron/wsgi.py:138 +#, python-format +msgid "Unable to find ssl_cert_file : %s" +msgstr "" + +#: neutron/wsgi.py:144 +#, python-format +msgid "Unable to find ssl_key_file : %s" +msgstr "" + +#: neutron/wsgi.py:149 +#, python-format +msgid "Unable to find ssl_ca_file : %s" +msgstr "" + +#: neutron/wsgi.py:182 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" +msgstr "" + +#: neutron/wsgi.py:342 +msgid "Missing Content-Type" +msgstr "" + +#: neutron/wsgi.py:531 +#, python-format +msgid "Data %(data)s type is %(type)s" +msgstr "" + +#: neutron/wsgi.py:614 +msgid "Cannot understand JSON" +msgstr "" + +#: neutron/wsgi.py:627 neutron/wsgi.py:630 +msgid "Inline DTD forbidden" +msgstr "" + +#: neutron/wsgi.py:711 +msgid "Cannot understand XML" +msgstr "" + +#: neutron/wsgi.py:820 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: neutron/wsgi.py:824 +msgid "No Content-Type provided in request" +msgstr "" + +#: neutron/wsgi.py:828 +msgid "Empty body provided in request" +msgstr "" + +#: neutron/wsgi.py:835 +msgid "Unable to 
deserialize body as provided Content-Type" +msgstr "" + +#: neutron/wsgi.py:931 +msgid "You must implement __call__" +msgstr "" + +#: neutron/wsgi.py:1024 neutron/api/v2/base.py:190 neutron/api/v2/base.py:331 +#: neutron/api/v2/base.py:471 neutron/api/v2/base.py:522 +#: neutron/extensions/l3agentscheduler.py:49 +#: neutron/extensions/l3agentscheduler.py:87 +msgid "The resource could not be found." +msgstr "" + +#: neutron/wsgi.py:1071 +#, python-format +msgid "%(method)s %(url)s" +msgstr "" + +#: neutron/wsgi.py:1077 +msgid "Unsupported Content-Type" +msgstr "" + +#: neutron/wsgi.py:1078 +#, python-format +msgid "InvalidContentType: %s" +msgstr "" + +#: neutron/wsgi.py:1082 +msgid "Malformed request body" +msgstr "" + +#: neutron/wsgi.py:1083 +#, python-format +msgid "MalformedRequestBody: %s" +msgstr "" + +#: neutron/wsgi.py:1090 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: neutron/wsgi.py:1095 +msgid "Internal error" +msgstr "" + +#: neutron/wsgi.py:1110 neutron/wsgi.py:1212 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: neutron/wsgi.py:1113 +#, python-format +msgid "%(url)s returned a fault: %(exception)s" +msgstr "" + +#: neutron/wsgi.py:1231 +#, python-format +msgid "The requested content type %s is invalid." +msgstr "" + +#: neutron/wsgi.py:1290 +msgid "Could not deserialize data" +msgstr "" + +#: neutron/agent/dhcp_agent.py:51 +msgid "Interval to resync." +msgstr "" + +#: neutron/agent/dhcp_agent.py:54 +msgid "The driver used to manage the DHCP server." +msgstr "" + +#: neutron/agent/dhcp_agent.py:56 +msgid "Support Metadata requests on isolated networks." +msgstr "" + +#: neutron/agent/dhcp_agent.py:58 +msgid "" +"Allows for serving metadata requests from a dedicated network. Requires " +"enable_isolated_metadata = True" +msgstr "" + +#: neutron/agent/dhcp_agent.py:62 +msgid "Number of threads to use during sync process." +msgstr "" + +#: neutron/agent/dhcp_agent.py:65 neutron/agent/l3_agent.py:190 +#: neutron/agent/metadata/namespace_proxy.py:165 +msgid "Location of Metadata Proxy UNIX domain socket" +msgstr "" + +#: neutron/agent/dhcp_agent.py:102 +#, python-format +msgid "" +"The '%s' DHCP-driver does not support retrieving of a list of existing " +"networks" +msgstr "" + +#: neutron/agent/dhcp_agent.py:109 neutron/agent/dhcp_agent.py:598 +msgid "DHCP agent started" +msgstr "" + +#: neutron/agent/dhcp_agent.py:118 +#, python-format +msgid "Calling driver for network: %(net)s action: %(action)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:134 +#, python-format +msgid "" +"Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its " +"current state; please check that the network and/or its subnet(s) still " +"exist." +msgstr "" + +#: neutron/agent/dhcp_agent.py:143 neutron/agent/dhcp_agent.py:201 +#, python-format +msgid "Network %s has been deleted." +msgstr "" + +#: neutron/agent/dhcp_agent.py:145 +#, python-format +msgid "Unable to %(action)s dhcp for %(net_id)s." +msgstr "" + +#: neutron/agent/dhcp_agent.py:155 +msgid "Synchronizing state" +msgstr "" + +#: neutron/agent/dhcp_agent.py:167 +#, python-format +msgid "Unable to sync network state on deleted network %s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:173 +msgid "Synchronizing state complete" +msgstr "" + +#: neutron/agent/dhcp_agent.py:177 +msgid "Unable to sync network state." 
+msgstr "" + +#: neutron/agent/dhcp_agent.py:189 +#, python-format +msgid "resync: %(reason)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:205 +#, python-format +msgid "Network %s info call failed." +msgstr "" + +#: neutron/agent/dhcp_agent.py:217 +#, python-format +msgid "" +"Network %s may have been deleted and its resources may have already been " +"disposed." +msgstr "" + +#: neutron/agent/dhcp_agent.py:342 +#, python-format +msgid "" +"%(port_num)d router ports found on the metadata access network. Only the " +"port %(port_id)s, for router %(router_id)s will be considered" +msgstr "" + +#: neutron/agent/dhcp_agent.py:580 neutron/agent/l3_agent.py:961 +#: neutron/agent/metadata/agent.py:362 +#: neutron/services/metering/agents/metering_agent.py:272 +msgid "" +"Neutron server does not support state report. State report for this agent" +" will be disabled." +msgstr "" + +#: neutron/agent/dhcp_agent.py:586 neutron/agent/l3_agent.py:966 +#: neutron/agent/metadata/agent.py:367 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:111 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:794 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:248 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:182 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:266 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:244 +#: neutron/services/loadbalancer/agent/agent_manager.py:121 +#: neutron/services/metering/agents/metering_agent.py:277 +msgid "Failed reporting state!" +msgstr "" + +#: neutron/agent/dhcp_agent.py:593 +#, python-format +msgid "Agent updated: %(payload)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:595 neutron/agent/l3_agent.py:971 +#: neutron/services/metering/agents/metering_agent.py:280 +#, python-format +msgid "agent_updated by server side %s!" +msgstr "" + +#: neutron/agent/l3_agent.py:164 neutron/debug/debug_agent.py:41 +msgid "Name of bridge used for external network traffic." +msgstr "" + +#: neutron/agent/l3_agent.py:168 +msgid "TCP Port used by Neutron metadata namespace proxy." +msgstr "" + +#: neutron/agent/l3_agent.py:172 +msgid "" +"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, " +"the feature is disabled" +msgstr "" + +#: neutron/agent/l3_agent.py:175 +msgid "" +"If namespaces is disabled, the l3 agent can only configure a router that " +"has the matching router ID." +msgstr "" + +#: neutron/agent/l3_agent.py:180 +msgid "Agent should implement routers with no gateway" +msgstr "" + +#: neutron/agent/l3_agent.py:182 +msgid "UUID of external network for routers implemented by the agents." +msgstr "" + +#: neutron/agent/l3_agent.py:185 +msgid "Allow running metadata proxy." +msgstr "" + +#: neutron/agent/l3_agent.py:187 +msgid "Delete namespace after removing a router." +msgstr "" + +#: neutron/agent/l3_agent.py:210 +#, python-format +msgid "Error importing interface driver '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:238 neutron/agent/linux/dhcp.py:729 +#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 +msgid "An interface driver must be specified" +msgstr "" + +#: neutron/agent/l3_agent.py:243 +msgid "Router id is required if not using namespaces." +msgstr "" + +#: neutron/agent/l3_agent.py:264 +msgid "RuntimeError in obtaining router list for namespace cleanup." 
+msgstr "" + +#: neutron/agent/l3_agent.py:284 +#, python-format +msgid "Failed to destroy stale router namespace %s" +msgstr "" + +#: neutron/agent/l3_agent.py:305 neutron/agent/linux/dhcp.py:225 +#, python-format +msgid "Failed trying to delete namespace: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:335 +msgid "" +"The 'gateway_external_network_id' option must be configured for this " +"agent as Neutron has more than one external network." +msgstr "" + +#: neutron/agent/l3_agent.py:359 +#, python-format +msgid "Info for router %s were not found. Skipping router removal" +msgstr "" + +#: neutron/agent/l3_agent.py:408 +#: neutron/services/firewall/agents/varmour/varmour_router.py:102 +#, python-format +msgid "Router port %s has no IP address" +msgstr "" + +#: neutron/agent/l3_agent.py:410 neutron/db/l3_db.py:974 +#: neutron/services/firewall/agents/varmour/varmour_router.py:105 +#, python-format +msgid "Ignoring multiple IPs on router port %s" +msgstr "" + +#: neutron/agent/l3_agent.py:450 +#, python-format +msgid "Deleting stale internal router device: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:479 +#, python-format +msgid "Deleting stale external router device: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:598 +#, python-format +msgid "Unable to configure IP address for floating IP: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:628 +#, python-format +msgid "Failed sending gratuitous ARP: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:737 +#, python-format +msgid "Got router deleted notification for %s" +msgstr "" + +#: neutron/agent/l3_agent.py:742 +#, python-format +msgid "Got routers updated notification :%s" +msgstr "" + +#: neutron/agent/l3_agent.py:750 +#, python-format +msgid "Got router removed from agent :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:754 +#, python-format +msgid "Got router added to agent :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:761 +#, python-format +msgid "The external network bridge '%s' does not exist" +msgstr "" + +#: neutron/agent/l3_agent.py:811 +#, python-format +msgid "Starting RPC loop for %d updated routers" +msgstr "" + +#: neutron/agent/l3_agent.py:829 +msgid "RPC loop successfully completed" +msgstr "" + +#: neutron/agent/l3_agent.py:831 neutron/agent/l3_agent.py:869 +#: neutron/services/metering/agents/metering_agent.py:61 +msgid "Failed synchronizing routers" +msgstr "" + +#: neutron/agent/l3_agent.py:849 +#, python-format +msgid "Starting _sync_routers_task - fullsync:%s" +msgstr "" + +#: neutron/agent/l3_agent.py:860 +#, python-format +msgid "Processing :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:863 +msgid "_sync_routers_task successfully completed" +msgstr "" + +#: neutron/agent/l3_agent.py:865 +msgid "Failed synchronizing routers due to RPC error" +msgstr "" + +#: neutron/agent/l3_agent.py:878 +msgid "L3 agent started" +msgstr "" + +#: neutron/agent/l3_agent.py:893 +#, python-format +msgid "Added route entry is '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:901 +#, python-format +msgid "Removed route entry is '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:934 +msgid "Report state task started" +msgstr "" + +#: neutron/agent/l3_agent.py:958 +msgid "Report state task successfully completed" +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:59 +msgid "Delete the namespace by removing all devices." 
+msgstr "" + +#: neutron/agent/netns_cleanup_util.py:116 +#, python-format +msgid "Unable to find bridge for device: %s" +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:140 +#, python-format +msgid "Error unable to destroy namespace: %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:39 +msgid "" +"True to delete all ports on all the OpenvSwitch bridges. False to delete " +"ports created by Neutron on integration and external network bridges." +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:73 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:664 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:281 +#, python-format +msgid "Delete %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:103 +#, python-format +msgid "Cleaning %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:110 +msgid "OVS cleanup completed successfully" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:29 +msgid "Driver for security groups firewall in the L2 agent" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:33 +msgid "" +"Controls whether the neutron security group API is enabled in the server." +" It should be false when using no security groups or using the nova " +"security group API." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:56 +#: neutron/agent/securitygroups_rpc.py:140 +msgid "Driver configuration doesn't match with enable_security_group" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:69 +msgid "Disabled security-group extension." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:71 +msgid "Disabled allowed-address-pairs extension." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:78 +#, python-format +msgid "Get security group rules for devices via rpc %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:95 +msgid "" +"Security group agent binding currently not set. This should be set by the" +" end of the init process." 
+msgstr "" + +#: neutron/agent/securitygroups_rpc.py:106 +#, python-format +msgid "Security group rule updated on remote: %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:118 +#, python-format +msgid "Security group member updated on remote: %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:125 +#: neutron/agent/securitygroups_rpc.py:194 +msgid "Provider rule updated" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:138 +#, python-format +msgid "Init firewall settings (driver=%s)" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:157 +#, python-format +msgid "Preparing filters for devices %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:165 +#, python-format +msgid "Security group rule updated %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:172 +#, python-format +msgid "Security group member updated %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:186 +#, python-format +msgid "" +"Adding %s devices to the list of devices for which firewall needs to be " +"refreshed" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:206 +#, python-format +msgid "Remove device filter for %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:215 +msgid "Refresh firewall rules" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:219 +msgid "No ports here to refresh firewall" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:225 +#, python-format +msgid "Update port filter for %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:243 +#, python-format +msgid "Preparing device filters for %d new devices" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:256 +msgid "Refreshing firewall for all filtered devices" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:264 +#, python-format +msgid "Refreshing firewall for %d devices" +msgstr "" + +#: neutron/agent/common/config.py:29 +msgid "Root helper application." +msgstr "" + +#: neutron/agent/common/config.py:34 +msgid "" +"Seconds between nodes reporting state to server; should be less than " +"agent_down_time, best if it is half or less than agent_down_time." +msgstr "" + +#: neutron/agent/common/config.py:41 +msgid "The driver used to manage the virtual interface." +msgstr "" + +#: neutron/agent/common/config.py:46 +msgid "Allow overlapping IP." +msgstr "" + +#: neutron/agent/common/config.py:102 +msgid "" +"DEFAULT.root_helper is deprecated! Please move root_helper configuration " +"to [AGENT] section." +msgstr "" + +#: neutron/agent/common/config.py:113 +msgid "Top-level directory for maintaining dhcp state" +msgstr "" + +#: neutron/agent/linux/async_process.py:66 +msgid "respawn_interval must be >= 0 if provided." +msgstr "" + +#: neutron/agent/linux/async_process.py:80 +msgid "Process is already started" +msgstr "" + +#: neutron/agent/linux/async_process.py:82 +#, python-format +msgid "Launching async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:88 +#, python-format +msgid "Halting async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:91 +msgid "Process is not running." +msgstr "" + +#: neutron/agent/linux/async_process.py:163 +#, python-format +msgid "An error occurred while killing [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:170 +#, python-format +msgid "Halting async process [%s] in response to an error." +msgstr "" + +#: neutron/agent/linux/async_process.py:176 +#, python-format +msgid "Respawning async process [%s]." 
+msgstr "" + +#: neutron/agent/linux/async_process.py:185 +#, python-format +msgid "An error occurred while communicating with async process [%s]." +msgstr "" + +#: neutron/agent/linux/daemon.py:37 +#, python-format +msgid "Error while handling pidfile: %s" +msgstr "" + +#: neutron/agent/linux/daemon.py:45 +msgid "Unable to unlock pid file" +msgstr "" + +#: neutron/agent/linux/daemon.py:94 +msgid "Fork failed" +msgstr "" + +#: neutron/agent/linux/daemon.py:136 +#, python-format +msgid "Pidfile %s already exist. Daemon already running?" +msgstr "" + +#: neutron/agent/linux/dhcp.py:43 +msgid "Location to store DHCP server config files" +msgstr "" + +#: neutron/agent/linux/dhcp.py:46 neutron/plugins/vmware/dhcp_meta/nsx.py:44 +msgid "Domain to use for building the hostnames" +msgstr "" + +#: neutron/agent/linux/dhcp.py:49 +msgid "Override the default dnsmasq settings with this file" +msgstr "" + +#: neutron/agent/linux/dhcp.py:51 +msgid "Comma-separated list of the DNS servers which will be used as forwarders." +msgstr "" + +#: neutron/agent/linux/dhcp.py:55 +msgid "Delete namespace after removing a dhcp server." +msgstr "" + +#: neutron/agent/linux/dhcp.py:59 +msgid "Limit number of leases to prevent a denial-of-service." +msgstr "" + +#: neutron/agent/linux/dhcp.py:207 +#, python-format +msgid "" +"DHCP for %(net_id)s is stale, pid %(pid)d does not exist, performing " +"cleanup" +msgstr "" + +#: neutron/agent/linux/dhcp.py:214 +#, python-format +msgid "No DHCP started for %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:246 neutron/agent/linux/external_process.py:78 +#, python-format +msgid "Error while reading %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:253 neutron/agent/linux/external_process.py:86 +#, python-format +msgid "Unable to convert value in %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:255 neutron/agent/linux/external_process.py:84 +#, python-format +msgid "Unable to access %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:316 +#, python-format +msgid "" +"FAILED VERSION REQUIREMENT FOR DNSMASQ. DHCP AGENT MAY NOT RUN CORRECTLY!" +" Please ensure that its version is %s or above!" +msgstr "" + +#: neutron/agent/linux/dhcp.py:321 +#, python-format +msgid "" +"Unable to determine dnsmasq version. Please ensure that its version is %s" +" or above!" +msgstr "" + +#: neutron/agent/linux/dhcp.py:424 +#, python-format +msgid "Killing dhcpmasq for network since all subnets have turned off DHCP: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:436 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: neutron/agent/linux/dhcp.py:437 +#, python-format +msgid "Reloading allocations for network: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:487 +#, python-format +msgid "Building host file: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:497 +#, python-format +msgid "Adding %(mac)s : %(name)s : %(ip)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:513 +#, python-format +msgid "Done building host file %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:736 +#, python-format +msgid "Error importing interface driver '%(driver)s': %(inner)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:776 +#, python-format +msgid "Setting gateway for dhcp netns on net %(n)s to %(ip)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:786 +#, python-format +msgid "Removing gateway for dhcp netns on net %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:830 +#, python-format +msgid "" +"DHCP port %(device_id)s on network %(network_id)s does not yet exist. 
" +"Checking for a reserved port." +msgstr "" + +#: neutron/agent/linux/dhcp.py:844 +#, python-format +msgid "DHCP port %(device_id)s on network %(network_id)s does not yet exist." +msgstr "" + +#: neutron/agent/linux/dhcp.py:879 neutron/debug/debug_agent.py:67 +#, python-format +msgid "Reusing existing device: %s." +msgstr "" + +#: neutron/agent/linux/external_process.py:30 +msgid "Location to store child pid files" +msgstr "" + +#: neutron/agent/linux/external_process.py:61 +#, python-format +msgid "Process for %(uuid)s pid %(pid)d is stale, ignoring command" +msgstr "" + +#: neutron/agent/linux/external_process.py:64 +#, python-format +msgid "No process started for %s" +msgstr "" + +#: neutron/agent/linux/interface.py:37 +msgid "Name of Open vSwitch bridge to use" +msgstr "" + +#: neutron/agent/linux/interface.py:40 +msgid "Uses veth for an interface or not" +msgstr "" + +#: neutron/agent/linux/interface.py:42 +msgid "MTU setting for device." +msgstr "" + +#: neutron/agent/linux/interface.py:44 +msgid "Mapping between flavor and LinuxInterfaceDriver" +msgstr "" + +#: neutron/agent/linux/interface.py:46 +msgid "Admin username" +msgstr "" + +#: neutron/agent/linux/interface.py:48 neutron/agent/metadata/agent.py:54 +#: neutron/plugins/metaplugin/common/config.py:65 +msgid "Admin password" +msgstr "" + +#: neutron/agent/linux/interface.py:51 neutron/agent/metadata/agent.py:57 +#: neutron/plugins/metaplugin/common/config.py:68 +msgid "Admin tenant name" +msgstr "" + +#: neutron/agent/linux/interface.py:53 neutron/agent/metadata/agent.py:59 +#: neutron/plugins/metaplugin/common/config.py:70 +msgid "Authentication URL" +msgstr "" + +#: neutron/agent/linux/interface.py:55 neutron/agent/metadata/agent.py:61 +#: neutron/common/config.py:47 neutron/plugins/metaplugin/common/config.py:72 +msgid "The type of authentication to use" +msgstr "" + +#: neutron/agent/linux/interface.py:57 neutron/agent/metadata/agent.py:63 +#: neutron/plugins/metaplugin/common/config.py:74 +msgid "Authentication region" +msgstr "" + +#: neutron/agent/linux/interface.py:214 neutron/agent/linux/interface.py:268 +#: neutron/agent/linux/interface.py:330 neutron/agent/linux/interface.py:379 +#, python-format +msgid "Device %s already exists" +msgstr "" + +#: neutron/agent/linux/interface.py:232 neutron/agent/linux/interface.py:279 +#: neutron/agent/linux/interface.py:342 neutron/agent/linux/interface.py:386 +#, python-format +msgid "Unplugged interface '%s'" +msgstr "" + +#: neutron/agent/linux/interface.py:234 neutron/agent/linux/interface.py:278 +#: neutron/agent/linux/interface.py:344 neutron/agent/linux/interface.py:388 +#, python-format +msgid "Failed unplugging interface '%s'" +msgstr "" + +#: neutron/agent/linux/interface.py:446 +#, python-format +msgid "Driver location: %s" +msgstr "" + +#: neutron/agent/linux/ip_lib.py:26 +msgid "Force ip_lib calls to use the root helper" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:56 +#, python-format +msgid "Preparing device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:64 +#, python-format +msgid "Updating device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:66 +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:75 +#, python-format +msgid "Removing device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:77 +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "" + +#: 
neutron/agent/linux/iptables_manager.py:157 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:199 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:234 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:372 +#, python-format +msgid "Got semaphore / lock \"%s\"" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:375 +#, python-format +msgid "Semaphore / lock released \"%s\"" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:424 +#: neutron/tests/unit/test_iptables_manager.py:558 +#: neutron/tests/unit/test_iptables_manager.py:592 +#, python-format +msgid "" +"IPTablesManager.apply failed to apply the following set of iptables " +"rules:\n" +"%s" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:427 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:437 +#, python-format +msgid "Unable to find table %s" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:637 +#, python-format +msgid "Attempted to get traffic counters of chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:34 +msgid "Timeout in seconds for ovs-vsctl commands" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:68 neutron/agent/linux/ovs_lib.py:168 +#: neutron/agent/linux/ovs_lib.py:315 +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:55 +#, python-format +msgid "Unable to execute %(cmd)s. Exception: %(exception)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:223 +msgid "defer_apply_on" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:227 +msgid "defer_apply_off" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:237 +#, python-format +msgid "Applying following deferred flows to bridge %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:240 +#, python-format +msgid "%(action)s: %(flow)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:266 +msgid "" +"Unable to create VXLAN tunnel port. Please ensure that an openvswitch " +"version that supports VXLAN is installed." +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:363 +#, python-format +msgid "Found not yet ready openvswitch port: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:378 +#, python-format +msgid "Found failed openvswitch port: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:433 +#, python-format +msgid "Port: %(port_name)s is on %(switch)s, not on %(br_name)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:441 +#, python-format +msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:451 +#, python-format +msgid "Unable to parse interface details. Exception: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:469 +#, python-format +msgid "Unable to determine mac address for %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:486 +#, python-format +msgid "Interface %s not found." +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:497 +#, python-format +msgid "Unable to retrieve bridges. Exception: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:506 +#, python-format +msgid "Bridge %s not found." 
+msgstr "" + +#: neutron/agent/linux/ovs_lib.py:522 +msgid "Cannot match priority on flow deletion or modification" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:527 +msgid "Must specify one or more actions on flow addition or modification" +msgstr "" + +#: neutron/agent/linux/ovsdb_monitor.py:44 +#, python-format +msgid "Output received from ovsdb monitor: %s" +msgstr "" + +#: neutron/agent/linux/ovsdb_monitor.py:50 +#, python-format +msgid "Error received from ovsdb monitor: %s" +msgstr "" + +#: neutron/agent/linux/utils.py:47 +#, python-format +msgid "Running command: %s" +msgstr "" + +#: neutron/agent/linux/utils.py:70 +#, python-format +msgid "" +"\n" +"Command: %(cmd)s\n" +"Exit code: %(code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: neutron/agent/metadata/agent.py:52 +#: neutron/plugins/metaplugin/common/config.py:63 +msgid "Admin user" +msgstr "" + +#: neutron/agent/metadata/agent.py:66 +msgid "Turn off verification of the certificate for ssl" +msgstr "" + +#: neutron/agent/metadata/agent.py:69 +msgid "Certificate Authority public key (CA cert) file for ssl" +msgstr "" + +#: neutron/agent/metadata/agent.py:73 +msgid "Network service endpoint type to pull from the keystone catalog" +msgstr "" + +#: neutron/agent/metadata/agent.py:76 +msgid "IP address used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/agent.py:79 +msgid "TCP Port used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/agent.py:82 +#: neutron/plugins/vmware/dhcp_meta/nsx.py:63 +msgid "Shared secret to sign instance-id request" +msgstr "" + +#: neutron/agent/metadata/agent.py:87 +msgid "Protocol to access nova metadata, http or https" +msgstr "" + +#: neutron/agent/metadata/agent.py:89 +msgid "Allow to perform insecure SSL (https) requests to nova metadata" +msgstr "" + +#: neutron/agent/metadata/agent.py:93 +msgid "Client certificate for nova metadata api server." +msgstr "" + +#: neutron/agent/metadata/agent.py:96 +msgid "Private key of client certificate." +msgstr "" + +#: neutron/agent/metadata/agent.py:126 +#: neutron/agent/metadata/namespace_proxy.py:68 +#, python-format +msgid "Request: %s" +msgstr "" + +#: neutron/agent/metadata/agent.py:135 +#: neutron/agent/metadata/namespace_proxy.py:76 +msgid "Unexpected error." +msgstr "" + +#: neutron/agent/metadata/agent.py:136 +#: neutron/agent/metadata/namespace_proxy.py:77 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: neutron/agent/metadata/agent.py:178 +msgid "" +"Either one of parameter network_id or router_id must be passed to " +"_get_ports method." +msgstr "" + +#: neutron/agent/metadata/agent.py:230 +msgid "" +"The remote metadata server responded with Forbidden. This response " +"usually occurs when shared secrets do not match." +msgstr "" + +#: neutron/agent/metadata/agent.py:241 +#: neutron/agent/metadata/namespace_proxy.py:120 +msgid "Remote metadata server experienced an internal server error." 
+msgstr "" + +#: neutron/agent/metadata/agent.py:247 +#: neutron/agent/metadata/namespace_proxy.py:126 +#, python-format +msgid "Unexpected response code: %s" +msgstr "" + +#: neutron/agent/metadata/agent.py:307 +msgid "Location for Metadata Proxy UNIX domain socket" +msgstr "" + +#: neutron/agent/metadata/agent.py:310 +msgid "Number of separate worker processes for metadata server" +msgstr "" + +#: neutron/agent/metadata/agent.py:314 +msgid "Number of backlog requests to configure the metadata server socket with" +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:63 +msgid "network_id and router_id are None. One must be provided." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:149 +msgid "Network that will have instance metadata proxied." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:152 +msgid "Router that will have connected instances' metadata proxied." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:155 +msgid "Location of pid file of this process." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:158 +msgid "Run as daemon." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:161 +msgid "TCP Port to listen for metadata server requests." +msgstr "" + +#: neutron/api/api_common.py:101 +#, python-format +msgid "" +"Invalid value for pagination_max_limit: %s. It should be an integer " +"greater to 0" +msgstr "" + +#: neutron/api/api_common.py:115 +#, python-format +msgid "Limit must be an integer 0 or greater and not '%d'" +msgstr "" + +#: neutron/api/api_common.py:132 +msgid "The number of sort_keys and sort_dirs must be same" +msgstr "" + +#: neutron/api/api_common.py:137 +#, python-format +msgid "%s is invalid attribute for sort_keys" +msgstr "" + +#: neutron/api/api_common.py:141 +#, python-format +msgid "" +"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s'" +" and '%(desc)s'" +msgstr "" + +#: neutron/api/api_common.py:315 neutron/api/v2/base.py:592 +#, python-format +msgid "Unable to find '%s' in request body" +msgstr "" + +#: neutron/api/api_common.py:322 +#, python-format +msgid "Failed to parse request. Parameter '%s' not specified" +msgstr "" + +#: neutron/api/extensions.py:253 +#, python-format +msgid "Extension with alias %s does not exist" +msgstr "" + +#: neutron/api/extensions.py:257 neutron/api/extensions.py:261 +msgid "Resource not found." +msgstr "" + +#: neutron/api/extensions.py:283 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: neutron/api/extensions.py:305 +#, python-format +msgid "Extended action: %s" +msgstr "" + +#: neutron/api/extensions.py:313 +#, python-format +msgid "Extended request: %s" +msgstr "" + +#: neutron/api/extensions.py:403 +msgid "Initializing extension manager." +msgstr "" + +#: neutron/api/extensions.py:486 +#, python-format +msgid "Error fetching extended attributes for extension '%s'" +msgstr "" + +#: neutron/api/extensions.py:492 +#, python-format +msgid "" +"Extension '%s' provides no backward compatibility map for extended " +"attributes" +msgstr "" + +#: neutron/api/extensions.py:502 +#, python-format +msgid "" +"It was impossible to process the following extensions: %s because of " +"missing requirements." 
+msgstr "" + +#: neutron/api/extensions.py:513 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: neutron/api/extensions.py:514 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: neutron/api/extensions.py:515 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: neutron/api/extensions.py:516 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: neutron/api/extensions.py:517 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: neutron/api/extensions.py:519 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: neutron/api/extensions.py:537 +#, python-format +msgid "Extension path '%s' doesn't exist!" +msgstr "" + +#: neutron/api/extensions.py:545 +#, python-format +msgid "Loading extension file: %s" +msgstr "" + +#: neutron/api/extensions.py:553 +#, python-format +msgid "Did not find expected name \"%(ext_name)s\" in %(file)s" +msgstr "" + +#: neutron/api/extensions.py:561 +#, python-format +msgid "Extension file %(f)s wasn't loaded due to %(exception)s" +msgstr "" + +#: neutron/api/extensions.py:570 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: neutron/api/extensions.py:601 +#, python-format +msgid "Extension %s not supported by any of loaded plugins" +msgstr "" + +#: neutron/api/extensions.py:612 +#, python-format +msgid "Loaded plugins do not implement extension %s interface" +msgstr "" + +#: neutron/api/versions.py:45 +msgid "Unknown API version specified" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:65 +#, python-format +msgid "" +"Unable to schedule network %s: no agents available; will retry on " +"subsequent port creation events." +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:78 +#, python-format +msgid "" +"Only %(active)d of %(total)d DHCP agents associated with network " +"'%(net_id)s' are marked as active, so notifications may be sent to " +"inactive agents." +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:90 +#, python-format +msgid "" +"Will not send event %(method)s for network %(net_id)s: no agent " +"available. Payload: %(payload)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:38 +#, python-format +msgid "Nofity agent at %(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:58 +#, python-format +msgid "Notify agent at %(topic)s.%(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:74 +#, python-format +msgid "" +"No plugin for L3 routing registered. Cannot notify agents with the " +"message %s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:92 +#, python-format +msgid "" +"Fanout notify agent at %(topic)s the message %(method)s on router " +"%(router_id)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py:49 +#, python-format +msgid "Notify metering agent at %(topic)s.%(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py:64 +#, python-format +msgid "" +"Fanout notify metering agent at %(topic)s the message %(method)s on " +"router %(router_id)s" +msgstr "" + +#: neutron/api/v2/attributes.py:44 +#, python-format +msgid "" +"Invalid input. 
'%(target_dict)s' must be a dictionary with keys: " +"%(expected_keys)s" +msgstr "" + +#: neutron/api/v2/attributes.py:55 +#, python-format +msgid "" +"Validation of dictionary's keys failed.Expected keys: %(expected_keys)s " +"Provided keys: %(provided_keys)s" +msgstr "" + +#: neutron/api/v2/attributes.py:69 +#, python-format +msgid "'%(data)s' is not in %(valid_values)s" +msgstr "" + +#: neutron/api/v2/attributes.py:85 +#, python-format +msgid "'%s' Blank strings are not permitted" +msgstr "" + +#: neutron/api/v2/attributes.py:95 +#, python-format +msgid "'%s' is not a valid string" +msgstr "" + +#: neutron/api/v2/attributes.py:100 +#, python-format +msgid "'%(data)s' exceeds maximum length of %(max_len)s" +msgstr "" + +#: neutron/api/v2/attributes.py:110 +#, python-format +msgid "'%s' is not a valid boolean value" +msgstr "" + +#: neutron/api/v2/attributes.py:129 neutron/api/v2/attributes.py:454 +#, python-format +msgid "'%s' is not an integer" +msgstr "" + +#: neutron/api/v2/attributes.py:133 +#, python-format +msgid "'%(data)s' is too small - must be at least '%(limit)d'" +msgstr "" + +#: neutron/api/v2/attributes.py:138 +#, python-format +msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" +msgstr "" + +#: neutron/api/v2/attributes.py:147 +#, python-format +msgid "'%s' contains whitespace" +msgstr "" + +#: neutron/api/v2/attributes.py:164 +#, python-format +msgid "'%s' is not a valid MAC address" +msgstr "" + +#: neutron/api/v2/attributes.py:179 +#, python-format +msgid "'%s' is not a valid IP address" +msgstr "" + +#: neutron/api/v2/attributes.py:190 +#, python-format +msgid "Invalid data format for IP pool: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:209 neutron/api/v2/attributes.py:216 +#, python-format +msgid "Invalid data format for fixed IP: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:224 +#, python-format +msgid "Duplicate IP address '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:240 +#, python-format +msgid "Invalid data format for nameserver: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:251 +#, python-format +msgid "'%s' is not a valid nameserver" +msgstr "" + +#: neutron/api/v2/attributes.py:255 +#, python-format +msgid "Duplicate nameserver '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:263 +#, python-format +msgid "Invalid data format for hostroute: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:283 +#, python-format +msgid "Duplicate hostroute '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:300 neutron/tests/unit/test_attributes.py:460 +#: neutron/tests/unit/test_attributes.py:474 +#: neutron/tests/unit/test_attributes.py:482 +#, python-format +msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" +msgstr "" + +#: neutron/api/v2/attributes.py:306 +#, python-format +msgid "'%s' is not a valid IP subnet" +msgstr "" + +#: neutron/api/v2/attributes.py:314 neutron/api/v2/attributes.py:367 +#, python-format +msgid "'%s' is not a list" +msgstr "" + +#: neutron/api/v2/attributes.py:319 neutron/api/v2/attributes.py:378 +#, python-format +msgid "Duplicate items in the list: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:342 +#, python-format +msgid "'%s' is not a valid input" +msgstr "" + +#: neutron/api/v2/attributes.py:355 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:549 +#, python-format +msgid "'%s' is not a valid UUID" +msgstr "" + +#: neutron/api/v2/attributes.py:398 +#, python-format +msgid "Validator '%s' does not exist." 
+msgstr "" + +#: neutron/api/v2/attributes.py:408 +#, python-format +msgid "'%s' is not a dictionary" +msgstr "" + +#: neutron/api/v2/attributes.py:459 +#, python-format +msgid "'%s' should be non-negative" +msgstr "" + +#: neutron/api/v2/attributes.py:478 +#, python-format +msgid "'%s' cannot be converted to boolean" +msgstr "" + +#: neutron/api/v2/attributes.py:486 +#: neutron/plugins/nec/extensions/packetfilter.py:75 +#, python-format +msgid "'%s' is not a integer" +msgstr "" + +#: neutron/api/v2/attributes.py:499 +#, python-format +msgid "'%s' is not of the form =[value]" +msgstr "" + +#: neutron/api/v2/base.py:86 +msgid "Native pagination depend on native sorting" +msgstr "" + +#: neutron/api/v2/base.py:89 +msgid "Allow sorting is enabled because native pagination requires native sorting" +msgstr "" + +#: neutron/api/v2/base.py:360 +#, python-format +msgid "Unable to undo add for %(resource)s %(id)s" +msgstr "" + +#: neutron/api/v2/base.py:492 +#, python-format +msgid "Invalid format: %s" +msgstr "" + +#: neutron/api/v2/base.py:545 +msgid "" +"Specifying 'tenant_id' other than authenticated tenant in request " +"requires admin privileges" +msgstr "" + +#: neutron/api/v2/base.py:553 +msgid "Running without keystone AuthN requires that tenant_id is specified" +msgstr "" + +#: neutron/api/v2/base.py:571 +msgid "Resource body required" +msgstr "" + +#: neutron/api/v2/base.py:573 +#, python-format +msgid "Request body: %(body)s" +msgstr "" + +#: neutron/api/v2/base.py:583 +msgid "Bulk operation not supported" +msgstr "" + +#: neutron/api/v2/base.py:587 +msgid "Resources required" +msgstr "" + +#: neutron/api/v2/base.py:603 +#, python-format +msgid "Failed to parse request. Required attribute '%s' not specified" +msgstr "" + +#: neutron/api/v2/base.py:610 +#, python-format +msgid "Attribute '%s' not allowed in POST" +msgstr "" + +#: neutron/api/v2/base.py:615 +#, python-format +msgid "Cannot update read-only attribute %s" +msgstr "" + +#: neutron/api/v2/base.py:633 +#, python-format +msgid "Invalid input for %(attr)s. Reason: %(reason)s." +msgstr "" + +#: neutron/api/v2/base.py:642 neutron/extensions/allowedaddresspairs.py:57 +#: neutron/extensions/multiprovidernet.py:51 +#, python-format +msgid "Unrecognized attribute(s) '%s'" +msgstr "" + +#: neutron/api/v2/base.py:661 +#, python-format +msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" +msgstr "" + +#: neutron/api/v2/resource.py:97 +#, python-format +msgid "%(action)s failed (client error): %(exc)s" +msgstr "" + +#: neutron/api/v2/resource.py:100 neutron/api/v2/resource.py:110 +#: neutron/api/v2/resource.py:129 +#, python-format +msgid "%s failed" +msgstr "" + +#: neutron/api/v2/resource.py:131 +#: neutron/tests/unit/test_api_v2_resource.py:275 +#: neutron/tests/unit/test_api_v2_resource.py:291 +msgid "Request Failed: internal server error while processing your request." +msgstr "" + +#: neutron/cmd/sanity_check.py:38 +msgid "" +"Check for Open vSwitch VXLAN support failed. Please ensure that the " +"version of openvswitch being used has VXLAN support." +msgstr "" + +#: neutron/cmd/sanity_check.py:47 +msgid "" +"Check for Open vSwitch patch port support failed. Please ensure that the " +"version of openvswitch being used has patch port support or disable " +"features requiring patch ports (gre/vxlan, etc.)." +msgstr "" + +#: neutron/cmd/sanity_check.py:57 +msgid "" +"Nova notifcations are enabled, but novaclient is not installed. Either " +"disable nova notifications or install python-novaclient." 
+msgstr "" + +#: neutron/cmd/sanity_check.py:66 +msgid "Check for vxlan support" +msgstr "" + +#: neutron/cmd/sanity_check.py:68 +msgid "Check for patch port support" +msgstr "" + +#: neutron/cmd/sanity_check.py:70 +msgid "Check for nova notification support" +msgstr "" + +#: neutron/common/config.py:37 +msgid "The host IP to bind to" +msgstr "" + +#: neutron/common/config.py:39 +msgid "The port to bind to" +msgstr "" + +#: neutron/common/config.py:41 +msgid "The API paste config file to use" +msgstr "" + +#: neutron/common/config.py:43 +msgid "The path for API extensions" +msgstr "" + +#: neutron/common/config.py:45 +msgid "The policy file to use" +msgstr "" + +#: neutron/common/config.py:49 +msgid "The core plugin Neutron will use" +msgstr "" + +#: neutron/common/config.py:51 neutron/db/migration/cli.py:35 +msgid "The service plugins Neutron will use" +msgstr "" + +#: neutron/common/config.py:53 +msgid "The base MAC address Neutron will use for VIFs" +msgstr "" + +#: neutron/common/config.py:55 +msgid "How many times Neutron will retry MAC generation" +msgstr "" + +#: neutron/common/config.py:57 +msgid "Allow the usage of the bulk API" +msgstr "" + +#: neutron/common/config.py:59 +msgid "Allow the usage of the pagination" +msgstr "" + +#: neutron/common/config.py:61 +msgid "Allow the usage of the sorting" +msgstr "" + +#: neutron/common/config.py:63 +msgid "" +"The maximum number of items returned in a single response, value was " +"'infinite' or negative integer means no limit" +msgstr "" + +#: neutron/common/config.py:67 +msgid "Maximum number of DNS nameservers" +msgstr "" + +#: neutron/common/config.py:69 +msgid "Maximum number of host routes per subnet" +msgstr "" + +#: neutron/common/config.py:71 +msgid "Maximum number of fixed ips per port" +msgstr "" + +#: neutron/common/config.py:74 +msgid "" +"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " +"lease times." +msgstr "" + +#: neutron/common/config.py:77 +msgid "Allow sending resource operation notification to DHCP agent" +msgstr "" + +#: neutron/common/config.py:80 +msgid "Allow overlapping IP support in Neutron" +msgstr "" + +#: neutron/common/config.py:82 +msgid "The hostname Neutron is running on" +msgstr "" + +#: neutron/common/config.py:84 +msgid "Ensure that configured gateway is on subnet" +msgstr "" + +#: neutron/common/config.py:86 +msgid "Send notification to nova when port status changes" +msgstr "" + +#: neutron/common/config.py:88 +msgid "" +"Send notification to nova when port data (fixed_ips/floatingip) changes " +"so nova can update its cache." +msgstr "" + +#: neutron/common/config.py:92 +msgid "URL for connection to nova" +msgstr "" + +#: neutron/common/config.py:94 +msgid "Username for connecting to nova in admin context" +msgstr "" + +#: neutron/common/config.py:96 +msgid "Password for connection to nova in admin context" +msgstr "" + +#: neutron/common/config.py:99 +msgid "The uuid of the admin nova tenant" +msgstr "" + +#: neutron/common/config.py:102 +msgid "Authorization URL for connecting to nova in admin context" +msgstr "" + +#: neutron/common/config.py:105 +msgid "CA file for novaclient to verify server certificates" +msgstr "" + +#: neutron/common/config.py:107 +msgid "If True, ignore any SSL validation issues" +msgstr "" + +#: neutron/common/config.py:109 +msgid "" +"Name of nova region to use. Useful if keystone manages more than one " +"region." 
+msgstr "" + +#: neutron/common/config.py:112 +msgid "" +"Number of seconds between sending events to nova if there are any events " +"to send." +msgstr "" + +#: neutron/common/config.py:119 +msgid "" +"Where to store Neutron state files. This directory must be writable by " +"the agent." +msgstr "" + +#: neutron/common/config.py:151 +#, python-format +msgid "Base MAC: %s" +msgstr "" + +#: neutron/common/config.py:162 +msgid "Logging enabled!" +msgstr "" + +#: neutron/common/config.py:178 +#, python-format +msgid "Config paste file: %s" +msgstr "" + +#: neutron/common/config.py:183 +#, python-format +msgid "Unable to load %(app_name)s from configuration file %(config_path)s." +msgstr "" + +#: neutron/common/exceptions.py:30 +#: neutron/plugins/vmware/api_client/exception.py:27 +msgid "An unknown exception occurred." +msgstr "" + +#: neutron/common/exceptions.py:51 +#, python-format +msgid "Bad %(resource)s request: %(msg)s" +msgstr "" + +#: neutron/common/exceptions.py:63 +msgid "Not authorized." +msgstr "" + +#: neutron/common/exceptions.py:67 +msgid "The service is unavailable" +msgstr "" + +#: neutron/common/exceptions.py:71 +#, python-format +msgid "User does not have admin privileges: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:75 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: neutron/common/exceptions.py:79 +#, python-format +msgid "Network %(net_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:83 +#, python-format +msgid "Subnet %(subnet_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:87 +#, python-format +msgid "Port %(port_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:91 +#, python-format +msgid "Port %(port_id)s could not be found on network %(net_id)s" +msgstr "" + +#: neutron/common/exceptions.py:96 +msgid "Policy configuration policy.json could not be found" +msgstr "" + +#: neutron/common/exceptions.py:100 +#, python-format +msgid "Failed to init policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:104 +#, python-format +msgid "Failed to check policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:108 +#, python-format +msgid "Unsupported port state: %(port_state)s" +msgstr "" + +#: neutron/common/exceptions.py:112 +msgid "The resource is inuse" +msgstr "" + +#: neutron/common/exceptions.py:116 +#, python-format +msgid "" +"Unable to complete operation on network %(net_id)s. There are one or more" +" ports still in use on the network." +msgstr "" + +#: neutron/common/exceptions.py:121 +#, python-format +msgid "" +"Unable to complete operation on subnet %(subnet_id)s. One or more ports " +"have an IP allocation from this subnet." +msgstr "" + +#: neutron/common/exceptions.py:126 +#, python-format +msgid "" +"Unable to complete operation on port %(port_id)s for network %(net_id)s. " +"Port already has an attacheddevice %(device_id)s." +msgstr "" + +#: neutron/common/exceptions.py:132 +#, python-format +msgid "" +"Unable to complete operation for network %(net_id)s. The mac address " +"%(mac)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:138 +#, python-format +msgid "" +"Unable to complete operation for %(subnet_id)s. The number of host routes" +" exceeds the limit %(quota)s." +msgstr "" + +#: neutron/common/exceptions.py:144 +#, python-format +msgid "" +"Unable to complete operation for %(subnet_id)s. The number of DNS " +"nameservers exceeds the limit %(quota)s." 
+msgstr "" + +#: neutron/common/exceptions.py:149 +#, python-format +msgid "" +"Unable to complete operation for network %(net_id)s. The IP address " +"%(ip_address)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:154 +#, python-format +msgid "" +"Unable to create the network. The VLAN %(vlan_id)s on physical network " +"%(physical_network)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:160 +#, python-format +msgid "" +"Unable to create the flat network. Physical network %(physical_network)s " +"is in use." +msgstr "" + +#: neutron/common/exceptions.py:165 +#, python-format +msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:170 +msgid "Tenant network creation is not enabled." +msgstr "" + +#: neutron/common/exceptions.py:178 +msgid "" +"Unable to create the network. No tenant network is available for " +"allocation." +msgstr "" + +#: neutron/common/exceptions.py:183 +#, python-format +msgid "" +"Subnet on port %(port_id)s does not match the requested subnet " +"%(subnet_id)s" +msgstr "" + +#: neutron/common/exceptions.py:188 +#, python-format +msgid "Malformed request body: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:198 +#, python-format +msgid "Invalid input for operation: %(error_message)s." +msgstr "" + +#: neutron/common/exceptions.py:202 +#, python-format +msgid "The allocation pool %(pool)s is not valid." +msgstr "" + +#: neutron/common/exceptions.py:206 +#, python-format +msgid "" +"Found overlapping allocation pools:%(pool_1)s %(pool_2)s for subnet " +"%(subnet_cidr)s." +msgstr "" + +#: neutron/common/exceptions.py:211 +#, python-format +msgid "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." +msgstr "" + +#: neutron/common/exceptions.py:216 +#, python-format +msgid "Unable to generate unique mac on network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:220 +#, python-format +msgid "No more IP addresses available on network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:224 +#, python-format +msgid "Bridge %(bridge)s does not exist." +msgstr "" + +#: neutron/common/exceptions.py:228 +#, python-format +msgid "Creation failed. %(dev_name)s already exists." +msgstr "" + +#: neutron/common/exceptions.py:232 +msgid "Sudo privilege is required to run this command." +msgstr "" + +#: neutron/common/exceptions.py:236 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: neutron/common/exceptions.py:240 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: neutron/common/exceptions.py:244 +msgid "Tenant-id was missing from Quota request" +msgstr "" + +#: neutron/common/exceptions.py:248 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: neutron/common/exceptions.py:253 +#, python-format +msgid "" +"Unable to reconfigure sharing settings for network %(network)s. Multiple " +"tenants are using it" +msgstr "" + +#: neutron/common/exceptions.py:258 +#, python-format +msgid "Invalid extension environment: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:262 +#, python-format +msgid "Extensions not found: %(extensions)s" +msgstr "" + +#: neutron/common/exceptions.py:266 +#, python-format +msgid "Invalid content type %(content_type)s" +msgstr "" + +#: neutron/common/exceptions.py:270 +#, python-format +msgid "Unable to find any IP address on external network %(net_id)s." 
+msgstr "" + +#: neutron/common/exceptions.py:275 +msgid "More than one external network exists" +msgstr "" + +#: neutron/common/exceptions.py:279 +#, python-format +msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s" +msgstr "" + +#: neutron/common/exceptions.py:284 +#, python-format +msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s" +msgstr "" + +#: neutron/common/exceptions.py:289 +#, python-format +msgid "" +"Current gateway ip %(ip_address)s already in use by port %(port_id)s. " +"Unable to update." +msgstr "" + +#: neutron/common/exceptions.py:294 +#, python-format +msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'" +msgstr "" + +#: neutron/common/exceptions.py:304 +#, python-format +msgid "Invalid network VXLAN port range: '%(vxlan_range)s'" +msgstr "" + +#: neutron/common/exceptions.py:308 +msgid "VXLAN Network unsupported." +msgstr "" + +#: neutron/common/exceptions.py:312 +#, python-format +msgid "Found duplicate extension: %(alias)s" +msgstr "" + +#: neutron/common/exceptions.py:316 +#, python-format +msgid "" +"The following device_id %(device_id)s is not owned by your tenant or " +"matches another tenants router." +msgstr "" + +#: neutron/common/exceptions.py:321 +#, python-format +msgid "Invalid CIDR %(input)s given as IP prefix" +msgstr "" + +#: neutron/common/ipv6_utils.py:27 +msgid "Unable to generate IP address by EUI64 for IPv4 prefix" +msgstr "" + +#: neutron/common/ipv6_utils.py:34 +#, python-format +msgid "" +"Bad prefix or mac format for generating IPv6 address by EUI-64: " +"%(prefix)s, %(mac)s:" +msgstr "" + +#: neutron/common/ipv6_utils.py:38 +#, python-format +msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" +msgstr "" + +#: neutron/common/log.py:32 +#, python-format +msgid "" +"%(class_name)s method %(method_name)s called with arguments %(args)s " +"%(kwargs)s" +msgstr "" + +#: neutron/common/utils.py:68 +#, python-format +msgid "" +"Method %(func_name)s cannot be cached due to unhashable parameters: args:" +" %(args)s, kwargs: %(kwargs)s" +msgstr "" + +#: neutron/common/utils.py:91 +#, python-format +msgid "" +"Instance of class %(module)s.%(class)s doesn't contain attribute _cache " +"therefore results cannot be cached for %(func_name)s." +msgstr "" + +#: neutron/common/utils.py:117 neutron/openstack/common/fileutils.py:63 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: neutron/common/utils.py:200 +#, python-format +msgid "Invalid mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:203 +#, python-format +msgid "Missing key in mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:206 +#, python-format +msgid "Missing value in mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:208 +#, python-format +msgid "Key %(key)s in mapping: '%(mapping)s' not unique" +msgstr "" + +#: neutron/common/utils.py:211 +#, python-format +msgid "Value %(value)s in mapping: '%(mapping)s' not unique" +msgstr "" + +#: neutron/db/agents_db.py:36 +msgid "" +"Seconds to regard the agent is down; should be at least twice " +"report_interval, to be sure the agent is down for good." +msgstr "" + +#: neutron/db/agents_db.py:93 +#, python-format +msgid "Configuration for agent %(agent_type)s on host %(host)s is invalid." 
+msgstr "" + +#: neutron/db/agents_db.py:214 +msgid "Message with invalid timestamp received" +msgstr "" + +#: neutron/db/agentschedulers_db.py:37 +msgid "Driver to use for scheduling network to DHCP agent" +msgstr "" + +#: neutron/db/agentschedulers_db.py:39 +msgid "Allow auto scheduling networks to DHCP agent." +msgstr "" + +#: neutron/db/agentschedulers_db.py:41 +msgid "Number of DHCP agents scheduled to host a network." +msgstr "" + +#: neutron/db/api.py:77 +#, python-format +msgid "Database registration exception: %s" +msgstr "" + +#: neutron/db/api.py:89 +msgid "Database exception" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:148 +msgid "Cannot create resource for another tenant" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:317 +#, python-format +msgid "Generated mac for network %(network_id)s is %(mac_address)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:323 +#, python-format +msgid "Generated mac %(mac_address)s exists. Remaining attempts %(max_retries)s." +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:327 +#, python-format +msgid "Unable to generate mac address after %s attempts" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:345 +#, python-format +msgid "Delete allocated IP %(ip_address)s (%(network_id)s/%(subnet_id)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:383 +#, python-format +msgid "All IPs from subnet %(subnet_id)s (%(cidr)s) allocated" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:388 +#, python-format +msgid "Allocated IP - %(ip_address)s from %(first_ip)s to %(last_ip)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:395 +msgid "No more free IP's in slice. Deleting allocation pool." +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:414 +#, python-format +msgid "Rebuilding availability ranges for subnet %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:546 +msgid "IP allocation requires subnet_id or ip_address" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:558 +#, python-format +msgid "IP address %s is not a valid IP for the defined networks subnets" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:564 +#, python-format +msgid "" +"Failed to create port on network %(network_id)s, because fixed_ips " +"included invalid subnet %(subnet_id)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:584 +#, python-format +msgid "IP address %s is not a valid IP for the defined subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:593 neutron/db/db_base_plugin_v2.py:626 +msgid "Exceeded maximim amount of fixed ips per port" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:641 +#, python-format +msgid "Port update. Hold %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:648 +#, python-format +msgid "Port update. 
Adding %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:717 +#, python-format +msgid "" +"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps" +" with another subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:722 +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:742 +msgid "Performing IP validity checks on allocation pools" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:749 +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:756 +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:760 +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:765 +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:778 +msgid "Checking for overlaps among allocation pools and gateway ip" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:789 +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:802 neutron/db/db_base_plugin_v2.py:806 +#, python-format +msgid "Invalid route: %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:879 +#, python-format +msgid "" +"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " +"'%(addr_mode)s' is not valid. If both attributes are set, they must be " +"the same value" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:887 +msgid "" +"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set " +"to False." +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:893 +msgid "Cannot disable enable_dhcp with ipv6 attributes set" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:983 +#, python-format +msgid "An exception occurred while creating the %(resource)s:%(item)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1080 +#, python-format +msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1104 +msgid "Gateway is not valid on subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1124 neutron/db/db_base_plugin_v2.py:1138 +msgid "new subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1131 +#, python-format +msgid "Error parsing dns address %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1147 +msgid "ipv6_ra_mode is not valid when ip_version is 4" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1151 +msgid "ipv6_address_mode is not valid when ip_version is 4" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1452 +#, python-format +msgid "Allocated IP %(ip_address)s (%(network_id)s/%(subnet_id)s/%(port_id)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1531 +#, python-format +msgid "" +"Ignoring PortNotFound when deleting port '%s'. The port has already been " +"deleted." 
+msgstr "" + +#: neutron/db/dhcp_rpc_base.py:58 +msgid "Unrecognized action" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:75 +#, python-format +msgid "" +"Action %(action)s for network %(net_id)s could not complete successfully:" +" %(reason)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:85 +#, python-format +msgid "get_active_networks requested from %s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:92 +#, python-format +msgid "get_active_networks_info from %s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:112 +#, python-format +msgid "Network %(network_id)s requested from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:119 neutron/db/dhcp_rpc_base.py:183 +#, python-format +msgid "Network %s could not be found, it might have been deleted concurrently." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:141 +#, python-format +msgid "Port %(device_id)s for %(network_id)s requested from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:175 +#, python-format +msgid "" +"DHCP port %(device_id)s on network %(network_id)s does not exist on " +"%(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:215 +#, python-format +msgid "DHCP port deletion for %(network_id)s request from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:228 +#, python-format +msgid "DHCP port remove fixed_ip for %(subnet_id)s request from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:252 +#, python-format +msgid "Updating lease expiration is now deprecated. Issued from host %s." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:263 +#, python-format +msgid "Create dhcp port %(port)s from %(host)s." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:280 +#, python-format +msgid "Update dhcp port %(port)s from %(host)s." +msgstr "" + +#: neutron/db/extraroute_db.py:36 +msgid "Maximum number of routes" +msgstr "" + +#: neutron/db/extraroute_db.py:91 +msgid "the nexthop is not connected with router" +msgstr "" + +#: neutron/db/extraroute_db.py:96 +msgid "the nexthop is used by router" +msgstr "" + +#: neutron/db/extraroute_db.py:125 +#, python-format +msgid "Added routes are %s" +msgstr "" + +#: neutron/db/extraroute_db.py:133 +#, python-format +msgid "Removed routes are %s" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:34 +msgid "Driver to use for scheduling router to a default L3 agent" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:37 +msgid "Allow auto scheduling of routers to L3 agent." 
+msgstr "" + +#: neutron/db/l3_db.py:240 +#, python-format +msgid "No eligible l3 agent associated with external network %s found" +msgstr "" + +#: neutron/db/l3_db.py:261 +#, python-format +msgid "No IPs available for external network %s" +msgstr "" + +#: neutron/db/l3_db.py:275 +#, python-format +msgid "Network %s is not an external network" +msgstr "" + +#: neutron/db/l3_db.py:389 +#, python-format +msgid "Router already has a port on subnet %s" +msgstr "" + +#: neutron/db/l3_db.py:403 +#, python-format +msgid "" +"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s " +"of subnet %(sub_id)s" +msgstr "" + +#: neutron/db/l3_db.py:417 neutron/db/l3_db.py:543 +#: neutron/plugins/bigswitch/plugin.py:1009 +#: neutron/plugins/bigswitch/plugin.py:1018 +msgid "Either subnet_id or port_id must be specified" +msgstr "" + +#: neutron/db/l3_db.py:422 +msgid "Cannot specify both subnet-id and port-id" +msgstr "" + +#: neutron/db/l3_db.py:435 +msgid "Router port must have exactly one fixed IP" +msgstr "" + +#: neutron/db/l3_db.py:449 +msgid "Subnet for router interface must have a gateway IP" +msgstr "" + +#: neutron/db/l3_db.py:597 neutron/plugins/nec/nec_router.py:197 +#, python-format +msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" +msgstr "" + +#: neutron/db/l3_db.py:633 +#, python-format +msgid "" +"Port %(port_id)s is associated with a different tenant than Floating IP " +"%(floatingip_id)s and therefore cannot be bound." +msgstr "" + +#: neutron/db/l3_db.py:637 +#, python-format +msgid "" +"Cannot create floating IP and bind it to Port %s, since that port is " +"owned by a different tenant." +msgstr "" + +#: neutron/db/l3_db.py:649 +#, python-format +msgid "Port %(id)s does not have fixed ip %(address)s" +msgstr "" + +#: neutron/db/l3_db.py:656 +#, python-format +msgid "Cannot add floating IP to port %s that hasno fixed IP addresses" +msgstr "" + +#: neutron/db/l3_db.py:660 +#, python-format +msgid "" +"Port %s has multiple fixed IPs. Must provide a specific IP when " +"assigning a floating IP" +msgstr "" + +#: neutron/db/l3_db.py:703 neutron/plugins/vmware/plugins/base.py:1871 +msgid "fixed_ip_address cannot be specified without a port_id" +msgstr "" + +#: neutron/db/l3_db.py:738 +#, python-format +msgid "Network %s is not a valid external network" +msgstr "" + +#: neutron/db/l3_db.py:875 +#, python-format +msgid "" +"Port %(port_id)s has owner %(port_owner)s, but no IP address, so it can " +"be deleted" +msgstr "" + +#: neutron/db/l3_db.py:980 +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "" + +#: neutron/db/l3_rpc_base.py:50 +msgid "" +"No plugin for L3 routing registered! Will reply to l3 agent with empty " +"router dictionary." +msgstr "" + +#: neutron/db/l3_rpc_base.py:64 +#, python-format +msgid "" +"Routers returned to l3 agent:\n" +" %s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:70 +#, python-format +msgid "Checking router: %(id)s for host: %(host)s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:95 +#, python-format +msgid "External network ID returned to l3 agent: %s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:105 +#, python-format +msgid "New status for floating IP %(floatingip_id)s: %(status)s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:113 +#, python-format +msgid "Floating IP: %s no longer present." 
+msgstr "" + +#: neutron/db/routedserviceinsertion_db.py:36 +#, python-format +msgid "Resource type '%(resource_type)s' is longer than %(maxlen)d characters" +msgstr "" + +#: neutron/db/securitygroups_rpc_base.py:277 +#, python-format +msgid "No valid gateway port on subnet %s is found for IPv6 RA" +msgstr "" + +#: neutron/db/sqlalchemyutils.py:73 +#, python-format +msgid "%s is invalid attribute for sort_key" +msgstr "" + +#: neutron/db/sqlalchemyutils.py:76 +#, python-format +msgid "" +"The attribute '%(attr)s' is reference to other resource, can't used by " +"sort '%(resource)s'" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:239 +#: neutron/plugins/vmware/plugins/service.py:902 +#: neutron/services/firewall/fwaas_plugin.py:227 +msgid "create_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:255 +#: neutron/plugins/vmware/plugins/service.py:936 +#: neutron/services/firewall/fwaas_plugin.py:242 +msgid "update_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:265 +#: neutron/plugins/vmware/plugins/service.py:962 +#: neutron/services/firewall/fwaas_plugin.py:257 +msgid "delete_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:275 +msgid "get_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:280 +msgid "get_firewalls() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:286 +msgid "get_firewalls_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:291 +msgid "create_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:307 +#: neutron/plugins/vmware/plugins/service.py:1024 +#: neutron/services/firewall/fwaas_plugin.py:266 +msgid "update_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:319 +msgid "delete_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:331 +msgid "get_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:336 +msgid "get_firewall_policies() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:342 +msgid "get_firewall_policies_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:347 +msgid "create_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:376 +#: neutron/plugins/vmware/plugins/service.py:998 +#: neutron/services/firewall/fwaas_plugin.py:274 +msgid "update_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:400 +msgid "delete_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:408 +msgid "get_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:413 +msgid "get_firewall_rules() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:419 +msgid "get_firewall_rules_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:428 +#: neutron/plugins/vmware/plugins/service.py:1051 +#: neutron/services/firewall/fwaas_plugin.py:284 +msgid "insert_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:468 +#: neutron/plugins/vmware/plugins/service.py:1080 +#: neutron/services/firewall/fwaas_plugin.py:292 +msgid "remove_rule() called" +msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:68 +#, python-format +msgid "The %(key)s field can not have negative value. Current value is %(value)d." +msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:272 +msgid "'cookie_name' should be specified for this type of session persistence." 
+msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:276 +msgid "'cookie_name' is not allowed for this type of session persistence" +msgstr "" + +#: neutron/db/metering/metering_rpc.py:46 +#, python-format +msgid "Unable to find agent %s." +msgstr "" + +#: neutron/db/migration/cli.py:32 +msgid "Neutron plugin provider module" +msgstr "" + +#: neutron/db/migration/cli.py:41 +msgid "Neutron quota driver class" +msgstr "" + +#: neutron/db/migration/cli.py:49 +msgid "URL to database" +msgstr "" + +#: neutron/db/migration/cli.py:52 +msgid "Database engine" +msgstr "" + +#: neutron/db/migration/cli.py:75 +msgid "You must provide a revision or relative delta" +msgstr "" + +#: neutron/db/migration/cli.py:105 neutron/db/migration/cli.py:118 +msgid "Timeline branches unable to generate timeline" +msgstr "" + +#: neutron/db/migration/cli.py:112 +msgid "HEAD file does not match migration timeline head" +msgstr "" + +#: neutron/db/migration/cli.py:154 +msgid "Available commands" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:88 +msgid "Missing version in alembic_versions table" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:90 +#, python-format +msgid "Multiple versions in alembic_versions table: %s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:94 +#, python-format +msgid "" +"Unsupported database schema %(current)s. Please migrate your database to " +"one of following versions: %(supported)s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:414 +#, python-format +msgid "Unknown tunnel type: %s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:428 +msgid "The plugin type whose database will be migrated" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:431 +msgid "The connection url for the target db" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:434 +#, python-format +msgid "The %s tunnel type to migrate from" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:437 +#: neutron/plugins/openvswitch/common/config.py:78 +msgid "The UDP port to use for VXLAN tunnels." 
+msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:440 +msgid "Retain the old plugin's tables" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:446 +#, python-format +msgid "" +"Tunnel args (tunnel-type and vxlan-udp-port) are not valid for the %s " +"plugin" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:453 +#, python-format +msgid "" +"Support for migrating %(plugin)s for release %(release)s is not yet " +"implemented" +msgstr "" + +#: neutron/db/vpn/vpn_db.py:678 +#, python-format +msgid "vpnservice %s in db is already deleted" +msgstr "" + +#: neutron/debug/commands.py:32 +msgid "Unimplemented commands" +msgstr "" + +#: neutron/debug/commands.py:44 +msgid "ID of network to probe" +msgstr "" + +#: neutron/debug/commands.py:48 +msgid "Owner type of the device: network/compute" +msgstr "" + +#: neutron/debug/commands.py:56 +#, python-format +msgid "Probe created : %s " +msgstr "" + +#: neutron/debug/commands.py:68 +msgid "ID of probe port to delete" +msgstr "" + +#: neutron/debug/commands.py:75 +#, python-format +msgid "Probe %s deleted" +msgstr "" + +#: neutron/debug/commands.py:106 +msgid "All Probes deleted " +msgstr "" + +#: neutron/debug/commands.py:118 +msgid "ID of probe port to execute command" +msgstr "" + +#: neutron/debug/commands.py:123 +msgid "Command to execute" +msgstr "" + +#: neutron/debug/commands.py:143 +msgid "Ping timeout" +msgstr "" + +#: neutron/debug/commands.py:147 +msgid "ID of network" +msgstr "" + +#: neutron/debug/debug_agent.py:120 +#, python-format +msgid "Failed to delete namespace %s" +msgstr "" + +#: neutron/debug/shell.py:62 +msgid "Config file for interface driver (You may also use l3_agent.ini)" +msgstr "" + +#: neutron/debug/shell.py:70 +msgid "" +"You must provide a config file for bridge - either --config-file or " +"env[NEUTRON_TEST_CONFIG_FILE]" +msgstr "" + +#: neutron/extensions/agent.py:61 +#, python-format +msgid "Agent %(id)s could not be found" +msgstr "" + +#: neutron/extensions/agent.py:65 +#, python-format +msgid "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" +msgstr "" + +#: neutron/extensions/agent.py:70 +#, python-format +msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:22 +msgid "AllowedAddressPair must contain ip_address" +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:26 +msgid "" +"Port Security must be enabled in order to have allowed address pairs on a" +" port." +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:31 +#, python-format +msgid "" +"Request contains duplicate address pair: mac_address %(mac_address)s " +"ip_address %(ip_address)s." +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:119 +#, python-format +msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:123 +#, python-format +msgid "" +"The network %(network_id)s has been already hosted by the DHCP Agent " +"%(agent_id)s." +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:128 +#, python-format +msgid "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/external_net.py:23 +#, python-format +msgid "" +"External network %(net_id)s cannot be updated to be made non-external, " +"since it has existing gateway ports" +msgstr "" + +#: neutron/extensions/external_net.py:51 +msgid "Adds external network attribute to network resource." 
+msgstr "" + +#: neutron/extensions/extra_dhcp_opt.py:25 +#, python-format +msgid "ExtraDhcpOpt %(id)s could not be found" +msgstr "" + +#: neutron/extensions/extra_dhcp_opt.py:29 +#, python-format +msgid "Invalid data format for extra-dhcp-opt: %(data)s" +msgstr "" + +#: neutron/extensions/extraroute.py:23 +#, python-format +msgid "Invalid format for routes: %(routes)s, %(reason)s" +msgstr "" + +#: neutron/extensions/extraroute.py:27 +#, python-format +msgid "" +"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot " +"be deleted, as it is required by one or more routes." +msgstr "" + +#: neutron/extensions/extraroute.py:33 +#, python-format +msgid "" +"Unable to complete operation for %(router_id)s. The number of routes " +"exceeds the maximum %(quota)s." +msgstr "" + +#: neutron/extensions/firewall.py:37 +#, python-format +msgid "Firewall %(firewall_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:41 +#, python-format +msgid "Firewall %(firewall_id)s is still active." +msgstr "" + +#: neutron/extensions/firewall.py:45 +#, python-format +msgid "" +"Operation cannot be performed since associated Firewall %(firewall_id)s " +"is in %(pending_state)s." +msgstr "" + +#: neutron/extensions/firewall.py:50 +#, python-format +msgid "Firewall Policy %(firewall_policy_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:54 +#, python-format +msgid "Firewall Policy %(firewall_policy_id)s is being used." +msgstr "" + +#: neutron/extensions/firewall.py:58 +#, python-format +msgid "Firewall Rule %(firewall_rule_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:62 +#, python-format +msgid "Firewall Rule %(firewall_rule_id)s is being used." +msgstr "" + +#: neutron/extensions/firewall.py:66 +#, python-format +msgid "" +"Firewall Rule %(firewall_rule_id)s is not associated with Firewall " +"Policy %(firewall_policy_id)s." +msgstr "" + +#: neutron/extensions/firewall.py:71 +#, python-format +msgid "" +"Firewall Rule protocol %(protocol)s is not supported. Only protocol " +"values %(values)s and their integer representation (0 to 255) are " +"supported." +msgstr "" + +#: neutron/extensions/firewall.py:77 +#, python-format +msgid "" +"Firewall rule action %(action)s is not supported. Only action values " +"%(values)s are supported." +msgstr "" + +#: neutron/extensions/firewall.py:82 +#, python-format +msgid "%(param)s are not allowed when protocol is set to ICMP." +msgstr "" + +#: neutron/extensions/firewall.py:87 +#, python-format +msgid "Invalid value for port %(port)s." +msgstr "" + +#: neutron/extensions/firewall.py:91 +msgid "Missing rule info argument for insert/remove rule operation." +msgstr "" + +#: neutron/extensions/firewall.py:101 +#, python-format +msgid "%(driver)s: Internal driver error." +msgstr "" + +#: neutron/extensions/firewall.py:150 +#, python-format +msgid "Port '%s' is not a valid number" +msgstr "" + +#: neutron/extensions/firewall.py:154 +#, python-format +msgid "Invalid port '%s'" +msgstr "" + +#: neutron/extensions/firewall.py:168 +#, python-format +msgid "%(msg_ip)s and %(msg_subnet)s" +msgstr "" + +#: neutron/extensions/firewall.py:289 +msgid "Number of firewalls allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/firewall.py:293 +msgid "" +"Number of firewall policies allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/firewall.py:297 +msgid "" +"Number of firewall rules allowed per tenant. 
A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/l3.py:29 +#, python-format +msgid "Router %(router_id)s could not be found" +msgstr "" + +#: neutron/extensions/l3.py:33 +#, python-format +msgid "Router %(router_id)s still has ports" +msgstr "" + +#: neutron/extensions/l3.py:37 +#, python-format +msgid "Router %(router_id)s does not have an interface with id %(port_id)s" +msgstr "" + +#: neutron/extensions/l3.py:42 +#, python-format +msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" +msgstr "" + +#: neutron/extensions/l3.py:47 +#, python-format +msgid "" +"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot " +"be deleted, as it is required by one or more floating IPs." +msgstr "" + +#: neutron/extensions/l3.py:53 +#, python-format +msgid "Floating IP %(floatingip_id)s could not be found" +msgstr "" + +#: neutron/extensions/l3.py:57 +#, python-format +msgid "" +"External network %(external_network_id)s is not reachable from subnet " +"%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a " +"Floating IP." +msgstr "" + +#: neutron/extensions/l3.py:63 +#, python-format +msgid "" +"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with " +"port %(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already " +"has a floating IP on external network %(net_id)s." +msgstr "" + +#: neutron/extensions/l3.py:70 +#, python-format +msgid "" +"Port %(port_id)s has owner %(device_owner)s and therefore cannot be " +"deleted directly via the port API." +msgstr "" + +#: neutron/extensions/l3.py:75 +#, python-format +msgid "" +"Gateway cannot be updated for router %(router_id)s, since a gateway to " +"external network %(net_id)s is required by one or more floating IPs." +msgstr "" + +#: neutron/extensions/l3.py:138 +msgid "Number of routers allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/l3.py:142 +msgid "" +"Number of floating IPs allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:47 +#: neutron/extensions/l3agentscheduler.py:85 +msgid "No plugin for L3 routing registered to handle router scheduling" +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:151 +#, python-format +msgid "Agent %(id)s is not a L3 Agent or has been disabled" +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:155 +#, python-format +msgid "" +"The router %(router_id)s has been already hosted by the L3 Agent " +"%(agent_id)s." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:160 +#, python-format +msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:165 +#, python-format +msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:170 +#, python-format +msgid "The router %(router_id)s is not hosted by L3 agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/lbaas_agentscheduler.py:116 +#, python-format +msgid "No eligible loadbalancer agent found for pool %(pool_id)s." +msgstr "" + +#: neutron/extensions/lbaas_agentscheduler.py:121 +#, python-format +msgid "No active loadbalancer agent found for pool %(pool_id)s." 
+msgstr "" + +#: neutron/extensions/loadbalancer.py:33 +msgid "Delay must be greater than or equal to timeout" +msgstr "" + +#: neutron/extensions/loadbalancer.py:37 +#, python-format +msgid "No eligible backend for pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:41 +#, python-format +msgid "Vip %(vip_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:45 +#, python-format +msgid "Another Vip already exists for pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:49 +#, python-format +msgid "Pool %(pool_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:53 +#, python-format +msgid "Member %(member_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:57 +#, python-format +msgid "Health_monitor %(monitor_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:61 +#, python-format +msgid "Monitor %(monitor_id)s is not associated with Pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:66 +#, python-format +msgid "health_monitor %(monitor_id)s is already associated with pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:71 +#, python-format +msgid "Invalid state %(state)s of Loadbalancer resource %(id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:75 +#, python-format +msgid "Pool %(pool_id)s is still in use" +msgstr "" + +#: neutron/extensions/loadbalancer.py:79 +#, python-format +msgid "Health monitor %(monitor_id)s still has associations with pools" +msgstr "" + +#: neutron/extensions/loadbalancer.py:84 +#, python-format +msgid "Statistics of Pool %(pool_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:88 +#, python-format +msgid "Protocol %(vip_proto)s does not match pool protocol %(pool_proto)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:93 +#, python-format +msgid "" +"Member with address %(address)s and port %(port)s already present in pool" +" %(pool)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:309 +msgid "Number of vips allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:313 +msgid "Number of pools allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:317 +msgid "" +"Number of pool members allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:321 +msgid "" +"Number of health monitors allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/metering.py:33 +#, python-format +msgid "Metering label %(label_id)s does not exist" +msgstr "" + +#: neutron/extensions/metering.py:37 +msgid "Duplicate Metering Rule in POST." +msgstr "" + +#: neutron/extensions/metering.py:41 +#, python-format +msgid "Metering label rule %(rule_id)s does not exist" +msgstr "" + +#: neutron/extensions/metering.py:45 +#, python-format +msgid "" +"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " +"another" +msgstr "" + +#: neutron/extensions/multiprovidernet.py:27 +msgid "Segments and provider values cannot both be set." +msgstr "" + +#: neutron/extensions/multiprovidernet.py:31 +msgid "Duplicate segment entry in request." +msgstr "" + +#: neutron/extensions/portsecurity.py:20 +msgid "" +"Port has security group associated. 
Cannot disable port security or ip " +"address until security group is removed" +msgstr "" + +#: neutron/extensions/portsecurity.py:25 +msgid "" +"Port security must be enabled and port must have an IP address in order " +"to use security groups." +msgstr "" + +#: neutron/extensions/portsecurity.py:30 +msgid "Port does not have port security binding." +msgstr "" + +#: neutron/extensions/providernet.py:54 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:287 +msgid "Plugin does not support updating provider attributes" +msgstr "" + +#: neutron/extensions/quotasv2.py:67 +msgid "POST requests are not supported on this resource." +msgstr "" + +#: neutron/extensions/quotasv2.py:86 +msgid "Only admin is authorized to access quotas for another tenant" +msgstr "" + +#: neutron/extensions/quotasv2.py:91 +msgid "Only admin can view or configure quota" +msgstr "" + +#: neutron/extensions/securitygroup.py:34 +msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" +msgstr "" + +#: neutron/extensions/securitygroup.py:39 +#, python-format +msgid "Invalid value for port %(port)s" +msgstr "" + +#: neutron/extensions/securitygroup.py:43 +#, python-format +msgid "" +"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to " +"255." +msgstr "" + +#: neutron/extensions/securitygroup.py:48 +#, python-format +msgid "" +"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-" +"range-min) is missing." +msgstr "" + +#: neutron/extensions/securitygroup.py:53 +#, python-format +msgid "Security Group %(id)s in use." +msgstr "" + +#: neutron/extensions/securitygroup.py:57 +msgid "Removing default security group not allowed." +msgstr "" + +#: neutron/extensions/securitygroup.py:61 +msgid "Updating default security group not allowed." +msgstr "" + +#: neutron/extensions/securitygroup.py:65 +msgid "Default security group already exists." +msgstr "" + +#: neutron/extensions/securitygroup.py:69 +#, python-format +msgid "" +"Security group rule protocol %(protocol)s not supported. Only protocol " +"values %(values)s and their integer representation (0 to 255) are " +"supported." +msgstr "" + +#: neutron/extensions/securitygroup.py:75 +msgid "Multiple tenant_ids in bulk security group rule create not allowed" +msgstr "" + +#: neutron/extensions/securitygroup.py:80 +msgid "Only remote_ip_prefix or remote_group_id may be provided." +msgstr "" + +#: neutron/extensions/securitygroup.py:85 +msgid "Must also specifiy protocol if port range is given." +msgstr "" + +#: neutron/extensions/securitygroup.py:89 +msgid "Only allowed to update rules for one security profile at a time" +msgstr "" + +#: neutron/extensions/securitygroup.py:94 +#, python-format +msgid "Security group %(id)s does not exist" +msgstr "" + +#: neutron/extensions/securitygroup.py:98 +#, python-format +msgid "Security group rule %(id)s does not exist" +msgstr "" + +#: neutron/extensions/securitygroup.py:102 +msgid "Duplicate Security Group Rule in POST." +msgstr "" + +#: neutron/extensions/securitygroup.py:106 +#, python-format +msgid "Security group rule already exists. Group id is %(id)s." +msgstr "" + +#: neutron/extensions/securitygroup.py:110 +#, python-format +msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" +msgstr "" + +#: neutron/extensions/securitygroup.py:158 +#, python-format +msgid "'%s' is not an integer or uuid" +msgstr "" + +#: neutron/extensions/securitygroup.py:247 +msgid "" +"Number of security groups allowed per tenant. A negative value means " +"unlimited." 
+msgstr "" + +#: neutron/extensions/securitygroup.py:251 +msgid "" +"Number of security rules allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/servicetype.py:52 +msgid "Neutron Service Type Management" +msgstr "" + +#: neutron/extensions/servicetype.py:60 +msgid "API for retrieving service providers for Neutron advanced services" +msgstr "" + +#: neutron/extensions/vpnaas.py:31 +#, python-format +msgid "VPNService %(vpnservice_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:35 +#, python-format +msgid "ipsec_site_connection %(ipsecsite_conn_id)s not found" +msgstr "" + +#: neutron/extensions/vpnaas.py:39 +#, python-format +msgid "ipsec_site_connection %(attr)s is equal to or less than dpd_interval" +msgstr "" + +#: neutron/extensions/vpnaas.py:44 +#, python-format +msgid "ipsec_site_connection MTU %(mtu)d is too small for ipv%(version)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:49 +#, python-format +msgid "IKEPolicy %(ikepolicy_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:53 +#, python-format +msgid "IPsecPolicy %(ipsecpolicy_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:57 +#, python-format +msgid "" +"IKEPolicy %(ikepolicy_id)s is in use by existing IPsecSiteConnection and " +"can't be updated or deleted" +msgstr "" + +#: neutron/extensions/vpnaas.py:62 +#, python-format +msgid "VPNService %(vpnservice_id)s is still in use" +msgstr "" + +#: neutron/extensions/vpnaas.py:66 +#, python-format +msgid "Router %(router_id)s is used by VPNService %(vpnservice_id)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:70 +#, python-format +msgid "Invalid state %(state)s of vpnaas resource %(id)s for updating" +msgstr "" + +#: neutron/extensions/vpnaas.py:75 +#, python-format +msgid "" +"IPsecPolicy %(ipsecpolicy_id)s is in use by existing IPsecSiteConnection " +"and can't be updated or deleted" +msgstr "" + +#: neutron/extensions/vpnaas.py:80 +#, python-format +msgid "Can not load driver :%(device_driver)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:84 +#, python-format +msgid "Subnet %(subnet_id)s is not connected to Router %(router_id)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:89 +#, python-format +msgid "Router %(router_id)s has no external network gateway set" +msgstr "" + +#: neutron/notifiers/nova.py:165 +msgid "device_id is not set on port yet." +msgstr "" + +#: neutron/notifiers/nova.py:169 +msgid "Port ID not set! Nova will not be notified of port status change." 
+msgstr "" + +#: neutron/notifiers/nova.py:194 +#, python-format +msgid "" +"Ignoring state change previous_port_status: %(pre_status)s " +"current_port_status: %(cur_status)s port_id %(id)s" +msgstr "" + +#: neutron/notifiers/nova.py:220 +#, python-format +msgid "Sending events: %s" +msgstr "" + +#: neutron/notifiers/nova.py:225 +#, python-format +msgid "Nova returned NotFound for event: %s" +msgstr "" + +#: neutron/notifiers/nova.py:228 +#, python-format +msgid "Failed to notify nova on events: %s" +msgstr "" + +#: neutron/notifiers/nova.py:232 neutron/notifiers/nova.py:248 +#, python-format +msgid "Error response returned from nova: %s" +msgstr "" + +#: neutron/notifiers/nova.py:243 +#, python-format +msgid "Nova event: %s returned with failed status" +msgstr "" + +#: neutron/notifiers/nova.py:246 +#, python-format +msgid "Nova event response: %s" +msgstr "" + +#: neutron/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: neutron/openstack/common/gettextutils.py:320 +msgid "Message objects do not support addition." +msgstr "" + +#: neutron/openstack/common/gettextutils.py:330 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: neutron/openstack/common/lockutils.py:103 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: neutron/openstack/common/lockutils.py:168 +#, python-format +msgid "Got semaphore \"%(lock)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:177 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:187 +#, python-format +msgid "Created lock path: %s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:205 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:209 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:247 +#, python-format +msgid "Got semaphore / lock \"%(function)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:251 +#, python-format +msgid "Semaphore / lock released \"%(function)s\"" +msgstr "" + +#: neutron/openstack/common/log.py:327 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: neutron/openstack/common/log.py:436 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: neutron/openstack/common/log.py:486 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: neutron/openstack/common/log.py:729 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:89 +msgid "in fixed duration looping call" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:39 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: neutron/openstack/common/policy.py:395 +#, python-format +msgid "Failed to understand rule %(rule)s" +msgstr "" + +#: neutron/openstack/common/policy.py:405 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: neutron/openstack/common/policy.py:680 +#, python-format +msgid "Failed to understand rule %(rule)r" +msgstr "" + +#: neutron/openstack/common/processutils.py:130 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: neutron/openstack/common/processutils.py:145 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:169 +#: neutron/openstack/common/processutils.py:241 +#, python-format +msgid "Result was %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:181 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: neutron/openstack/common/processutils.py:220 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:222 +msgid "Environment not supported over SSH" +msgstr "" + +#: neutron/openstack/common/processutils.py:226 +msgid "process_input not supported over SSH" +msgstr "" + +#: neutron/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: neutron/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: neutron/openstack/common/strutils.py:92 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: neutron/openstack/common/strutils.py:197 +#, python-format +msgid "Invalid unit system: \"%s\"" +msgstr "" + +#: neutron/openstack/common/strutils.py:206 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: neutron/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: neutron/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: neutron/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:58 +msgid "Sort key supplied was not valid." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:119 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:162 +#, python-format +msgid "" +"There is no `deleted` column in `%s` table. Project doesn't use soft-" +"deleted feature." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:174 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:181 +#, python-format +msgid "There is no `project_id` column in `%s` table." 
+msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:243 +msgid "model should be a subclass of ModelBase" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:286 +#, python-format +msgid "" +"Please specify column %s in col_name_col_instance param. It is required " +"because column has unsupported type by sqlite)." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:292 +#, python-format +msgid "" +"col_name_col_instance param has wrong type of column instance for column " +"%s It should be instance of sqlalchemy.Column." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:400 +msgid "Unsupported id columns type" +msgstr "" + +#: neutron/openstack/common/middleware/catch_errors.py:40 +#, python-format +msgid "An error occurred during processing the request: %s" +msgstr "" + +#: neutron/openstack/common/middleware/sizelimit.py:55 +#: neutron/openstack/common/middleware/sizelimit.py:64 +#: neutron/openstack/common/middleware/sizelimit.py:75 +msgid "Request is too large." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:32 +msgid "" +"A comma separated list of Big Switch or Floodlight servers and port " +"numbers. The plugin proxies the requests to the Big Switch/Floodlight " +"server, which performs the networking configuration. Only oneserver is " +"needed per deployment, but you may wish todeploy multiple servers to " +"support failover." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:39 +msgid "" +"The username and password for authenticating against the Big Switch or " +"Floodlight controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:42 +msgid "" +"If True, Use SSL when connecting to the Big Switch or Floodlight " +"controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:45 +msgid "" +"Trust and store the first certificate received for each controller " +"address and use it to validate future connections to that address." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:49 +msgid "Disables SSL certificate validation for controllers" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:51 +msgid "Re-use HTTP/HTTPS connections to the controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:54 +msgid "Directory containing ca_certs and host_certs certificate directories." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:57 +msgid "Sync data on connect" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:59 +msgid "" +"If neutron fails to create a resource because the backend controller " +"doesn't know of a dependency, the plugin automatically triggers a full " +"data synchronization to the controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:64 +msgid "" +"Time between verifications that the backend controller database is " +"consistent with Neutron. (0 to disable)" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:67 +msgid "" +"Maximum number of seconds to wait for proxy request to connect and " +"complete." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:70 +msgid "" +"Maximum number of threads to spawn to handle large volumes of port " +"creations." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:74 +msgid "User defined identifier for this Neutron deployment" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:76 +msgid "" +"Flag to decide if a route to the metadata server should be injected into " +"the VM" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:81 +msgid "" +"The default router rules installed in new tenant routers. 
Repeat the " +"config option for each rule. Format is " +"::: Use an * to specify default for " +"all tenants." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:86 +msgid "Maximum number of router rules" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:90 +msgid "Virtual interface type to configure on Nova compute nodes" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:97 +#, python-format +msgid "Nova compute nodes to manually set VIF type to %s" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:104 +msgid "List of allowed vif_type values." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:108 +msgid "" +"Name of integration bridge on compute nodes used for security group " +"insertion." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:111 +msgid "Seconds between agent checks for port changes" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:113 +msgid "Virtual switch type." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:94 +msgid "Syntax error in server config file, aborting plugin" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:132 neutron/plugins/ml2/db.py:100 +#, python-format +msgid "get_port_and_sgs() called for port_id %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:238 +#, python-format +msgid "Unable to update remote topology: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:322 +#, python-format +msgid "" +"Setting admin_state_up=False is not supported in this plugin version. " +"Ignoring setting for resource: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:328 +#, python-format +msgid "" +"Operational status is internally set by the plugin. Ignoring setting " +"status=%s." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:353 +#, python-format +msgid "Unrecognized vif_type in configuration [%s]. Defaulting to ovs." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:399 +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:96 +msgid "Iconsistency with backend controller triggering full synchronization." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:416 +#, python-format +msgid "NeutronRestProxyV2: Unable to create port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:475 +#, python-format +msgid "NeutronRestProxy: Starting plugin. 
Version=%s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:502 +msgid "NeutronRestProxyV2: initialization done" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:545 +msgid "NeutronRestProxyV2: create_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:588 +msgid "NeutronRestProxyV2.update_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:616 +msgid "NeutronRestProxyV2: delete_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:654 +msgid "NeutronRestProxyV2: create_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:746 +msgid "NeutronRestProxyV2: update_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:806 +msgid "NeutronRestProxyV2: delete_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:823 +msgid "NeutronRestProxyV2: create_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:840 +msgid "NeutronRestProxyV2: update_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:859 +msgid "NeutronRestProxyV2: delete_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:898 +msgid "NeutronRestProxyV2: create_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:921 +msgid "NeutronRestProxyV2.update_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:944 +msgid "NeutronRestProxyV2: delete_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:973 +msgid "NeutronRestProxyV2: add_router_interface() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1001 +msgid "NeutronRestProxyV2: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1035 +msgid "NeutronRestProxyV2: create_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1052 +#, python-format +msgid "NeutronRestProxyV2: Unable to create remote floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1059 +msgid "NeutronRestProxyV2: update_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1076 +msgid "NeutronRestProxyV2: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1091 +msgid "NeutronRestProxyV2: diassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1122 +msgid "NeutronRestProxyV2: too many external networks" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1137 +msgid "Adding host route: " +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1138 +#, python-format +msgid "Destination:%(dst)s nexthop:%(next)s" +msgstr "" + +#: neutron/plugins/bigswitch/routerrule_db.py:75 +msgid "No rules in router" +msgstr "" + +#: neutron/plugins/bigswitch/routerrule_db.py:89 +#, python-format +msgid "Updating router rules to %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:77 +#, python-format +msgid "Error in REST call to remote network controller: %(reason)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:116 +msgid "Couldn't retrieve capabilities. Newer API calls won't be supported." 
+msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:118 +#, python-format +msgid "The following capabilities were received for %(server)s: %(cap)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:147 +#, python-format +msgid "ServerProxy: server=%(server)s, port=%(port)d, ssl=%(ssl)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:150 +#, python-format +msgid "" +"ServerProxy: resource=%(resource)s, data=%(data)r, headers=%(headers)r, " +"action=%(action)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:171 +msgid "ServerProxy: Could not establish HTTPS connection" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:179 +msgid "ServerProxy: Could not establish HTTP connection" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:212 +#, python-format +msgid "ServerProxy: %(action)s failure, %(e)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:215 +#, python-format +msgid "" +"ServerProxy: status=%(status)d, reason=%(reason)r, ret=%(ret)s, " +"data=%(data)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:227 +msgid "ServerPool: initializing" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:250 +msgid "Servers not defined. Aborting server manager." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:257 +#, python-format +msgid "Servers must be defined as :. Configuration was %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:265 +msgid "ServerPool: initialization done" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:311 +#, python-format +msgid "ssl_cert_directory [%s] does not exist. Create it or disable ssl." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:328 +#, python-format +msgid "No certificates were found to verify controller %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:370 +#, python-format +msgid "" +"Could not retrieve initial certificate from controller %(server)s. Error " +"details: %(error)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:375 +#, python-format +msgid "Storing to certificate for host %(server)s at %(path)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:417 +msgid "Server requires synchronization, but no topology function was defined." 
+msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:432 +#, python-format +msgid "" +"ServerProxy: %(action)s failure for servers: %(server)r Response: " +"%(response)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:438 +#, python-format +msgid "" +"ServerProxy: Error details: status=%(status)d, reason=%(reason)r, " +"ret=%(ret)s, data=%(data)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:445 +#, python-format +msgid "ServerProxy: %(action)s failure for all servers: %(server)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:468 +#, python-format +msgid "" +"NeutronRestProxyV2: Received and ignored error code %(code)s on " +"%(action)s action to resource %(resource)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:478 +#, python-format +msgid "Unable to create remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:484 +#, python-format +msgid "Unable to update remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:489 +#, python-format +msgid "Unable to delete remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:495 +#, python-format +msgid "Unable to add router interface: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:500 +#, python-format +msgid "Unable to delete remote intf: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:506 +#, python-format +msgid "Unable to create remote network: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:512 +#: neutron/plugins/bigswitch/servermanager.py:517 +#, python-format +msgid "Unable to update remote network: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:526 +#, python-format +msgid "No device MAC attached to port %s. Skipping notification to controller." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:531 +#, python-format +msgid "Unable to create remote port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:536 +#, python-format +msgid "Unable to delete remote port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:546 +#, python-format +msgid "Unable to create floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:551 +#, python-format +msgid "Unable to update floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:556 +#, python-format +msgid "Unable to delete floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:561 +msgid "Backend server(s) do not support automated consitency checks." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:565 +#, python-format +msgid "Consistency watchdog disabled by polling interval setting of %s." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:577 +msgid "Encountered an error checking controller health." +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:116 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:235 +msgid "Port update received" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:120 +#, python-format +msgid "Port %s is not present on this host." +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:123 +#, python-format +msgid "Port %s found. Refreshing firewall." 
+msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:151 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:268 +msgid "Agent loop has new device" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:155 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:398 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:225 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:159 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:272 +msgid "Error in agent event loop" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:161 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:226 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:993 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1404 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:278 +#, python-format +msgid "Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" + +#: neutron/plugins/bigswitch/db/consistency_db.py:26 +msgid "Only one read_for_update call may be made at a time." +msgstr "" + +#: neutron/plugins/bigswitch/db/consistency_db.py:81 +#, python-format +msgid "Consistency hash for group %(hash_id)s updated to %(hash)s" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:39 +msgid "No host_id in port request to track port location." +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:42 +#, python-format +msgid "Received an empty port ID for host_id '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:45 +#, python-format +msgid "Received an empty host_id for port '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:47 +#, python-format +msgid "Logging port %(port)s on host_id %(host)s" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:28 +#, python-format +msgid "Invalid format for router rules: %(rule)s, %(reason)s" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:32 +#, python-format +msgid "" +"Unable to complete rules update for %(router_id)s. The number of rules " +"exceeds the maximum %(quota)s." +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:49 +#, python-format +msgid "Invalid data format for router rule: '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:81 +#, python-format +msgid "Duplicate nexthop in rule '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:89 +#, python-format +msgid "Action must be either permit or deny. 
'%s' was provided" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:101 +#, python-format +msgid "Duplicate router rules (src,dst) found '%s'" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:62 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:34 +msgid "The address of the host to SSH to" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:64 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:36 +msgid "The SSH username to use" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:66 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:38 +msgid "The SSH password to use" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:68 +msgid "Currently unused" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:72 +msgid "The network interface to use when creatinga port" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:123 +#: neutron/plugins/hyperv/rpc_callbacks.py:47 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:80 +#: neutron/plugins/mlnx/rpc_callbacks.py:62 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:88 +#, python-format +msgid "Device %(device)s details requested from %(agent_id)s" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:137 +#: neutron/plugins/brocade/NeutronPlugin.py:164 +#: neutron/plugins/hyperv/rpc_callbacks.py:63 +#: neutron/plugins/hyperv/rpc_callbacks.py:92 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:103 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:140 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:163 +#: neutron/plugins/mlnx/rpc_callbacks.py:84 +#: neutron/plugins/mlnx/rpc_callbacks.py:113 +#: neutron/plugins/mlnx/rpc_callbacks.py:128 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:106 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:143 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:166 +#, python-format +msgid "%s can not be found in database" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:299 +#: neutron/plugins/brocade/NeutronPlugin.py:343 +#: neutron/plugins/brocade/NeutronPlugin.py:396 +#: neutron/plugins/brocade/NeutronPlugin.py:426 +msgid "Brocade NOS driver error" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:300 +#, python-format +msgid "Returning the allocated vlan (%d) to the pool" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:303 +#: neutron/plugins/brocade/NeutronPlugin.py:344 +#: neutron/plugins/brocade/NeutronPlugin.py:397 +#: neutron/plugins/brocade/NeutronPlugin.py:428 +msgid "Brocade plugin raised exception, check logs" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:309 +#, python-format +msgid "Allocated vlan (%d) from the pool" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:69 +#, python-format +msgid "Connect failed to switch: %s" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:71 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:76 +#, python-format +msgid "Connect success to host %(host)s:%(ssh_port)d" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:96 +#: neutron/plugins/brocade/nos/nosdriver.py:110 +#: neutron/plugins/brocade/nos/nosdriver.py:123 +#: neutron/plugins/brocade/nos/nosdriver.py:136 +#, python-format +msgid "NETCONF error: %s" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:89 +msgid "Plugin initialization complete" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:117 +#, python-format +msgid "'%(model)s' object has no attribute '%(name)s'" +msgstr "" + +#: 
neutron/plugins/cisco/network_plugin.py:134 +#: neutron/plugins/cisco/db/network_db_v2.py:36 +msgid "get_all_qoss() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:140 +msgid "get_qos_details() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:145 +msgid "create_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:151 +msgid "delete_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:156 +msgid "rename_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:161 +msgid "get_all_credentials() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:167 +msgid "get_credential_details() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:172 +msgid "rename_credential() called" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:25 +#, python-format +msgid "Segmentation ID for network %(net_id)s is not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:30 +msgid "" +"Unable to complete operation. No more dynamic NICs are available in the " +"system." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:36 +#, python-format +msgid "" +"NetworkVlanBinding for %(vlan_id)s and network %(network_id)s already " +"exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:42 +#, python-format +msgid "Vlan ID %(vlan_id)s not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:47 +msgid "" +"Unable to complete operation. VLAN ID exists outside of the configured " +"network segment range." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:53 +msgid "No Vlan ID available." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:58 +#, python-format +msgid "QoS level %(qos_id)s could not be found for tenant %(tenant_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:64 +#, python-format +msgid "QoS level with name %(qos_name)s already exists for tenant %(tenant_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:70 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:23 +#, python-format +msgid "Credential %(credential_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:75 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:28 +#, python-format +msgid "Credential %(credential_name)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:80 +#, python-format +msgid "Credential %(credential_name)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:85 +#, python-format +msgid "Provider network %s already exists" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:90 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:39 +#, python-format +msgid "Connection to %(host)s is not configured." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:95 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:44 +#, python-format +msgid "Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:100 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:49 +#, python-format +msgid "Failed to configure Nexus: %(config)s. Reason: %(exc)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:105 +#, python-format +msgid "Nexus Port Binding (%(filters)s) is not present." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:114 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:69 +msgid "No usable Nexus switch found to create SVI interface." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:119 +#, python-format +msgid "PortVnic Binding %(port_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:124 +#, python-format +msgid "PortVnic Binding %(port_id)s is not present." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:129 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:74 +msgid "No subnet_id specified for router gateway." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:134 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:79 +#, python-format +msgid "Subnet %(subnet_id)s has an interface on %(router_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:139 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:84 +msgid "Nexus hardware router gateway only uses Subnet Ids." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:143 +#, python-format +msgid "" +"Unable to unplug the attachment %(att_id)s from port %(port_id)s for " +"network %(net_id)s. The attachment %(att_id)s does not exist." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:150 +#, python-format +msgid "Policy Profile %(profile_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:156 +#, python-format +msgid "Policy Profile %(profile_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:161 +#, python-format +msgid "Network Profile %(profile_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:167 +#, python-format +msgid "Network Profile %(profile)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:172 +#, python-format +msgid "" +"One or more network segments belonging to network profile %(profile)s is " +"in use." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:178 +#, python-format +msgid "" +"No more segments available in network segment pool " +"%(network_profile_name)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:184 +#, python-format +msgid "VM Network %(name)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:189 +#, python-format +msgid "Unable to create the network. The VXLAN ID %(vxlan_id)s is in use." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:195 +#, python-format +msgid "Vxlan ID %(vxlan_id)s not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:200 +msgid "" +"Unable to complete operation. VXLAN ID exists outside of the configured " +"network segment range." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:206 +#, python-format +msgid "Connection to VSM failed: %(reason)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:211 +#, python-format +msgid "Internal VSM Error: %(reason)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:216 +#, python-format +msgid "Network Binding for network %(network_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:222 +#, python-format +msgid "Port Binding for port %(port_id)s could not be found." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:228 +#, python-format +msgid "Profile-Tenant binding for profile %(profile_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:234 +msgid "No service cluster found to perform multi-segment bridging." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:72 +msgid "Port not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:73 +msgid "Unable to find a port with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:87 +msgid "Credential Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:88 +msgid "Unable to find a Credential with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:103 +msgid "QoS Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:104 +msgid "Unable to find a QoS with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:119 +msgid "Nova tenant Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:120 +msgid "Unable to find a Novatenant with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:135 +msgid "Requested State Invalid" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:136 +msgid "Unable to update port state with specified value." +msgstr "" + +#: neutron/plugins/cisco/common/config.py:24 +msgid "Virtual Switch to use" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:28 +msgid "Nexus Switch to use" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:33 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:21 +msgid "VLAN Name prefix" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:35 +msgid "VLAN Name prefix for provider vlans" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:37 +msgid "Provider VLANs are automatically created as needed on the Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:40 +msgid "" +"Provider VLANs are automatically trunked as needed on the ports of the " +"Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:43 +msgid "Enable L3 support on the Nexus switches" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:45 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:23 +msgid "Distribute SVI interfaces over all switches" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:49 +msgid "Model Class" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:53 +msgid "Nexus Driver Name" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:58 +msgid "N1K Integration Bridge" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:60 +msgid "N1K Enable Tunneling" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:62 +msgid "N1K Tunnel Bridge" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:64 +msgid "N1K Local IP" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:66 +msgid "N1K Tenant Network Type" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:68 +msgid "N1K Bridge Mappings" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:70 +msgid "N1K VXLAN ID Ranges" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:72 +msgid "N1K Network VLAN Ranges" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:74 +msgid "N1K default network profile" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:76 +msgid "N1K default policy profile" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:78 +msgid 
"N1K policy profile for network node" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:80 +msgid "N1K Policy profile polling duration in seconds" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:82 +msgid "Number of threads to use to make HTTP requests" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:135 +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:68 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:58 +msgid "Some config files were not parsed properly" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:329 +#, python-format +msgid "seg_min %(seg_min)s, seg_max %(seg_max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:560 +#, python-format +msgid "Reserving specific vlan %(vlan)s on physical network %(network)s from pool" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:585 +#, python-format +msgid "vlan_id %(vlan)s on physical network %(network)s not found" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:599 +#, python-format +msgid "Unreasonable vxlan ID range %(vxlan_min)s - %(vxlan_max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:641 +#, python-format +msgid "Reserving specific vxlan %s from pool" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:662 +#, python-format +msgid "vxlan_id %s not found" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:770 +msgid "create_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:792 +msgid "delete_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:806 +msgid "update_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:815 +msgid "get_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:840 +msgid "create_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:851 +msgid "delete_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:860 +msgid "update_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:869 +msgid "get_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:888 +msgid "Invalid profile type" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:905 +msgid "_profile_binding_exists()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:913 +msgid "get_profile_binding()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:923 +msgid "delete_profile_binding()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:930 +#, python-format +msgid "" +"Profile-Tenant binding missing for profile ID %(profile_id)s and tenant " +"ID %(tenant_id)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:944 +msgid "_get_profile_bindings()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1094 +msgid "segment_range not required for TRUNK" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1100 +msgid "multicast_ip_range not required" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1227 +msgid "Invalid segment range. example range: 500-550" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1240 +msgid "Invalid multicast ip address range. 
example range: 224.1.1.1-224.1.1.10" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1247 +#, python-format +msgid "%s is not a valid multicast ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1251 +#, python-format +msgid "%s is reserved multicast ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1255 +#, python-format +msgid "%s is not a valid ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1259 +#, python-format +msgid "" +"Invalid multicast IP range '%(min_ip)s-%(max_ip)s': Range should be from " +"low address to high address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1272 +msgid "Arguments segment_type missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1281 +msgid "segment_type should either be vlan, overlay, multi-segment or trunk" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1287 +msgid "Argument physical_network missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1293 +msgid "segment_range not required for trunk" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1299 +msgid "Argument sub_type missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1306 +msgid "Argument segment_range missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1317 +msgid "Argument multicast_ip_range missing for VXLAN multicast network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1345 +#, python-format +msgid "Segment range is invalid, select from %(min)s-%(nmin)s, %(nmax)s-%(max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1363 +#, python-format +msgid "segment range is invalid. Valid range is : %(min)s-%(max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1377 +#, python-format +msgid "NetworkProfile name %s already exists" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1394 +msgid "Segment range overlaps with another profile" +msgstr "" + +#: neutron/plugins/cisco/db/network_db_v2.py:44 +msgid "get_qos() called" +msgstr "" + +#: neutron/plugins/cisco/db/network_db_v2.py:57 +msgid "add_qos() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:32 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:30 +msgid "get_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:41 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:39 +msgid "get_nexusvlan_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:47 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:45 +msgid "add_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:60 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:58 +msgid "remove_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:76 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:74 +msgid "update_nexusport_binding called with no vlan" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:78 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:76 +msgid "update_nexusport_binding called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:89 +msgid "get_nexusvm_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:97 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:94 +msgid "get_port_vlan_switch_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:105 +#: 
neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:102 +#, python-format +msgid "" +"get_port_switch_bindings() called, port:'%(port_id)s', " +"switch:'%(switch_ip)s'" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:117 +msgid "get_nexussvi_bindings() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:72 +#, python-format +msgid "Loaded device plugin %s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:89 +#, python-format +msgid "%(module)s.%(name)s init done" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:136 +#, python-format +msgid "No %s Plugin loaded" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:137 +#, python-format +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:167 +msgid "create_network() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:180 +#, python-format +msgid "Provider network added to DB: %(network_id)s, %(vlan_id)s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:199 +msgid "update_network() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:223 +#, python-format +msgid "Provider network removed from DB: %s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:285 +msgid "create_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:350 +#, python-format +msgid "" +"tenant_id: %(tid)s, net_id: %(nid)s, old_device_id: %(odi)s, " +"new_device_id: %(ndi)s, old_host_id: %(ohi)s, new_host_id: %(nhi)s, " +"old_device_owner: %(odo)s, new_device_owner: %(ndo)s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:379 +msgid "update_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:406 +#, python-format +msgid "Unable to update port '%s' on Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:425 +msgid "delete_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:466 +msgid "L3 enabled on Nexus plugin, create SVI on switch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:486 +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:511 +msgid "L3 disabled or not Nexus plugin, send to vswitch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:500 +msgid "L3 enabled on Nexus plugin, delete SVI from switch" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:226 +msgid "Logical network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:251 +msgid "network_segment_pool" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:296 +msgid "Invalid input for CIDR" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:436 +#, python-format +msgid "req: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:446 +#, python-format +msgid "status_code %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:454 +#, python-format +msgid "VSM: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:144 +msgid "_setup_vsm" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:163 +msgid "_populate_policy_profiles" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:190 +msgid "No policy profile populated from VSM" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:227 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:347 +#: neutron/plugins/mlnx/mlnx_plugin.py:217 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:409 
+msgid "provider:network_type required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:231 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:245 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:357 +#: neutron/plugins/mlnx/mlnx_plugin.py:247 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:419 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:438 +msgid "provider:segmentation_id required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:234 +msgid "provider:segmentation_id out of range (1 through 4094)" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:239 +msgid "provider:physical_network specified for Overlay network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:248 +msgid "provider:segmentation_id out of range (5000+)" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:252 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:379 +#: neutron/plugins/mlnx/mlnx_plugin.py:233 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:454 +#, python-format +msgid "provider:network_type %s not supported" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:263 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:385 +#: neutron/plugins/mlnx/mlnx_plugin.py:273 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:460 +#, python-format +msgid "Unknown provider:physical_network %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:267 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:391 +#: neutron/plugins/mlnx/mlnx_plugin.py:279 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:466 +msgid "provider:physical_network required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:445 +#, python-format +msgid "_populate_member_segments %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:484 +msgid "Invalid pairing supplied" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:489 +#, python-format +msgid "Invalid UUID supplied in %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:490 +msgid "Invalid UUID supplied" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:519 +#, python-format +msgid "Cannot add a trunk segment '%s' as a member of another trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:524 +#, python-format +msgid "Cannot add vlan segment '%s' as a member of a vxlan trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:530 +#, python-format +msgid "Network UUID '%s' belongs to a different physical network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:535 +#, python-format +msgid "Cannot add vxlan segment '%s' as a member of a vlan trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:540 +#, python-format +msgid "Vlan tag '%s' is out of range" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:543 +#, python-format +msgid "Vlan tag '%s' is not an integer value" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:548 +#, python-format +msgid "%s is not a valid uuid" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:595 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:598 +msgid "n1kv:profile_id does not exist" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:610 +msgid "_send_create_logical_network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:633 +#, python-format 
+msgid "_send_create_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:643 +#, python-format +msgid "_send_update_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:653 +#, python-format +msgid "_send_delete_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:668 +#, python-format +msgid "_send_create_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:700 +#, python-format +msgid "_send_update_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:722 +#, python-format +msgid "add_segments=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:723 +#, python-format +msgid "del_segments=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:747 +#, python-format +msgid "_send_delete_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:787 +#, python-format +msgid "_send_create_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:797 +#, python-format +msgid "_send_update_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:808 +#, python-format +msgid "_send_delete_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:834 +#, python-format +msgid "_send_create_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:851 +#, python-format +msgid "_send_update_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:867 +#, python-format +msgid "_send_delete_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:898 +#, python-format +msgid "Create network: profile_id=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:906 +#, python-format +msgid "" +"Physical_network %(phy_net)s, seg_type %(net_type)s, seg_id %(seg_id)s, " +"multicast_ip %(multicast_ip)s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:918 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:928 +#, python-format +msgid "Seg list %s " +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:968 +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:252 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:198 +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:230 +#: neutron/plugins/mlnx/mlnx_plugin.py:360 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:514 +#, python-format +msgid "Created network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1026 +#, python-format +msgid "Updated network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1041 +#, python-format +msgid "Cannot delete network '%s' that is member of a trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1045 +#, python-format +msgid "Cannot delete network '%s' that is a member of a multi-segment network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1058 +#, python-format +msgid "Deleted network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1068 +#, python-format +msgid "Get network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1090 +msgid "Get networks" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1140 +#, python-format +msgid "Create port: profile_id=%s" +msgstr "" + +#: 
neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1186 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:305 +#, python-format +msgid "Created port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1197 +#, python-format +msgid "Update port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1244 +#, python-format +msgid "Get port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1264 +msgid "Get ports" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1280 +msgid "Create subnet" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1288 +#, python-format +msgid "Created subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1299 +msgid "Update subnet" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1314 +#, python-format +msgid "Delete subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1329 +#, python-format +msgid "Get subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1349 +msgid "Get subnets" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1434 +#, python-format +msgid "Scheduling router %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:157 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:165 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:187 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:193 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:113 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:152 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:161 +#, python-format +msgid "NexusDriver: %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:172 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:168 +#, python-format +msgid "NexusDriver created VLAN: %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:47 +#, python-format +msgid "Loaded driver %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:62 +msgid "NexusPlugin:create_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:111 +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:108 +#, python-format +msgid "Nexus: create & trunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:118 +#, python-format +msgid "Nexus: create vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:123 +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:112 +#, python-format +msgid "Nexus: trunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:136 +#, python-format +msgid "Nexus: delete & untrunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:142 +#, python-format +msgid "Nexus: delete vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:145 +#, python-format +msgid "Nexus: untrunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:202 +msgid "Grabbing a switch to create SVI" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:205 +msgid "Using round robin to create SVI" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:224 +msgid "No round robin or zero weights, using first switch" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:234 +msgid 
"NexusPlugin:delete_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:242 +msgid "NexusPlugin:update_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:250 +msgid "NexusPlugin:create_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:258 +msgid "NexusPlugin:delete_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:270 +#, python-format +msgid "delete_network(): provider vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:327 +msgid "NexusPlugin:update_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:336 +msgid "NexusPlugin:plug_interface() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:344 +msgid "NexusPlugin:unplug_interface() called" +msgstr "" + +#: neutron/plugins/common/utils.py:30 +#, python-format +msgid "%s is not a valid VLAN tag" +msgstr "" + +#: neutron/plugins/common/utils.py:34 +msgid "End of VLAN range is less than start of VLAN range" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:107 +#: neutron/plugins/embrane/agent/dispatcher.py:132 +#: neutron/services/loadbalancer/drivers/embrane/poller.py:56 +#: neutron/services/loadbalancer/drivers/embrane/agent/dispatcher.py:108 +msgid "Unhandled exception occurred" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:172 +#: neutron/plugins/embrane/base_plugin.py:191 +#, python-format +msgid "The following routers have not physical match: %s" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:177 +#, python-format +msgid "Requested router: %s" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:229 +#, python-format +msgid "Deleting router=%s" +msgstr "" + +#: neutron/plugins/embrane/agent/operations/router_operations.py:97 +#, python-format +msgid "The router %s had no physical representation,likely already deleted" +msgstr "" + +#: neutron/plugins/embrane/agent/operations/router_operations.py:126 +#, python-format +msgid "Interface %s not found in the heleos back-end,likely already deleted" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:23 +#: neutron/services/loadbalancer/drivers/embrane/config.py:25 +msgid "ESM management root address" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:25 +#: neutron/services/loadbalancer/drivers/embrane/config.py:27 +msgid "ESM admin username." +msgstr "" + +#: neutron/plugins/embrane/common/config.py:28 +#: neutron/services/loadbalancer/drivers/embrane/config.py:30 +msgid "ESM admin password." 
+msgstr "" + +#: neutron/plugins/embrane/common/config.py:30 +msgid "Router image id (Embrane FW/VPN)" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:32 +msgid "In band Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:34 +msgid "Out of band Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:36 +msgid "Management Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:38 +msgid "Dummy user traffic Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:40 +#: neutron/services/loadbalancer/drivers/embrane/config.py:42 +msgid "Shared resource pool id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:42 +#: neutron/services/loadbalancer/drivers/embrane/config.py:49 +msgid "Define if the requests have run asynchronously or not" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:49 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:50 +#, python-format +msgid "Dva is pending for the following reason: %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:50 +msgid "" +"Dva can't be found to execute the operation, probably was cancelled " +"through the heleos UI" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:52 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:53 +#, python-format +msgid "Dva seems to be broken for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:53 +#, python-format +msgid "Dva interface seems to be broken for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:55 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:54 +#, python-format +msgid "Dva creation failed reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:56 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:55 +#, python-format +msgid "Dva creation is in pending state for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:58 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:57 +#, python-format +msgid "Dva configuration failed for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:59 +#, python-format +msgid "" +"Failed to delete the backend router for reason %s. 
Please remove it " +"manually through the heleos UI" +msgstr "" + +#: neutron/plugins/embrane/common/exceptions.py:22 +#, python-format +msgid "An unexpected error occurred:%(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/common/exceptions.py:26 +#, python-format +msgid "%(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/common/utils.py:45 +msgid "No ip allocation set" +msgstr "" + +#: neutron/plugins/embrane/l2base/support_exceptions.py:22 +#, python-format +msgid "Cannot retrieve utif info for the following reason: %(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py:44 +msgid "" +"No segmentation_id found for the network, please be sure that " +"tenant_network_type is vlan" +msgstr "" + +#: neutron/plugins/hyperv/db.py:40 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:113 +#: neutron/plugins/openvswitch/ovs_db_v2.py:131 +#, python-format +msgid "" +"Reserving vlan %(vlan_id)s on physical network %(physical_network)s from " +"pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:55 +#, python-format +msgid "Reserving flat physical network %(physical_network)s from pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:78 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:136 +#: neutron/plugins/ml2/drivers/type_vlan.py:204 +#: neutron/plugins/openvswitch/ovs_db_v2.py:155 +#, python-format +msgid "" +"Reserving specific vlan %(vlan_id)s on physical network " +"%(physical_network)s from pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:135 +#, python-format +msgid "Releasing vlan %(vlan_id)s on physical network %(physical_network)s" +msgstr "" + +#: neutron/plugins/hyperv/db.py:140 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:177 +#: neutron/plugins/openvswitch/ovs_db_v2.py:196 +#, python-format +msgid "vlan_id %(vlan_id)s on physical network %(physical_network)s not found" +msgstr "" + +#: neutron/plugins/hyperv/db.py:165 neutron/plugins/hyperv/db.py:178 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:64 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:83 +#: neutron/plugins/ml2/drivers/type_vlan.py:128 +#: neutron/plugins/ml2/drivers/type_vlan.py:149 +#: neutron/plugins/openvswitch/ovs_db_v2.py:87 +#: neutron/plugins/openvswitch/ovs_db_v2.py:105 +#, python-format +msgid "" +"Removing vlan %(vlan_id)s on physical network %(physical_network)s from " +"pool" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:44 +msgid "Network type for tenant networks (local, flat, vlan or none)" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:48 +#: neutron/plugins/linuxbridge/common/config.py:33 +#: neutron/plugins/mlnx/common/config.py:30 +#: neutron/plugins/openvswitch/common/config.py:49 +msgid "List of :: or " +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:76 +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:98 +#, python-format +msgid "segmentation_id specified for %s network" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:83 +#, python-format +msgid "physical_network specified for %s network" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:125 +msgid "physical_network not provided" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:178 +#, python-format +msgid "Invalid tenant_network_type: %s. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:201 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:308 +#: neutron/plugins/ml2/drivers/type_vlan.py:94 +#: neutron/plugins/mlnx/mlnx_plugin.py:178 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:361 +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:226 +#, python-format +msgid "Network type %s not supported" +msgstr "" + +#: neutron/plugins/hyperv/rpc_callbacks.py:81 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:123 +#: neutron/plugins/mlnx/rpc_callbacks.py:101 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:125 +#, python-format +msgid "Device %(device)s no longer exists on %(agent_id)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:50 +msgid "" +"List of : where the physical networks can be " +"expressed with wildcards, e.g.: .\"*:external\"" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:56 +msgid "Private vswitch name used for local networks" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:58 +#: neutron/plugins/linuxbridge/common/config.py:64 +#: neutron/plugins/mlnx/common/config.py:67 +#: neutron/plugins/nec/common/config.py:29 +#: neutron/plugins/oneconvergence/lib/config.py:47 +#: neutron/plugins/openvswitch/common/config.py:64 +#: neutron/plugins/ryu/common/config.py:43 +msgid "" +"The number of seconds the agent will wait between polling for local " +"device changes." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:62 +msgid "" +"Enables metrics collections for switch ports by using Hyper-V's metric " +"APIs. Collected data can by retrieved by other apps and services, e.g.: " +"Ceilometer. Requires Hyper-V / Windows Server 2012 and above" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:69 +msgid "" +"Specifies the maximum number of retries to enable Hyper-V's port metrics " +"collection. The agent will try to enable the feature once every " +"polling_interval period for at most metrics_max_retries or until it " +"succeedes." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:151 +#, python-format +msgid "Failed reporting state! %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:186 +#, python-format +msgid "Invalid physical network mapping: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:207 +#, python-format +msgid "network_delete received. Deleting network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:213 +#, python-format +msgid "Network %s not defined on agent." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:216 +msgid "port_delete received" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:221 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:171 +msgid "port_update received" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:243 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:129 +#, python-format +msgid "Provisioning network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:256 +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for network " +"%(net_uuid)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:268 +#, python-format +msgid "Reclaiming local network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:276 +#, python-format +msgid "Binding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:289 +#, python-format +msgid "Binding VLAN ID %(segmentation_id)s to switch port %(port_id)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:302 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:114 +#, python-format +msgid "Unsupported network type %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:311 +#, python-format +msgid "Network %s is not avalailable on this agent" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:315 +#, python-format +msgid "Unbinding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:328 +#, python-format +msgid "Port metrics enabled for port: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:332 +#, python-format +msgid "Port metrics raw enabling for port: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:357 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:209 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:312 +#, python-format +msgid "No port %s defined on agent." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:374 +#, python-format +msgid "Adding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:377 +#, python-format +msgid "Port %(device)s updated. Details: %(device_details)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:403 +#, python-format +msgid "Removing port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:411 +#, python-format +msgid "Removing port failed for device %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:436 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:962 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:382 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1306 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1354 +msgid "Agent out of sync with plugin!" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:444 +msgid "Agent loop has new devices!" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:451 +#, python-format +msgid "Error in agent event loop: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:459 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:405 +#, python-format +msgid "Loop iteration exceeded interval (%(polling_interval)s vs. 
%(elapsed)s)" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:472 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:269 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:159 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1540 +msgid "Agent initialized successfully, now running... " +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:62 +#, python-format +msgid "Hyper-V Exception: %(hyperv_exeption)s while adding rule: %(rule)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:72 +#, python-format +msgid "Hyper-V Exception: %(hyperv_exeption)s while removing rule: %(rule)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:92 +msgid "Aplying port filter." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:95 +msgid "Updating port rules." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:108 +#, python-format +msgid "Creating %(new)s new rules, removing %(old)s old rules." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:119 +msgid "Removing port filter" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:36 +#, python-format +msgid "HyperVException: %(msg)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:81 +#, python-format +msgid "Vnic not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:116 +#, python-format +msgid "Job failed with error %d" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:135 +#, python-format +msgid "" +"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s" +" - %(err_desc)s - Error code: %(err_code)d" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:144 +#, python-format +msgid "WMI job failed with status %(job_state)d. Error details: %(error)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:148 +#, python-format +msgid "WMI job failed with status %d. No error description available" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:153 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:167 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:187 +#, python-format +msgid "" +"Failed to disconnect port %(switch_port_name)s from switch " +"%(vswitch_name)s with error %(ret_val)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:198 +#, python-format +msgid "" +"Failed to delete port %(switch_port_name)s from switch %(vswitch_name)s " +"with error %(ret_val)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:205 +#: neutron/plugins/hyperv/agent/utilsv2.py:135 +#, python-format +msgid "VSwitch not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:246 +#: neutron/plugins/hyperv/agent/utils.py:250 +msgid "Metrics collection is not supported on this version of Hyper-V" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:32 +msgid "Force V1 WMI utility classes" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:61 +msgid "" +"V1 virtualization namespace no longer supported on Windows Server / " +"Hyper-V Server 2012 R2 or above." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:68 +#, python-format +msgid "Loading class: %(module_name)s.%(class_name)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsv2.py:158 +#: neutron/plugins/hyperv/agent/utilsv2.py:318 +#, python-format +msgid "Port Allocation not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsv2.py:268 +#, python-format +msgid "Cannot get VM summary data for: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:77 +#, python-format +msgid "The IP addr of available SDN-VE controllers: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:80 +#, python-format +msgid "The SDN-VE controller IP address: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:97 +#, python-format +msgid "unable to serialize object type: '%s'" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:164 +#, python-format +msgid "" +"Sending request to SDN-VE. url: %(myurl)s method: %(method)s body: " +"%(body)s header: %(header)s " +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:177 +#, python-format +msgid "Error: Could not reach server: %(url)s Exception: %(excp)s." +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:184 +#, python-format +msgid "Error message: %(reply)s -- Status: %(status)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:187 +#, python-format +msgid "Received response status: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:194 +#, python-format +msgid "Deserialized body: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:236 +msgid "Bad resource for forming a list request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:246 +msgid "Bad resource for forming a show request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:256 +msgid "Bad resource for forming a create request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:268 +msgid "Bad resource for forming a update request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:279 +msgid "Bad resource for forming a delete request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:307 +#, python-format +msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:369 +#, python-format +msgid "Did not find tenant: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:32 +msgid "Fake SDNVE controller initialized" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:35 +msgid "Fake SDNVE controller: list" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:39 +msgid "Fake SDNVE controller: show" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:43 +msgid "Fake SDNVE controller: create" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:47 +msgid "Fake SDNVE controller: update" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:51 +msgid "Fake SDNVE controller: delete" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:55 +msgid "Fake SDNVE controller: get tenant by id" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:59 +msgid "Fake SDNVE controller: check and create tenant" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:63 +msgid "Fake SDNVE controller: get controller" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:152 +msgid "Set a new controller if needed." 
+msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:158 +#, python-format +msgid "Set the controller to a new controller: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:166 +#, python-format +msgid "Original SDN-VE HTTP request: %(orig)s; New request: %(new)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:176 +#, python-format +msgid "Create network in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:185 +msgid "Create net failed: no SDN-VE tenant." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:196 +#, python-format +msgid "Create net failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:203 +#, python-format +msgid "Update network in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:223 +#, python-format +msgid "Update net failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:229 +#, python-format +msgid "Delete network in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:239 +#, python-format +msgid "Delete net failed after deleting the network in DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:244 +#, python-format +msgid "Get network in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:250 +msgid "Get networks in progress" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:260 +#, python-format +msgid "Create port in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:276 +msgid "Create port does not have tenant id info" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:282 +#, python-format +msgid "Create port does not have tenant id info; obtained is: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:303 +#, python-format +msgid "Create port failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:310 +#, python-format +msgid "Update port in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:337 +#, python-format +msgid "Update port failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:343 +#, python-format +msgid "Delete port in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:356 +#, python-format +msgid "Delete port operation failed in SDN-VE after deleting the port from DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:365 +#, python-format +msgid "Create subnet in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:377 +#, python-format +msgid "Create subnet failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:379 +#, python-format +msgid "Subnet created: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:385 +#, python-format +msgid "Update subnet in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:410 +#, python-format +msgid "Update subnet failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:416 +#, python-format +msgid "Delete subnet in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:421 +#, python-format +msgid "" +"Delete subnet operation failed in SDN-VE after deleting the subnet from " +"DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:430 +#, python-format +msgid "Create router in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:433 +#, python-format +msgid 
"Ignoring admin_state_up=False for router=%r. Overriding with True" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:443 +msgid "Create router failed: no SDN-VE tenant." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:451 +#, python-format +msgid "Create router failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:453 +#, python-format +msgid "Router created: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:458 +#, python-format +msgid "Update router in progress: id=%(id)s router=%(router)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:465 +msgid "admin_state_up=False routers are not supported." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:489 +#, python-format +msgid "Update router failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:495 +#, python-format +msgid "Delete router in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:502 +#, python-format +msgid "" +"Delete router operation failed in SDN-VE after deleting the router in DB:" +" %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:507 +#, python-format +msgid "" +"Add router interface in progress: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:515 +#, python-format +msgid "SdnvePluginV2.add_router_interface called. Port info: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:529 +#, python-format +msgid "Update router-add-interface failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:532 +#, python-format +msgid "Added router interface: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:536 +#, python-format +msgid "" +"Add router interface only called: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:546 +msgid "" +"SdnvePluginV2._add_router_interface_only: failed to add the interface in " +"the roll back. 
of a remove_router_interface operation" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:552 +#, python-format +msgid "" +"Remove router interface in progress: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:561 +msgid "No port ID" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:563 +#, python-format +msgid "SdnvePluginV2.remove_router_interface port: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:567 +msgid "No fixed IP" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:572 +#, python-format +msgid "SdnvePluginV2.remove_router_interface subnet_id: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:595 +#, python-format +msgid "Update router-remove-interface failed SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:616 +#, python-format +msgid "Create floatingip in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:627 +#, python-format +msgid "Creating floating ip operation failed in SDN-VE controller: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:630 +#, python-format +msgid "Created floatingip : %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:635 +#, python-format +msgid "Update floatingip in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:655 +#, python-format +msgid "Update floating ip failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:661 +#, python-format +msgid "Delete floatingip in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:666 +#, python-format +msgid "Delete floatingip failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:139 +msgid "info_update received" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:144 +#, python-format +msgid "info_update received. New controlleris to be set to: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:150 +msgid "info_update received. New controlleris set to be out of band" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:195 +#, python-format +msgid "Mapping physical network %(physical_network)s to interface %(interface)s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:201 +#, python-format +msgid "" +"Interface %(interface)s for physical network %(physical_network)s does " +"not exist. Agent terminated!" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:219 +msgid "Agent in the rpc loop." +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:241 +#, python-format +msgid "Controller IPs: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:263 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1527 +#, python-format +msgid "%s Agent terminated!" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:28 +msgid "If set to True uses a fake controller." 
+msgstr "" + +#: neutron/plugins/ibm/common/config.py:30 +msgid "Base URL for SDN-VE controller REST API" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:32 +msgid "List of IP addresses of SDN-VE controller(s)" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:34 +msgid "SDN-VE RPC subject" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:36 +msgid "SDN-VE controller port number" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:38 +msgid "SDN-VE request/response format" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:40 +msgid "SDN-VE administrator user id" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:42 +msgid "SDN-VE administrator password" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:44 +#: neutron/plugins/nec/common/config.py:24 +#: neutron/plugins/openvswitch/common/config.py:28 +#: neutron/plugins/ryu/common/config.py:22 +msgid "Integration bridge to use" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:46 +msgid "Reset the integration bridge before use" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:48 +msgid "Indicating if controller is out of band or not" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:51 +msgid "List of :" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:53 +msgid "Tenant type: OVERLAY (default) or OF" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:55 +msgid "" +"The string in tenant description that indicates the tenant is a OVERLAY " +"tenant" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:58 +msgid "The string in tenant description that indicates the tenant is a OF tenant" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:64 +msgid "Agent polling interval if necessary" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:66 +msgid "Using root helper" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:68 +msgid "Whether using rpc" +msgstr "" + +#: neutron/plugins/ibm/common/exceptions.py:23 +#, python-format +msgid "" +"An unexpected error occurred in the SDN-VE Plugin. Here is the error " +"message: %(msg)s" +msgstr "" + +#: neutron/plugins/ibm/common/exceptions.py:28 +#, python-format +msgid "The input does not contain nececessary info: %(msg)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:131 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:155 +#: neutron/plugins/ml2/rpc.py:173 neutron/plugins/ml2/rpc.py:195 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:133 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:158 +#, python-format +msgid "Device %(device)s not bound to the agent host %(host)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:149 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:152 +#, python-format +msgid "Device %(device)s up on %(agent_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:269 +#: neutron/plugins/mlnx/mlnx_plugin.py:198 +#, python-format +msgid "Invalid tenant_network_type: %s. Service terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:280 +msgid "Linux Bridge Plugin initialization complete" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:306 +#, python-format +msgid "%s. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:351 +#: neutron/plugins/mlnx/mlnx_plugin.py:242 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:413 +msgid "provider:segmentation_id specified for flat network" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:360 +#: neutron/plugins/mlnx/mlnx_plugin.py:250 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:422 +#, python-format +msgid "provider:segmentation_id out of range (%(min_id)s through %(max_id)s)" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:367 +#: neutron/plugins/mlnx/mlnx_plugin.py:258 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:442 +msgid "provider:physical_network specified for local network" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:373 +#: neutron/plugins/mlnx/mlnx_plugin.py:262 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:448 +msgid "provider:segmentation_id specified for local network" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:83 +msgid "VXLAN is enabled, a valid local_ip must be provided" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:97 +msgid "Invalid Network ID, will lead to incorrect bridgename" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:104 +msgid "Invalid VLAN ID, will lead to incorrect subinterface name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:111 +msgid "Invalid Interface ID, will lead to incorrect tap device name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:120 +#, python-format +msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:183 +#, python-format +msgid "Failed creating vxlan interface for %(segmentation_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:215 +#, python-format +msgid "" +"Creating subinterface %(interface)s for VLAN %(vlan_id)s on interface " +"%(physical_interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:228 +#, python-format +msgid "Done creating subinterface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:235 +#, python-format +msgid "Creating vxlan interface %(interface)s for VNI %(segmentation_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:250 +#, python-format +msgid "Done creating vxlan interface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:303 +#, python-format +msgid "Starting bridge %(bridge_name)s for subinterface %(interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:318 +#, python-format +msgid "Done starting bridge %(bridge_name)s for subinterface %(interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:340 +#, python-format +msgid "Unable to add %(interface)s to %(bridge_name)s! 
Exception: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:353 +#, python-format +msgid "Unable to add vxlan interface for network %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:360 +#, python-format +msgid "No mapping for physical network %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:369 +#, python-format +msgid "Unknown network_type %(network_type)s for network %(network_id)s." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:381 +#, python-format +msgid "Tap device: %s does not exist on this host, skipped" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:399 +#, python-format +msgid "Adding device %(tap_device_name)s to bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:408 +#, python-format +msgid "%(tap_device_name)s already exists on bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:446 +#, python-format +msgid "Deleting bridge %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:453 +#, python-format +msgid "Done deleting bridge %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:456 +#, python-format +msgid "Cannot delete bridge %s, does not exist" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:470 +#, python-format +msgid "Removing device %(interface_name)s from bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:477 +#, python-format +msgid "Done removing device %(interface_name)s from bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:483 +#, python-format +msgid "" +"Cannot remove device %(interface_name)s bridge %(bridge_name)s does not " +"exist" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:491 +#, python-format +msgid "Deleting subinterface %s for vlan" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:498 +#, python-format +msgid "Done deleting subinterface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:502 +#, python-format +msgid "Deleting vxlan interface %s for vlan" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:507 +#, python-format +msgid "Done deleting vxlan interface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:521 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:555 +#, python-format +msgid "" +"Option \"%(option)s\" must be supported by command \"%(command)s\" to " +"enable %(mode)s mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:532 +msgid "No valid Segmentation ID to perform UCAST test." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:549 +msgid "" +"VXLAN muticast group must be provided in vxlan_group option to enable " +"VXLAN MCAST mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:574 +msgid "" +"Linux kernel vxlan module and iproute2 3.8 or above are required to " +"enable VXLAN." 
+msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:584 +#, python-format +msgid "Using %s VXLAN mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:661 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:162 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:279 +msgid "network_delete received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:675 +#, python-format +msgid "port_update RPC received for port: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:678 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:320 +msgid "fdb_add received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:700 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:348 +msgid "fdb_remove received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:722 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:427 +msgid "update chg_ip received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:747 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:450 +msgid "fdb_update received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:804 +msgid "Unable to obtain MAC address for unique ID. Agent terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:808 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:252 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:144 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:109 +#, python-format +msgid "RPC agent_id: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:879 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1114 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1131 +#, python-format +msgid "Port %(device)s updated. 
Details: %(details)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:912 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:933 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:368 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1134 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1196 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1151 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1215 +#, python-format +msgid "Device %s not defined on plugin" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:919 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1164 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1181 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1183 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1200 +#, python-format +msgid "Attachment %s removed" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:927 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1171 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1188 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1190 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1207 +#, python-format +msgid "port_removed failed for %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:931 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:366 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1193 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1212 +#, python-format +msgid "Port %s updated." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:957 +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:975 +#, python-format +msgid "Agent loop found changes! %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:981 +#, python-format +msgid "Error in agent loop. Devices info: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1007 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:419 +#, python-format +msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1010 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:422 +#, python-format +msgid "Interface mappings: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:29 +#: neutron/plugins/mlnx/common/config.py:26 +msgid "Network type for tenant networks (local, vlan, or none)" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:39 +msgid "" +"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " +"plugin using linuxbridge mechanism driver" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:43 +msgid "TTL for vxlan interface protocol packets." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:45 +msgid "TOS for vxlan interface protocol packets." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:47 +msgid "Multicast group for vxlan interface." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:49 +msgid "Local IP address of the VXLAN endpoints." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:51 +msgid "" +"Extension to use alongside ml2 plugin's l2population mechanism driver. It" +" enables the plugin to populate VXLAN forwarding table." 
+msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:59 +#: neutron/plugins/mlnx/common/config.py:45 +msgid "List of :" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:67 +#: neutron/plugins/mlnx/common/config.py:70 +msgid "Enable server RPC compatibility with old agents" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:142 +#: neutron/plugins/ml2/drivers/type_vlan.py:210 +#: neutron/plugins/openvswitch/ovs_db_v2.py:161 +#, python-format +msgid "" +"Reserving specific vlan %(vlan_id)s on physical network " +"%(physical_network)s outside pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:166 +#: neutron/plugins/ml2/drivers/type_vlan.py:259 +#: neutron/plugins/openvswitch/ovs_db_v2.py:191 +#, python-format +msgid "" +"Releasing vlan %(vlan_id)s on physical network %(physical_network)s to " +"pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:171 +#: neutron/plugins/ml2/drivers/type_vlan.py:254 +#: neutron/plugins/openvswitch/ovs_db_v2.py:186 +#, python-format +msgid "" +"Releasing vlan %(vlan_id)s on physical network %(physical_network)s " +"outside pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:202 +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:212 +msgid "get_port_from_device() called" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:230 +#, python-format +msgid "set_port_status as %s called" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:60 +#, python-format +msgid "Flavor %(flavor)s could not be found" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:64 +msgid "Failed to add flavor binding" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:73 +msgid "Start initializing metaplugin" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:125 +#, python-format +msgid "default_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:131 +#, python-format +msgid "default_l3_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:137 +#, python-format +msgid "rpc_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:169 +#, python-format +msgid "Plugin location: %s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:220 +#, python-format +msgid "Created network: %(net_id)s with flavor %(flavor)s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:226 +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:370 +msgid "Failed to add flavor bindings" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:363 +#, python-format +msgid "Created router: %(router_id)s with flavor %(flavor)s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:374 +#, python-format +msgid "Created router: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:66 +#, python-format +msgid "Update subnet failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:73 +msgid "Subnet in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:94 +#, python-format +msgid "Update network failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:101 +msgid "Network in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:122 +#, python-format +msgid "Update port failed: %s" +msgstr "" + +#: 
neutron/plugins/metaplugin/proxy_neutron_plugin.py:133 +msgid "Port in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:23 +msgid "" +"Comma separated list of flavor:neutron_plugin for plugins to load. " +"Extension method is searched in the list order and the first one is used." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:29 +msgid "" +"Comma separated list of flavor:neutron_plugin for L3 service plugins to " +"load. This is intended for specifying L2 plugins which support L3 " +"functions. If you use a router service plugin, set this blank." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:36 +msgid "" +"Default flavor to use, when flavor:network is not specified at network " +"creation." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:41 +msgid "" +"Default L3 flavor to use, when flavor:router is not specified at router " +"creation. Ignored if 'l3_plugin_list' is blank." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:47 +msgid "Comma separated list of supported extension aliases." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:51 +msgid "" +"Comma separated list of method:flavor to select specific plugin for a " +"method. This has priority over method search order based on " +"'plugin_list'." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:57 +msgid "Specifies flavor for plugin to handle 'q-plugin' RPC requests." +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:44 +#, python-format +msgid "MidoNet %(resource_type)s %(id)s could not be found" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:48 +#, python-format +msgid "MidoNet API error: %(msg)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:82 +#, python-format +msgid "MidoClient.create_bridge called: kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:92 +#, python-format +msgid "MidoClient.delete_bridge called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:102 +#, python-format +msgid "MidoClient.get_bridge called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:116 +#, python-format +msgid "MidoClient.update_bridge called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:136 +#, python-format +msgid "" +"MidoClient.create_dhcp called: bridge=%(bridge)s, cidr=%(cidr)s, " +"gateway_ip=%(gateway_ip)s, host_rts=%(host_rts)s, " +"dns_servers=%(dns_servers)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:154 +#, python-format +msgid "" +"MidoClient.add_dhcp_host called: bridge=%(bridge)s, cidr=%(cidr)s, " +"ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:159 +msgid "Tried to add tonon-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:173 +#, python-format +msgid "" +"MidoClient.remove_dhcp_host called: bridge=%(bridge)s, cidr=%(cidr)s, " +"ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:178 +msgid "Tried to delete mapping from non-existent subnet" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:183 +#, python-format +msgid "MidoClient.remove_dhcp_host: Deleting %(dh)r" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:196 +#, python-format +msgid "" +"MidoClient.delete_dhcp_host called: bridge_id=%(bridge_id)s, " +"cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:211 +#, python-format +msgid "MidoClient.delete_dhcp called: bridge=%(bridge)s, 
cidr=%(cidr)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:218 +msgid "Tried to delete non-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:230 +#, python-format +msgid "MidoClient.delete_port called: id=%(id)s, delete_chains=%(delete_chains)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:245 +#, python-format +msgid "MidoClient.get_port called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:259 +#, python-format +msgid "MidoClient.add_bridge_port called: bridge=%(bridge)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:271 +#, python-format +msgid "MidoClient.update_port called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:296 +#, python-format +msgid "MidoClient.create_router called: kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:306 +#, python-format +msgid "MidoClient.delete_router called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:316 +#, python-format +msgid "MidoClient.get_router called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:330 +#, python-format +msgid "MidoClient.update_router called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:351 +#, python-format +msgid "" +"MidoClient.add_dhcp_route_option called: bridge=%(bridge)s, " +"cidr=%(cidr)s, gw_ip=%(gw_ip)sdst_ip=%(dst_ip)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:359 +msgid "Tried to access non-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:391 +#, python-format +msgid "MidoClient.unlink called: port=%(port)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:396 +#, python-format +msgid "Attempted to unlink a port that was not linked. 
%s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:402 +#, python-format +msgid "" +"MidoClient.remove_rules_by_property called: tenant_id=%(tenant_id)s, " +"chain_name=%(chain_name)skey=%(key)s, value=%(value)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:429 +#, python-format +msgid "" +"MidoClient.create_router_chains called: router=%(router)s, " +"inbound_chain_name=%(in_chain)s, outbound_chain_name=%(out_chain)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:452 +#, python-format +msgid "MidoClient.delete_router_chains called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:467 +#, python-format +msgid "MidoClient.delete_port_chains called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:479 +#, python-format +msgid "" +"MidoClient.get_link_port called: router=%(router)s, " +"peer_router_id=%(peer_router_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:518 +#, python-format +msgid "" +"MidoClient.add_static_nat called: tenant_id=%(tenant_id)s, " +"chain_name=%(chain_name)s, from_ip=%(from_ip)s, to_ip=%(to_ip)s, " +"port_id=%(port_id)s, nat_type=%(nat_type)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:526 +#, python-format +msgid "Invalid NAT type passed in %s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:578 +#, python-format +msgid "MidoClient.remote_static_route called: router=%(router)s, ip=%(ip)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:589 +#, python-format +msgid "" +"MidoClient.update_port_chains called: " +"port=%(port)sinbound_chain_id=%(inbound_chain_id)s, " +"outbound_chain_id=%(outbound_chain_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:600 +#, python-format +msgid "MidoClient.create_chain called: tenant_id=%(tenant_id)s name=%(name)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:608 +#, python-format +msgid "MidoClient.delete_chain called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:614 +#, python-format +msgid "" +"MidoClient.delete_chains_by_names called: tenant_id=%(tenant_id)s " +"names=%(names)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:625 +#, python-format +msgid "" +"MidoClient.get_chain_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:636 +#, python-format +msgid "" +"MidoClient.get_port_group_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:650 +#, python-format +msgid "MidoClient.create_port_group called: tenant_id=%(tenant_id)s name=%(name)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:659 +#, python-format +msgid "" +"MidoClient.delete_port_group_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:665 +#, python-format +msgid "Deleting pg %(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:671 +#, python-format +msgid "" +"MidoClient.add_port_to_port_group_by_name called: tenant_id=%(tenant_id)s" +" name=%(name)s port_id=%(port_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:685 +#, python-format +msgid "MidoClient.remove_port_from_port_groups called: port_id=%(port_id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:80 +#, python-format +msgid "Invalid nat_type %s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:137 +#, python-format +msgid "Unrecognized direction %s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:171 +#, 
python-format +msgid "There is no %(name)s with ID %(id)s in MidoNet." +msgstr "" + +#: neutron/plugins/midonet/plugin.py:183 +#: neutron/plugins/ml2/drivers/mech_arista/exceptions.py:23 +#: neutron/plugins/ml2/drivers/mech_arista/exceptions.py:27 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:215 +msgid "provider_router_id should be configured in the plugin config file" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:385 +#, python-format +msgid "MidonetPluginV2.create_subnet called: subnet=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:417 +#, python-format +msgid "MidonetPluginV2.create_subnet exiting: sn_entry=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:426 +#, python-format +msgid "MidonetPluginV2.delete_subnet called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:445 +msgid "MidonetPluginV2.delete_subnet exiting" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:452 +#, python-format +msgid "MidonetPluginV2.create_network called: network=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:467 +#, python-format +msgid "MidonetPluginV2.create_network exiting: net=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:476 +#, python-format +msgid "MidonetPluginV2.update_network called: id=%(id)r, network=%(network)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:485 +#, python-format +msgid "MidonetPluginV2.update_network exiting: net=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:493 +#, python-format +msgid "MidonetPluginV2.get_network called: id=%(id)r, fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:498 +#, python-format +msgid "MidonetPluginV2.get_network exiting: qnet=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:503 +#, python-format +msgid "MidonetPluginV2.delete_network called: id=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:511 +#, python-format +msgid "Failed to delete neutron db, while Midonet bridge=%r had been deleted" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:516 +#, python-format +msgid "MidonetPluginV2.create_port called: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:577 +#, python-format +msgid "Failed to create a port on network %(net_id)s: %(err)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:582 +#, python-format +msgid "MidonetPluginV2.create_port exiting: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:587 +#, python-format +msgid "MidonetPluginV2.get_port called: id=%(id)s fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:594 +#, python-format +msgid "There is no port with ID %(id)s in MidoNet." +msgstr "" + +#: neutron/plugins/midonet/plugin.py:598 +#, python-format +msgid "MidonetPluginV2.get_port exiting: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:603 +#, python-format +msgid "MidonetPluginV2.get_ports called: filters=%(filters)s fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:612 +#, python-format +msgid "" +"MidonetPluginV2.delete_port called: id=%(id)s " +"l3_port_check=%(l3_port_check)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:646 +#, python-format +msgid "Failed to delete DHCP mapping for port %(id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:721 +#, python-format +msgid "MidonetPluginV2.create_router called: router=%(router)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:768 +#, python-format +msgid "MidonetPluginV2.create_router exiting: router_data=%(router_data)s." 
+msgstr "" + +#: neutron/plugins/midonet/plugin.py:780 +#, python-format +msgid "" +"MidonetPluginV2.set_router_gateway called: id=%(id)s, " +"gw_router=%(gw_router)s, gw_ip=%(gw_ip)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:824 +#, python-format +msgid "MidonetPluginV2.remove_router_gateway called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:844 +#, python-format +msgid "MidonetPluginV2.update_router called: id=%(id)s router=%(router)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:884 +#, python-format +msgid "MidonetPluginV2.update_router exiting: router=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:895 +#, python-format +msgid "MidonetPluginV2.delete_router called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:995 +#, python-format +msgid "" +"MidonetPluginV2.add_router_interface called: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1019 +msgid "" +"DHCP agent is not working correctly. No port to reach the Metadata server" +" on this network" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1028 +#, python-format +msgid "" +"Failed to create MidoNet resources to add router interface. " +"info=%(info)s, router_id=%(router_id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1035 +#, python-format +msgid "MidonetPluginV2.add_router_interface exiting: info=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1075 +#, python-format +msgid "" +"MidonetPluginV2.update_floatingip called: id=%(id)s " +"floatingip=%(floatingip)s " +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1094 +#, python-format +msgid "MidonetPluginV2.update_floating_ip exiting: fip=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1116 +#, python-format +msgid "" +"MidonetPluginV2.create_security_group called: " +"security_group=%(security_group)s default_sg=%(default_sg)s " +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1145 +#, python-format +msgid "Failed to create MidoNet resources for sg %(sg)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1152 +#, python-format +msgid "MidonetPluginV2.create_security_group exiting: sg=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1158 +#, python-format +msgid "MidonetPluginV2.delete_security_group called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1190 +#, python-format +msgid "" +"MidonetPluginV2.create_security_group_rule called: " +"security_group_rule=%(security_group_rule)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1200 +#, python-format +msgid "MidonetPluginV2.create_security_group_rule exiting: rule=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1210 +#, python-format +msgid "MidonetPluginV2.delete_security_group_rule called: sg_rule_id=%s" +msgstr "" + +#: neutron/plugins/midonet/common/config.py:23 +msgid "MidoNet API server URI." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:25 +msgid "MidoNet admin username." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:28 +msgid "MidoNet admin password." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:31 +msgid "ID of the project that MidoNet admin userbelongs to." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:34 +msgid "Virtual provider router ID." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:37 +msgid "Operational mode. Internal dev use only." 
+msgstr "" + +#: neutron/plugins/midonet/common/config.py:40 +msgid "Path to midonet host uuid file" +msgstr "" + +#: neutron/plugins/ml2/config.py:22 +msgid "" +"List of network type driver entrypoints to be loaded from the " +"neutron.ml2.type_drivers namespace." +msgstr "" + +#: neutron/plugins/ml2/config.py:26 +msgid "Ordered list of network_types to allocate as tenant networks." +msgstr "" + +#: neutron/plugins/ml2/config.py:30 +msgid "" +"An ordered list of networking mechanism driver entrypoints to be loaded " +"from the neutron.ml2.mechanism_drivers namespace." +msgstr "" + +#: neutron/plugins/ml2/db.py:41 +#, python-format +msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s" +msgstr "" + +#: neutron/plugins/ml2/db.py:85 +#, python-format +msgid "Multiple ports have port_id starting with %s" +msgstr "" + +#: neutron/plugins/ml2/db.py:91 +#, python-format +msgid "get_port_from_device_mac() called for mac %s" +msgstr "" + +#: neutron/plugins/ml2/db.py:133 +#, python-format +msgid "No binding found for port %(port_id)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:36 +#, python-format +msgid "Configured type driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:41 +#, python-format +msgid "Loaded type driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:49 +#, python-format +msgid "" +"Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s'" +" is already registered for type '%(type)s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:57 +#, python-format +msgid "Registered types: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:65 +#, python-format +msgid "No type driver for tenant network_type: %s. Service terminated!" +msgstr "" + +#: neutron/plugins/ml2/managers.py:69 +#, python-format +msgid "Tenant network_types: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:73 +#, python-format +msgid "Initializing driver for type '%s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:82 +#: neutron/plugins/ml2/drivers/type_tunnel.py:116 +#, python-format +msgid "network_type value '%s' not supported" +msgstr "" + +#: neutron/plugins/ml2/managers.py:108 +#, python-format +msgid "Failed to release segment '%s' because network type is not supported." 
+msgstr "" + +#: neutron/plugins/ml2/managers.py:124 +#, python-format +msgid "Configured mechanism driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:130 +#, python-format +msgid "Loaded mechanism driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:142 +#, python-format +msgid "Registered mechanism drivers: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:149 +#, python-format +msgid "Initializing mechanism driver '%s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:171 +#, python-format +msgid "Mechanism driver '%(name)s' failed in %(method)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:447 +#, python-format +msgid "" +"Attempting to bind port %(port)s on host %(host)s for vnic_type " +"%(vnic_type)s with profile %(profile)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:458 +#, python-format +msgid "" +"Bound port: %(port)s, host: %(host)s, vnic_type: %(vnic_type)s, profile: " +"%(profile)sdriver: %(driver)s, vif_type: %(vif_type)s, vif_details: " +"%(vif_details)s, segment: %(segment)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:474 +#, python-format +msgid "Mechanism driver %s failed in bind_port" +msgstr "" + +#: neutron/plugins/ml2/managers.py:478 +#, python-format +msgid "Failed to bind port %(port)s on host %(host)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:121 +msgid "Modular L2 Plugin initialization complete" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:152 +msgid "network_type required" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:191 +#, python-format +msgid "Network %s has no segments" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:254 +msgid "binding:profile value too large" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:290 +#, python-format +msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:301 +#, python-format +msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:354 +#, python-format +msgid "" +"In _notify_port_updated(), no bound segment for port %(port_id)s on " +"network %(network_id)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:396 +#, python-format +msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:456 +#, python-format +msgid "Deleting network %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:468 +#, python-format +msgid "Ports to auto-delete: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:474 neutron/plugins/ml2/plugin.py:594 +msgid "Tenant-owned ports exist" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:482 +#, python-format +msgid "Subnets to auto-delete: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:493 +#, python-format +msgid "Deleting network record %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:501 neutron/plugins/ml2/plugin.py:607 +msgid "Committing transaction" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:507 +msgid "A concurrent port creation has occurred" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:516 +#, python-format +msgid "Exception auto-deleting port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:524 +#, python-format +msgid "Exception auto-deleting subnet %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:533 +msgid "mechanism_manager.delete_network_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:547 +#, python-format +msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" +msgstr "" + +#: 
neutron/plugins/ml2/plugin.py:577 +#, python-format +msgid "Deleting subnet %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:588 +#, python-format +msgid "Ports to auto-deallocate: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:603 +msgid "Deleting subnet record" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:623 +#, python-format +msgid "Exception deleting fixed_ip from port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:633 +msgid "mechanism_manager.delete_subnet_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:662 +#, python-format +msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:718 +#: neutron/tests/unit/ml2/test_ml2_plugin.py:132 +#, python-format +msgid "Deleting port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:736 +#: neutron/tests/unit/ml2/test_ml2_plugin.py:133 +#, python-format +msgid "The port '%s' was deleted" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:745 +msgid "Calling base delete_port" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:757 +msgid "mechanism_manager.delete_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:769 +#, python-format +msgid "Port %(port)s updated up by agent not found" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:81 +#, python-format +msgid "Device %(device)s details requested by agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:90 +#, python-format +msgid "Device %(device)s requested by agent %(agent_id)s not found in database" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:97 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s has network " +"%(network_id)s with no segments" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:107 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network " +"%(network_id)s not bound, vif_type: %(vif_type)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:118 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network " +"%(network_id)s invalid segment, vif_type: %(vif_type)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:142 +#, python-format +msgid "Returning: %s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:166 +#, python-format +msgid "Device %(device)s no longer exists at agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:190 +#, python-format +msgid "Device %(device)s up at agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/common/exceptions.py:23 +#, python-format +msgid "%(method)s failed." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:54 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:345 +#, python-format +msgid "Attempting to bind port %(port)s on network %(network)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:61 +#, python-format +msgid "Refusing to bind due to unsupported vnic_type: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:65 +#, python-format +msgid "Checking agent: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:70 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:355 +#, python-format +msgid "Bound using segment: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:73 +#, python-format +msgid "Attempting to bind with dead agent: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_hyperv.py:44 +#, python-format +msgid "Checking segment: %(segment)s for mappings: %(mappings)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_linuxbridge.py:44 +#: neutron/plugins/ml2/drivers/mech_ofagent.py:50 +#: neutron/plugins/ml2/drivers/mech_openvswitch.py:45 +#, python-format +msgid "" +"Checking segment: %(segment)s for mappings: %(mappings)s with " +"tunnel_types: %(tunnel_types)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:35 +msgid "CRD service Username" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:38 +msgid "CRD Service Password" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:40 +msgid "CRD Tenant Name" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:43 +msgid "CRD Auth URL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:46 +msgid "URL for connecting to CRD service" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:49 +msgid "Timeout value for connecting to CRD service in seconds" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:53 +msgid "Region name for connecting to CRD Service in admin context" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:57 +msgid "If set, ignore any SSL validation issues" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:60 +msgid "Auth strategy for connecting to neutron in admin context" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:63 +msgid "Location of ca certificates file to use for CRD client requests." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:87 +msgid "Initializing CRD client... " +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:29 +msgid "HTTP URL of Tail-f NCS REST interface." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:31 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:50 +msgid "HTTP username for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:33 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:52 +msgid "HTTP password for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:35 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:54 +msgid "HTTP timeout in seconds." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:48 +msgid "HTTP URL of OpenDaylight REST interface." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:56 +msgid "Tomcat session timeout in minutes." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:106 +#, python-format +msgid "Failed to authenticate with OpenDaylight: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:109 +#, python-format +msgid "Authentication Timed Out: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:297 +#, python-format +msgid "%(object_type)s not found (%(obj_id)s)" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:333 +#, python-format +msgid "ODL-----> sending URL (%s) <-----ODL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:334 +#, python-format +msgid "ODL-----> sending JSON (%s) <-----ODL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:358 +#, python-format +msgid "" +"Refusing to bind port for segment ID %(id)s, segment %(seg)s, phys net " +"%(physnet)s, and network type %(nettype)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:30 +msgid "" +"List of physical_network names with which flat networks can be created. " +"Use * to allow flat networks with arbitrary physical_network names." +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:68 +msgid "Arbitrary flat physical_network names allowed" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:73 +#, python-format +msgid "Allowable flat physical_network names: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:80 +msgid "ML2 FlatTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:85 +msgid "physical_network required for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:88 +#, python-format +msgid "physical_network '%s' unknown for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:95 +#, python-format +msgid "%s prohibited for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:109 +#, python-format +msgid "Reserving flat network on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:127 +#, python-format +msgid "Releasing flat network on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:130 +#, python-format +msgid "No flat network found on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:34 +msgid "" +"Comma-separated list of : tuples enumerating ranges of " +"GRE tunnel IDs that are available for tenant network allocation" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:85 +#, python-format +msgid "Reserving specific gre tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:89 +#, python-format +msgid "Reserving specific gre tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:102 +#, python-format +msgid "Allocating gre tunnel id %(gre_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:120 +#, python-format +msgid "Releasing gre tunnel %s to pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:125 +#, python-format +msgid "Releasing gre tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:128 +#, python-format +msgid "gre_id %s not found" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:138 +#, python-format +msgid "Skipping unreasonable gre ID range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:156 +#: neutron/plugins/openvswitch/ovs_db_v2.py:229 +#, python-format +msgid "Removing tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:171 
+msgid "get_gre_endpoints() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:180 +#, python-format +msgid "add_gre_endpoint() called for ip %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:186 +#, python-format +msgid "Gre endpoint with ip %s already exists" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_local.py:35 +msgid "ML2 LocalTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_local.py:46 +#, python-format +msgid "%s prohibited for local provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:61 +#, python-format +msgid "Invalid tunnel ID range: '%(range)s' - %(e)s. Agent terminated!" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:64 +#, python-format +msgid "%(type)s ID ranges: %(range)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:70 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:432 +#, python-format +msgid "provider:physical_network specified for %s network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:76 +#, python-format +msgid "segmentation_id required for %s provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:83 +#, python-format +msgid "%(key)s prohibited for %(tunnel)s provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:103 +msgid "Network_type value needed by the ML2 plugin" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:37 +msgid "" +"List of :: or " +"specifying physical_network names usable for VLAN provider and tenant " +"networks, as well as ranges of VLAN tags on each available for allocation" +" to tenant networks." +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:91 +msgid "Failed to parse network_vlan_ranges. Service terminated!" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:161 +msgid "VlanTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:166 +msgid "physical_network required for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:169 +#, python-format +msgid "physical_network '%s' unknown for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:175 +msgid "segmentation_id required for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:178 +#, python-format +msgid "segmentation_id out of range (%(min)s through %(max)s)" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:188 +#, python-format +msgid "%s prohibited for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:226 +#, python-format +msgid "" +"Allocating vlan %(vlan_id)s on physical network %(physical_network)s from" +" pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:264 +#, python-format +msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:37 +msgid "" +"Comma-separated list of : tuples enumerating ranges of " +"VXLAN VNI IDs that are available for tenant network allocation" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:41 +msgid "Multicast group for VXLAN. If unset, disables VXLAN multicast mode." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:93 +#, python-format +msgid "Reserving specific vxlan tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:97 +#, python-format +msgid "Reserving specific vxlan tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:110 +#, python-format +msgid "Allocating vxlan tunnel vni %(vxlan_vni)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:128 +#, python-format +msgid "Releasing vxlan tunnel %s to pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:133 +#, python-format +msgid "Releasing vxlan tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:136 +#, python-format +msgid "vxlan_vni %s not found" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:147 +#, python-format +msgid "Skipping unreasonable VXLAN VNI range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:192 +msgid "get_vxlan_endpoints() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:202 +#, python-format +msgid "add_vxlan_endpoint() called for ip %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:40 +msgid "Allowed physical networks" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:42 +msgid "Unused" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:93 +msgid "" +"Brocade Mechanism: failed to create network, network cannot be created in" +" the configured physical network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:99 +msgid "" +"Brocade Mechanism: failed to create network, only network type vlan is " +"supported" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:107 +msgid "Brocade Mechanism: failed to create network in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:109 +msgid "Brocade Mechanism: create_network_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:111 +#, python-format +msgid "" +"create network (precommit): %(network_id)s of network type = " +"%(network_type)s with vlan = %(vlan_id)s for tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:123 +msgid "create_network_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:143 +msgid "Brocade NOS driver: failed in create network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:146 +msgid "Brocade Mechanism: create_network_postcommmit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:148 +#, python-format +msgid "" +"created network (postcommit): %(network_id)s of network type = " +"%(network_type)s with vlan = %(vlan_id)s for tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:160 +msgid "delete_network_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:173 +msgid "Brocade Mechanism: failed to delete network in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:175 +msgid "Brocade Mechanism: delete_network_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:177 +#, python-format +msgid "" +"delete network (precommit): %(network_id)s with vlan = %(vlan_id)s for " +"tenant %(tenant_id)s" +msgstr "" + +#: 
neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:189 +msgid "delete_network_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:201 +msgid "Brocade NOS driver: failed to delete network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:203 +msgid "Brocade switch exception, delete_network_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:206 +#, python-format +msgid "" +"delete network (postcommit): %(network_id)s with vlan = %(vlan_id)s for " +"tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:224 +msgid "create_port_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:242 +msgid "Brocade Mechanism: failed to create port in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:244 +msgid "Brocade Mechanism: create_port_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:249 +msgid "create_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:273 +#, python-format +msgid "Brocade NOS driver: failed to associate mac %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:276 +msgid "Brocade switch exception: create_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:279 +#, python-format +msgid "" +"created port (postcommit): port_id=%(port_id)s network_id=%(network_id)s " +"tenant_id=%(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:287 +msgid "delete_port_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:296 +msgid "Brocade Mechanism: failed to delete port in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:298 +msgid "Brocade Mechanism: delete_port_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:303 +msgid "delete_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:327 +#, python-format +msgid "Brocade NOS driver: failed to dissociate MAC %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:330 +msgid "Brocade switch exception, delete_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:333 +#, python-format +msgid "" +"delete port (postcommit): port_id=%(port_id)s network_id=%(network_id)s " +"tenant_id=%(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:340 +msgid "update_port_precommit(self: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:344 +msgid "update_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:348 +msgid "create_subnetwork_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:352 +msgid "create_subnetwork_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:356 +msgid "delete_subnetwork_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:360 +msgid "delete_subnetwork_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:364 +msgid "update_subnet_precommit(self: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:368 +msgid 
"update_subnet_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:64 +msgid "" +"Brocade Switch IP address is not set, check config ml2_conf_brocade.ini " +"file" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:74 +msgid "Connect failed to switch" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:101 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:115 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:128 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:141 +msgid "NETCONF error" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:223 +#, python-format +msgid "data = %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:226 +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:168 +#, python-format +msgid "Response: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:290 +#, python-format +msgid "APIC session will expire in %d seconds" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:335 +msgid "APIC session timed-out, logging in again." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:23 +msgid "Host name or IP Address of the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:25 +msgid "Username for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:27 +msgid "Password for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:29 +msgid "Communication port for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:31 +msgid "Name for the VMM domain provider" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:33 +msgid "Name for the VMM domain to be created for Openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:35 +msgid "Name for the vlan namespace to be used for openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:37 +msgid "Range of VLAN's to be used for Openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:39 +msgid "Name of the node profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:41 +msgid "Name of the entity profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:43 +msgid "Name of the function profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:45 +msgid "Clear the node profiles on the APIC at startup (mainly used for testing)" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:25 +#, python-format +msgid "No response from APIC at %(url)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:30 +#, python-format +msgid "" +"APIC responded with HTTP status %(status)s: %(reason)s, Request: " +"'%(request)s', APIC error code %(err_code)s: %(err_text)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:37 +#, python-format +msgid "APIC failed to provide cookie for %(request)s request" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:42 +msgid "Authorized APIC session not established" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:47 +#, python-format +msgid "The switch and port for host '%(host)s' are not configured" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:52 +#, python-format +msgid "Managed Object '%(mo_class)s' is not 
supported" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:57 +#, python-format +msgid "" +"Multiple VLAN ranges are not supported in the APIC plugin. Please specify" +" a single VLAN range. Current config: '%(vlan_ranges)s'" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py:84 +#, python-format +msgid "Port %s is not bound to a segment" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:25 +msgid "The physical network managed by the switches." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:33 +#, python-format +msgid "Credential %(credential_name)s already exists for tenant %(tenant_id)s." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:54 +#, python-format +msgid "Nexus Port Binding (%(filters)s) is not present" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:63 +#, python-format +msgid "Missing required field(s) to configure nexus switch: %(fields)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:45 +#, python-format +msgid "nexus_switches found = %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:87 +msgid "get_nexusvm_bindings() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/config.py:25 +msgid "" +"Delay within which agent is expected to update existing ports whent it " +"restarts" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:42 +msgid "Experimental L2 population driver" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:143 +msgid "Unable to retrieve the agent ip, check the agent configuration." +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:149 +#, python-format +msgid "Port %(port)s updated by agent %(agent)s isn't bound to any segment" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:199 +#, python-format +msgid "" +"Unable to retrieve the agent ip, check the agent %(agent_host)s " +"configuration." +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/rpc.py:40 +#, python-format +msgid "" +"Fanout notify l2population agents at %(topic)s the message %(method)s " +"with %(fdb_entries)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/rpc.py:51 +#, python-format +msgid "" +"Notify l2population agent %(host)s at %(topic)s the message %(method)s " +"with %(fdb_entries)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:31 +msgid "" +"Username for Arista EOS. This is required field. If not set, all " +"communications to Arista EOSwill fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:37 +msgid "" +"Password for Arista EOS. This is required field. If not set, all " +"communications to Arista EOS will fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:42 +msgid "" +"Arista EOS IP address. This is required field. If not set, all " +"communications to Arista EOSwill fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:47 +msgid "" +"Defines if hostnames are sent to Arista EOS as FQDNs " +"(\"node1.domain.com\") or as short names (\"node1\"). This is optional. " +"If not set, a value of \"True\" is assumed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:53 +msgid "" +"Sync interval in seconds between Neutron plugin and EOS. This interval " +"defines how often the synchronization is performed. This is an optional " +"field. If not set, a value of 180 seconds is assumed." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:60 +msgid "" +"Defines Region Name that is assigned to this OpenStack Controller. This " +"is useful when multiple OpenStack/Neutron controllers are managing the " +"same Arista HW clusters. Note that this name must match with the region " +"name registered (or known) to keystone service. Authentication with " +"Keysotne is performed by EOS. This is optional. If not set, a value of " +"\"RegionOne\" is assumed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:32 +msgid "Unable to reach EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:74 +#, python-format +msgid "'timestamp' command '%s' is not available on EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:314 +#, python-format +msgid "VM id %(vmid)s not found for port %(portid)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:333 +#, python-format +msgid "Unknown device owner: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:429 +#, python-format +msgid "Executing command on Arista EOS: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:437 +#, python-format +msgid "Error %(err)s while trying to execute commands %(cmd)s on EOS %(host)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:505 +msgid "Required option eapi_host is not set" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:509 +msgid "Required option eapi_username is not set" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:529 +msgid "Syncing Neutron <-> EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:535 +msgid "OpenStack and EOS are in sync!" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:557 +#, python-format +msgid "" +"No Tenants configured in Neutron DB. But %d tenants disovered in EOS " +"during synchronization.Enitre EOS region is cleared" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:722 +#, python-format +msgid "Network %s is not created as it is not found inArista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:736 +#, python-format +msgid "Network name changed to %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:764 +#, python-format +msgid "Network %s is not updated as it is not found inArista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:856 +#, python-format +msgid "VM %s is not created as it is not found in Arista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:871 +#, python-format +msgid "Port name changed to %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:921 +#, python-format +msgid "VM %s is not updated as it is not found in Arista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:47 +msgid "Initializing driver" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:62 +msgid "Initialization done" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:123 +msgid "Ignoring port notification to controller because of missing host ID." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/config.py:24 +#: neutron/plugins/mlnx/common/config.py:48 +msgid "Type of VM network interface: mlnx_direct or hostdev" +msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/config.py:28 +msgid "Enable server compatibility with old nova" +msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py:56 +#, python-format +msgid "Checking segment: %(segment)s for mappings: %(mappings)s " +msgstr "" + +#: neutron/plugins/mlnx/agent_notify_api.py:48 +msgid "Sending delete network message" +msgstr "" + +#: neutron/plugins/mlnx/agent_notify_api.py:56 +msgid "Sending update port message" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:115 +msgid "Mellanox Embedded Switch Plugin initialisation complete" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:152 +#, python-format +msgid "Invalid physical network type %(type)s.Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:159 +#, python-format +msgid "Parsing physical_network_type failed: %s. Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:164 +#, python-format +msgid "" +"Invalid physical network type %(type)s for network %(net)s. Server " +"terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:168 +#, python-format +msgid "Physical Network type mappings: %s" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:176 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:359 +#, python-format +msgid "%s. Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:314 +#, python-format +msgid "Unsupported vnic type %(vnic_type)s for physical network type %(net_type)s" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:318 +msgid "Invalid vnic_type on port_create" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:320 +msgid "vnic_type is not defined in port profile" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:364 +msgid "Update network" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:377 +msgid "Delete network" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:425 +#, python-format +msgid "create_port with %s" +msgstr "" + +#: neutron/plugins/mlnx/rpc_callbacks.py:120 +#, python-format +msgid "Device %(device)s up %(agent_id)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:56 +#, python-format +msgid "Agent cache inconsistency - port id is not stored for %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:71 +#, python-format +msgid "Network %s not defined on Agent." 
+msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:84 +#, python-format +msgid "Network %s is not available on this agent" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:95 +#, python-format +msgid "Connecting port %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:105 +#, python-format +msgid "Binding Segmentation ID %(seg_id)sto eSwitch for vNIC mac_address %(mac)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:124 +#, python-format +msgid "Port_mac %s is not available on this agent" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:131 +msgid "Creating VLAN Network" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:133 +#, python-format +msgid "Unknown network type %(network_type)s for network %(network_id)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:165 +msgid "Invalid Network ID, cannot remove Network" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:167 +#, python-format +msgid "Delete network %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:207 +#, python-format +msgid "RPC timeout while updating port %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:289 +msgid "Ports added!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:292 +msgid "Ports removed!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:329 +#, python-format +msgid "Adding port with mac %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:332 +#, python-format +msgid "Port %s updated" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:333 +#, python-format +msgid "Device details %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:346 +#, python-format +msgid "Device with mac_address %s not defined on Neutron Plugin" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:353 +#, python-format +msgid "Removing device with mac_address %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:361 +#, python-format +msgid "Removing port failed for device %(device)s due to %(exc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:376 +msgid "eSwitch Agent Started!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:389 +msgid "Agent loop process devices!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:394 +msgid "" +"Request timeout in agent event loop eSwitchD is not responding - " +"exiting..." +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:427 +#, python-format +msgid "Failed on Agent initialisation : %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:432 +msgid "Agent initialised successfully, now running... " +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:30 +msgid "" +"Failed to import eventlet.green.zmq. Won't connect to eSwitchD - " +"exiting..." 
+msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:74 +#, python-format +msgid "Action %(action)s failed: %(reason)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:76 +#, python-format +msgid "Unknown operation status %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:81 +msgid "get_attached_vnics" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:88 +#, python-format +msgid "" +"Set Vlan %(segmentation_id)s on Port %(port_mac)s on Fabric " +"%(physical_network)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:101 +#, python-format +msgid "Define Fabric %(fabric)s on interface %(ifc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:110 +#, python-format +msgid "Port Up for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:119 +#, python-format +msgid "Port Down for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:128 +#, python-format +msgid "Port Release for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/common/comm_utils.py:57 +#, python-format +msgid "Request timeout - call again after %s seconds" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:34 +msgid "" +"List of : with " +"physical_network_type is either eth or ib" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:37 +msgid "Physical network type for provider network (eth or ib)" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:52 +msgid "eswitch daemon end point" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:54 +msgid "" +"The number of milliseconds the agent will wait for response on request to" +" daemon." +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:57 +msgid "" +"The number of retries the agent will send request to daemon before giving" +" up" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:60 +msgid "" +"backoff rate multiplier for waiting period between retries for request to" +" daemon, i.e. 
value of 2 will double the request timeout each retry" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:20 +#, python-format +msgid "Mlnx Exception: %(err_msg)s" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:24 +msgid "Request Timeout: no response from eSwitchD" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:28 +#, python-format +msgid "Operation Failed: %(err_msg)s" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:42 +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:63 +#, python-format +msgid "Removing vlan %(seg_id)s on physical network %(net)s from pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:119 +#, python-format +msgid "Reserving vlan %(seg_id)s on physical network %(net)s from pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:138 +#, python-format +msgid "" +"Reserving specific vlan %(seg_id)s on physical network %(phy_net)s from " +"pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:143 +#, python-format +msgid "" +"Reserving specific vlan %(seg_id)s on physical network %(phy_net)s " +"outside pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:170 +#, python-format +msgid "Releasing vlan %(seg_id)s on physical network %(phy_net)s to pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:175 +#, python-format +msgid "Releasing vlan %(seg_id)s on physical network %(phy_net)s outside pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:181 +#, python-format +msgid "vlan_id %(seg_id)s on physical network %(phy_net)s not found" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:239 +msgid "Get_port_from_device_mac() called" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:247 +#, python-format +msgid "Set_port_status as %s called" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:192 +#, python-format +msgid "_cleanup_ofc_tenant: No OFC tenant for %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:195 +#, python-format +msgid "delete_ofc_tenant() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:211 +msgid "activate_port_if_ready(): skip, port.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:215 +msgid "activate_port_if_ready(): skip, network.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:219 +msgid "activate_port_if_ready(): skip, no portinfo for this port." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:223 +msgid "activate_port_if_ready(): skip, ofc_port already exists." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:231 +#, python-format +msgid "create_ofc_port() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:244 +#, python-format +msgid "deactivate_port(): skip, ofc_port for port=%s does not exist." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:263 +#, python-format +msgid "deactivate_port(): OFC port for port=%s is already removed." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:271 +#, python-format +msgid "Failed to delete port=%(port)s from OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:290 +#, python-format +msgid "NECPluginV2.create_network() called, network=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:307 +#, python-format +msgid "Failed to create network id=%(id)s on OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:323 +#, python-format +msgid "NECPluginV2.update_network() called, id=%(id)s network=%(network)s ." 
+msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:367 +#, python-format +msgid "NECPluginV2.delete_network() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:401 +#, python-format +msgid "delete_network() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:542 +#, python-format +msgid "NECPluginV2.create_port() called, port=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:606 +#, python-format +msgid "NECPluginV2.update_port() called, id=%(id)s port=%(port)s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:634 +#, python-format +msgid "NECPluginV2.delete_port() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:702 +#, python-format +msgid "" +"NECPluginV2RPCCallbacks.get_port_from_device() called, device=%(device)s " +"=> %(ret)s." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:726 +#, python-format +msgid "NECPluginV2RPCCallbacks.update_ports() called, kwargs=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:736 +#, python-format +msgid "" +"update_ports(): ignore unchanged portinfo in port_added message " +"(port_id=%s)." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:756 +#, python-format +msgid "" +"update_ports(): ignore port_removed message due to portinfo for " +"port_id=%s was not registered" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:761 +#, python-format +msgid "" +"update_ports(): ignore port_removed message received from different host " +"(registered_datapath_id=%(registered)s, " +"received_datapath_id=%(received)s)." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:58 +#, python-format +msgid "RouterMixin.create_router() called, router=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:84 +#, python-format +msgid "RouterMixin.update_router() called, id=%(id)s, router=%(router)s ." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:102 +#, python-format +msgid "RouterMixin.delete_router() called, id=%s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:121 +#, python-format +msgid "" +"RouterMixin.add_router_interface() called, id=%(id)s, " +"interface=%(interface)s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:128 +#, python-format +msgid "" +"RouterMixin.remove_router_interface() called, id=%(id)s, " +"interface=%(interface)s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:311 +#, python-format +msgid "" +"OFC does not support router with provider=%(provider)s, so removed it " +"from supported provider (new router driver map=%(driver_map)s)" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:319 +#, python-format +msgid "" +"default_router_provider %(default)s is supported! Please specify one of " +"%(supported)s" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:333 +#, python-format +msgid "Enabled router drivers: %s" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:336 +#, python-format +msgid "" +"No router provider is enabled. neutron-server terminated! " +"(supported=%(supported)s, configured=%(config)s)" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:40 +msgid "Disabled packet-filter extension." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:45 +#, python-format +msgid "create_packet_filter() called, packet_filter=%s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:61 +#, python-format +msgid "update_packet_filter() called, id=%(id)s packet_filter=%(packet_filter)s ." 
+msgstr "" + +#: neutron/plugins/nec/packet_filter.py:134 +#: neutron/plugins/nec/packet_filter.py:187 +#, python-format +msgid "Failed to create packet_filter id=%(id)s on OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:144 +#, python-format +msgid "delete_packet_filter() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:162 +#, python-format +msgid "activate_packet_filter_if_ready() called, packet_filter=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:171 +#, python-format +msgid "" +"activate_packet_filter_if_ready(): skip pf_id=%s, " +"packet_filter.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:174 +#, python-format +msgid "" +"activate_packet_filter_if_ready(): skip pf_id=%s, no portinfo for the " +"in_port." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:177 +msgid "" +"_activate_packet_filter_if_ready(): skip, ofc_packet_filter already " +"exists." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:180 +#, python-format +msgid "activate_packet_filter_if_ready(): create packet_filter id=%s on OFC." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:200 +#, python-format +msgid "deactivate_packet_filter_if_ready() called, packet_filter=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:205 +#, python-format +msgid "" +"deactivate_packet_filter(): skip, Not found OFC Mapping for packet_filter" +" id=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:210 +#, python-format +msgid "deactivate_packet_filter(): deleting packet_filter id=%s from OFC." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:219 +#, python-format +msgid "Failed to delete packet_filter id=%(id)s from OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:250 +#, python-format +msgid "Error occurred while disabling packet filter(s) for port %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:125 +#, python-format +msgid "create_router() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:154 +#, python-format +msgid "_update_ofc_routes() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:169 +#, python-format +msgid "delete_router() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:180 +#, python-format +msgid "" +"RouterOpenFlowDriver.add_interface(): the requested port has no subnet. " +"add_interface() is skipped. router_id=%(id)s, port=%(port)s)" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:200 +#, python-format +msgid "add_router_interface() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:218 +#, python-format +msgid "delete_router_interface() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:53 +#, python-format +msgid "Update ports: added=%(added)s, removed=%(removed)s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:76 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:51 +#, python-format +msgid "port_update received: %s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:220 +msgid "No port changed." +msgstr "" + +#: neutron/plugins/nec/common/config.py:35 +msgid "Host to connect to" +msgstr "" + +#: neutron/plugins/nec/common/config.py:37 +msgid "Base URL of OFC REST API. It is prepended to each API request." 
+msgstr "" + +#: neutron/plugins/nec/common/config.py:40 +msgid "Port to connect to" +msgstr "" + +#: neutron/plugins/nec/common/config.py:42 +msgid "Driver to use" +msgstr "" + +#: neutron/plugins/nec/common/config.py:44 +msgid "Enable packet filter" +msgstr "" + +#: neutron/plugins/nec/common/config.py:46 +msgid "Use SSL to connect" +msgstr "" + +#: neutron/plugins/nec/common/config.py:48 +msgid "Key file" +msgstr "" + +#: neutron/plugins/nec/common/config.py:50 +msgid "Certificate file" +msgstr "" + +#: neutron/plugins/nec/common/config.py:52 +msgid "Disable SSL certificate verification" +msgstr "" + +#: neutron/plugins/nec/common/config.py:54 +msgid "" +"Maximum attempts per OFC API request.NEC plugin retries API request to " +"OFC when OFC returns ServiceUnavailable (503).The value must be greater " +"than 0." +msgstr "" + +#: neutron/plugins/nec/common/config.py:63 +msgid "Default router provider to use." +msgstr "" + +#: neutron/plugins/nec/common/config.py:66 +msgid "List of enabled router providers." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:20 +#, python-format +msgid "An OFC exception has occurred: %(reason)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:30 +#, python-format +msgid "The specified OFC resource (%(resource)s) is not found." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:34 +#, python-format +msgid "An exception occurred in NECPluginV2 DB: %(reason)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:38 +#, python-format +msgid "" +"Neutron-OFC resource mapping for %(resource)s %(neutron_id)s is not " +"found. It may be deleted during processing." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:44 +#, python-format +msgid "OFC returns Server Unavailable (503) (Retry-After=%(retry_after)s)" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:53 +#, python-format +msgid "PortInfo %(id)s could not be found" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:57 +msgid "" +"Invalid input for operation: datapath_id should be a hex string with at " +"most 8 bytes" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:63 +msgid "Invalid input for operation: port_no should be [0:65535]" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:68 +#, python-format +msgid "Router (provider=%(provider)s) does not support an external network" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:73 +#, python-format +msgid "Provider %(provider)s could not be found" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:77 +#, python-format +msgid "Cannot create more routers with provider=%(provider)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:81 +#, python-format +msgid "" +"Provider of Router %(router_id)s is %(provider)s. This operation is " +"supported only for router provider %(expected_provider)s." 
+msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:54 +#, python-format +msgid "Operation on OFC failed: %(status)s%(msg)s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:78 +#, python-format +msgid "Client request: %(host)s:%(port)s %(method)s %(action)s [%(body)s]" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:87 +#, python-format +msgid "OFC returns [%(status)s:%(data)s]" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:104 +#, python-format +msgid "OFC returns ServiceUnavailable (retry-after=%s)" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:108 +#, python-format +msgid "Specified resource %s does not exist on OFC " +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:112 +#, python-format +msgid "Operation on OFC failed: status=%(status)s, detail=%(detail)s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:115 +msgid "Operation on OFC failed" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:124 +#, python-format +msgid "Failed to connect OFC : %s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:140 +#, python-format +msgid "Waiting for %s seconds due to OFC Service_Unavailable." +msgstr "" + +#: neutron/plugins/nec/db/api.py:108 +#, python-format +msgid "del_ofc_item(): NotFound item (resource=%(resource)s, id=%(id)s) " +msgstr "" + +#: neutron/plugins/nec/db/api.py:142 +#, python-format +msgid "del_portinfo(): NotFound portinfo for port_id: %s" +msgstr "" + +#: neutron/plugins/nec/db/api.py:163 +#: neutron/plugins/openvswitch/ovs_db_v2.py:317 +#, python-format +msgid "get_port_with_securitygroups() called:port_id=%s" +msgstr "" + +#: neutron/plugins/nec/db/router.py:85 +#, python-format +msgid "Add provider binding (router=%(router_id)s, provider=%(provider)s)" +msgstr "" + +#: neutron/plugins/nec/drivers/__init__.py:36 +#, python-format +msgid "Loading OFC driver: %s" +msgstr "" + +#: neutron/plugins/nec/drivers/pfc.py:33 +#, python-format +msgid "OFC %(resource)s ID has an invalid format: %(ofc_id)s" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:33 +msgid "Number of packet_filters allowed per tenant, -1 for unlimited" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:40 +#, python-format +msgid "PacketFilter %(id)s could not be found" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:44 +#, python-format +msgid "" +"IP version %(version)s is not supported for %(field)s (%(value)s is " +"specified)" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:49 +#, python-format +msgid "Packet Filter priority should be %(min)s-%(max)s (included)" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:53 +#, python-format +msgid "%(field)s field cannot be updated" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:57 +#, python-format +msgid "" +"The backend does not support duplicated priority. Priority %(priority)s " +"is in use" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:62 +#, python-format +msgid "" +"Ether Type '%(eth_type)s' conflicts with protocol '%(protocol)s'. Update " +"or clear protocol before changing ether type." 
+msgstr "" + +#: neutron/plugins/nuage/plugin.py:89 +#, python-format +msgid "%(resource)s with id %(resource_id)s does not exist" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:98 +#, python-format +msgid "" +"Either %(resource)s %(req_resource)s not found or you dont have " +"credential to access it" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:104 +#, python-format +msgid "" +"More than one entry found for %(resource)s %(req_resource)s. Use id " +"instead" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:214 +#, python-format +msgid "Subnet %s not found on VSD" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:219 +#, python-format +msgid "Port-Mapping for port %s not found on VSD" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:314 +msgid "External network with subnets can not be changed to non-external network" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:368 +msgid "" +"Either net_partition is not provided with subnet OR default net_partition" +" is not created at the start" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:397 +#, python-format +msgid "Only one subnet is allowed per external network %s" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:479 +#, python-format +msgid "" +"Unable to complete operation on subnet %s.One or more ports have an IP " +"allocation from this subnet." +msgstr "" + +#: neutron/plugins/nuage/plugin.py:509 +#, python-format +msgid "" +"Router %s does not hold default zone OR net_partition mapping. Router-IF " +"add failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:519 +#, python-format +msgid "Subnet %s does not hold Nuage VSD reference. Router-IF add failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:529 +#, python-format +msgid "" +"Subnet %(subnet)s and Router %(router)s belong to different net_partition" +" Router-IF add not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:541 +#, python-format +msgid "Subnet %s has one or more active VMs Router-IF add not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:587 neutron/plugins/nuage/plugin.py:592 +#: neutron/plugins/nuage/plugin.py:598 +#, python-format +msgid "No router interface found for Router %s. Router-IF delete failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:614 +#, python-format +msgid "Subnet %s has one or more active VMs Router-IF delete not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:623 +#, python-format +msgid "" +"Router %s does not hold net_partition assoc on Nuage VSD. Router-IF " +"delete failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:662 +msgid "" +"Either net_partition is not provided with router OR default net_partition" +" is not created at the start" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:708 +msgid "for same subnet, multiple static routes not allowed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:724 +#, python-format +msgid "Router %s does not hold net-partition assoc on VSD. extra-route failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:837 +#, python-format +msgid "One or more router still attached to net_partition %s." 
+msgstr "" + +#: neutron/plugins/nuage/plugin.py:842 +#, python-format +msgid "NetPartition with %s does not exist" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:888 +#, python-format +msgid "router %s is not associated with any net-partition" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:903 +msgid "Floating IP can not be associated to VM in different router context" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:22 +msgid "IP Address and Port of Nuage's VSD server" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:25 +msgid "Username and password for authentication" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:27 +msgid "Boolean for SSL connection with VSD server" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:29 +msgid "Nuage provided base uri to reach out to VSD" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:31 +msgid "" +"Organization name in which VSD will orchestrate network resources using " +"openstack" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:34 +msgid "Nuage provided uri for initial authorization to access VSD" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:38 +msgid "" +"Default Network partition in which VSD will orchestrate network resources" +" using openstack" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:42 +msgid "Per Net Partition quota of floating ips" +msgstr "" + +#: neutron/plugins/nuage/common/exceptions.py:24 +#, python-format +msgid "Nuage Plugin does not support this operation: %(msg)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:93 +msgid "Agent terminated!: Failed to get a datapath." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:112 +msgid "Agent terminated" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:146 +msgid "Agent failed to create agent config map" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:273 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1285 +#, python-format +msgid "Unable to create tunnel port. Invalid remote IP: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:278 +#, python-format +msgid "ryu send_msg() result: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:342 +#, python-format +msgid "network_delete received network %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:348 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:575 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:287 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:610 +#, python-format +msgid "Network %s not used on agent." 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:357 +#, python-format +msgid "port_update received port %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:360 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:299 +msgid "tunnel_update received" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:366 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:308 +msgid "No tunnel_type specified, cannot create tunnels" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:369 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:311 +#, python-format +msgid "tunnel_type %s not supported by agent" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:490 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 +#, python-format +msgid "No local VLAN available for net-id=%s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:493 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:526 +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:505 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:548 +#, python-format +msgid "" +"Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " +"tunneling disabled" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:513 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:567 +#, python-format +msgid "" +"Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:523 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:587 +#, python-format +msgid "" +"Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:532 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:596 +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for net-" +"id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:578 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:613 +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:612 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:653 +#, python-format +msgid "" +"Cannot reclaim unknown network type %(network_type)s for net-" +"id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:663 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:699 +#, python-format +msgid "port_unbound() net_uuid %s not in local_vlan_map" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:737 +#, python-format +msgid "ancillary bridge list: %s." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:827 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:788 +msgid "" +"Failed to create OVS patch port. Cannot have tunneling enabled on this " +"agent, since this version of OVS does not support tunnels or patch ports." +" Agent terminated!" 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:911 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:902 +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:917 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:908 +#, python-format +msgid "" +"Bridge %(bridge)s for physical network %(physical_network)s does not " +"exist. Agent terminated!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:988 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1012 +#, python-format +msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1021 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1041 +#, python-format +msgid "VIF port: %s has no ofport configured, and might not be able to transmit" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1029 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1051 +#, python-format +msgid "No VIF port for port %s defined on agent." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1042 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1064 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:686 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:820 +msgid "ofport should have a value that can be interpreted as an integer" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1045 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1067 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:669 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:689 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:803 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:823 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:840 +#, python-format +msgid "Failed to set-up %(type)s tunnel port to %(ip)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1094 +#, python-format +msgid "Processing port %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1099 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1126 +#, python-format +msgid "" +"Port %s was not found on the integration bridge and will therefore not be" +" processed" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1108 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1147 +#, python-format +msgid "Unable to get port details for %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1125 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1142 +#, python-format +msgid "Setting status for %s to UP" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1129 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1146 +#, python-format +msgid "Setting status for %s to DOWN" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1132 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 +#, python-format +msgid "Configuration for device %s completed." 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1142 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1170 +#, python-format +msgid "Ancillary Port %s added" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1217 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d - " +"treat_devices_added_or_updated completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1225 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d - treat_devices_removed " +"completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1238 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1263 +#, python-format +msgid "" +"process_ancillary_network_ports - iteration: %(iter_num)d - " +"treat_ancillary_devices_added completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1247 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1272 +#, python-format +msgid "" +"process_ancillary_network_ports - iteration: %(iter_num)d - " +"treat_ancillary_devices_removed completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1274 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1314 +#, python-format +msgid "Unable to sync tunnel IP %(local_ip)s: %(e)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1302 +#, python-format +msgid "Agent ovsdb_monitor_loop - iteration:%d started" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1313 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1368 +msgid "Agent tunnel out of sync with plugin!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1317 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1372 +msgid "Error while synchronizing tunnels" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1321 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - starting polling. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1334 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - port information " +"retrieved. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1344 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1399 +#, python-format +msgid "Starting to process devices in:%s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1348 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1363 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ancillary port info " +"retrieved. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1373 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ancillary ports " +"processed. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1388 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1441 +msgid "Error while processing VIF ports" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1395 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d completed. Processed " +"ports statistics:%(port_stats)s. 
Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1431 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1485 +#, python-format +msgid "Parsing bridge_mappings failed: %s." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1454 +#, python-format +msgid "Invalid tunnel type specificed: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1457 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1512 +msgid "Tunneling cannot be enabled without a valid local_ip." +msgstr "" + +#: neutron/plugins/ofagent/common/config.py:24 +msgid "Number of seconds to retry acquiring an Open vSwitch datapath" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:240 +msgid "Failed to create subnet, deleting it from neutron" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:304 +#, python-format +msgid "Deleting newly created neutron port %s" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:374 +msgid "Failed to create floatingip" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:413 +msgid "Failed to create router" +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:154 +msgid "Port list is updated" +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:161 +msgid "AGENT looping....." +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:173 +msgid "NVSD Agent initialized successfully, now running... " +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:26 +msgid "NVSD Controller IP address" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:29 +msgid "NVSD Controller Port number" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:32 +msgid "NVSD Controller username" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:35 +msgid "NVSD Controller password" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:38 +msgid "NVSD controller REST API request timeout in seconds" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:40 +msgid "Number of login retries to NVSD controller" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:45 +msgid "integration bridge" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:23 +#, python-format +msgid "An unknown nvsd plugin exception occurred: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:27 +#: neutron/plugins/vmware/api_client/exception.py:68 +msgid "The request has timed out." +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:31 +msgid "Invalid access credentials to the Server." +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:35 +#, python-format +msgid "A resource is not found: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:39 +#, python-format +msgid "Request sent to server is invalid: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:43 +#, python-format +msgid "Internal Server Error: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:47 +msgid "Connection is closed by the server." 
+msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:51 +#, python-format +msgid "The request is forbidden access to the resource: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:55 +#, python-format +msgid "Internal Server Error from NVSD controller: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:55 +#, python-format +msgid "Could not create a %(resource)s under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:59 +#, python-format +msgid "Failed to %(method)s %(resource)s id=%(resource_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:65 +#, python-format +msgid "Failed to %(method)s %(resource)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:109 +#, python-format +msgid "Network %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:126 +#, python-format +msgid "Network %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:147 +#, python-format +msgid "Network %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:160 +#, python-format +msgid "Subnet %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:174 +#, python-format +msgid "Subnet %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:190 +#, python-format +msgid "Subnet %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:222 +#, python-format +msgid "Port %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:246 +#, python-format +msgid "Port %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:259 +#, python-format +msgid "Port %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:281 +#, python-format +msgid "Flatingip %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:298 +#, python-format +msgid "Flatingip %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:312 +#, python-format +msgid "Flatingip %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:325 +#, python-format +msgid "Router %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:341 +#, python-format +msgid "Router %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:351 +#, python-format +msgid "Router %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:81 +#, python-format +msgid "Unable to connect to NVSD controller. Exiting after %(retries)s attempts" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:91 +#, python-format +msgid "Login Failed: %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:92 +#, python-format +msgid "Unable to establish connection with Controller %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:94 +msgid "Retrying after 1 second..." 
+msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:98 +#, python-format +msgid "Login Successful %(uri)s %(status)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:102 +#, python-format +msgid "AuthToken = %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:104 +msgid "login failed" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:112 +msgid "No Token, Re-login" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:129 +#, python-format +msgid "request: %(method)s %(uri)s successful" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:136 +#, python-format +msgid "request: Request failed from Controller side :%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:141 +#, python-format +msgid "Response is Null, Request timed out: %(method)s to %(uri)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:153 +#, python-format +msgid "Request %(method)s %(uri)s body = %(body)s failed with status %(status)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:157 +#, python-format +msgid "%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:161 +#, python-format +msgid "%(method)s to %(url)s, unexpected response code: %(status)d" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:167 +#, python-format +msgid "Request failed from Controller side with Status=%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:171 +#, python-format +msgid "Success: %(method)s %(url)s status=%(status)s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:210 +#, python-format +msgid "Skipping unreasonable tunnel ID range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:258 +#, python-format +msgid "Reserving tunnel %s from pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:273 +#, python-format +msgid "Reserving specific tunnel %s from pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:276 +#, python-format +msgid "Reserving specific tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:299 +#, python-format +msgid "Releasing tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:301 +#, python-format +msgid "Releasing tunnel %s to pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:303 +#, python-format +msgid "tunnel_id %s not found" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:375 +#, python-format +msgid "Adding a tunnel endpoint for %s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:391 +#, python-format +msgid "" +"Adding a tunnel endpoint failed due to a concurrenttransaction had been " +"committed (%s attempts left)" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:396 +msgid "Unable to generate a new tunnel id" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:307 +#, python-format +msgid "Invalid tenant_network_type: %s. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:324 +#, python-format +msgid "Tunneling disabled but tenant_network_type is '%s'. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:370 +#, python-format +msgid "Invalid tunnel ID range: '%(range)s' - %(e)s. Server terminated!" 
+msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:374 +#, python-format +msgid "Tunnel ID ranges: %s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:429 +#, python-format +msgid "%s networks are not enabled" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:232 +msgid "OVS version can not support ARP responder." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:296 +#, python-format +msgid "port_update message processed for port %s" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:497 +#, python-format +msgid "Action %s not supported" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:767 +#, python-format +msgid "Adding %s to list of bridges." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:882 +#, python-format +msgid "" +"Creating an interface named %(name)s exceeds the %(limit)d character " +"limitation. It was shortened to %(new_name)s to fit." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1242 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d " +"-treat_devices_added_or_updated completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1250 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d -treat_devices_removed " +"completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1351 +#, python-format +msgid "Agent rpc_loop - iteration:%d started" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1376 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - starting polling. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1389 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - port information retrieved. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1404 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d -ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1418 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d -ancillary port info retrieved. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1427 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - ancillary ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1448 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d completed. Processed ports " +"statistics: %(port_stats)s. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1509 +#, python-format +msgid "Invalid tunnel type specified: %s" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:30 +msgid "Enable tunneling support" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:32 +msgid "Tunnel bridge to use" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:34 +msgid "Peer patch port in integration bridge for tunnel bridge" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:37 +msgid "Peer patch port in tunnel bridge for integration bridge" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:40 +msgid "Local IP address of GRE tunnel endpoints." 
+msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:43 +msgid "List of :" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:45 +msgid "Network type for tenant networks (local, vlan, gre, vxlan, or none)" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:53 +msgid "List of :" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:55 +msgid "The type of tunnels to use when utilizing tunnels, either 'gre' or 'vxlan'" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:58 +msgid "" +"Use veths instead of patch ports to interconnect the integration bridge " +"to physical bridges" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:68 +msgid "Minimize polling by monitoring ovsdb for interface changes." +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:72 +msgid "" +"The number of seconds to wait before respawning the ovsdb monitor after " +"losing communication with it" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:75 +msgid "Network types supported by the agent (gre and/or vxlan)" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:80 +msgid "MTU size of veth interfaces" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:82 +msgid "" +"Use ml2 l2population mechanism driver to learn remote mac and IPs and " +"improve tunnel scalability" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:85 +msgid "Enable local ARP responder if it is supported" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:87 +msgid "" +"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying " +"GRE/VXLAN tunnel" +msgstr "" + +#: neutron/plugins/plumgrid/common/exceptions.py:24 +#, python-format +msgid "PLUMgrid Plugin Error: %(err_msg)s" +msgstr "" + +#: neutron/plugins/plumgrid/common/exceptions.py:28 +#, python-format +msgid "Connection failed with PLUMgrid Director: %(err_msg)s" +msgstr "" + +#: neutron/plugins/plumgrid/drivers/fake_plumlib.py:31 +msgid "Python PLUMgrid Fake Library Started " +msgstr "" + +#: neutron/plugins/plumgrid/drivers/fake_plumlib.py:36 +#, python-format +msgid "Fake Director: %s" +msgstr "" + +#: neutron/plugins/plumgrid/drivers/plumlib.py:37 +msgid "Python PLUMgrid Library Started " +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:44 +msgid "PLUMgrid Director server to connect to" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:46 +msgid "PLUMgrid Director server port to connect to" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:48 +msgid "PLUMgrid Director admin username" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:50 +msgid "PLUMgrid Director admin password" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:52 +msgid "PLUMgrid Director server timeout" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:55 +msgid "PLUMgrid Driver" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:72 +msgid "Neutron PLUMgrid Director: Starting Plugin" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:77 +msgid "Neutron PLUMgrid Director: Neutron server with PLUMgrid Plugin has started" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:90 +#, python-format +msgid "Neutron PLUMgrid Director: %s" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:101 +msgid "Neutron PLUMgrid Director: 
create_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:115 +msgid "PLUMgrid Library: create_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:130 +msgid "Neutron PLUMgrid Director: update_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:142 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:168 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:352 +msgid "PLUMgrid Library: update_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:157 +msgid "Neutron PLUMgrid Director: delete_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:180 +msgid "Neutron PLUMgrid Director: create_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:199 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:226 +msgid "PLUMgrid Library: create_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:214 +msgid "Neutron PLUMgrid Director: update_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:242 +msgid "Neutron PLUMgrid Director: delete_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:257 +msgid "PLUMgrid Library: delete_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:286 +msgid "Neutron PLUMgrid Director: create_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:310 +msgid "PLUMgrid Library: create_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:320 +msgid "Neutron PLUMgrid Director: delete_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:332 +msgid "PLUMgrid Library: delete_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:340 +msgid "update_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:364 +msgid "Neutron PLUMgrid Director: create_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:376 +msgid "PLUMgrid Library: create_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:386 +msgid "Neutron PLUMgrid Director: update_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:392 +msgid "PLUMgrid Library: update_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:401 +msgid "Neutron PLUMgrid Director: delete_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:411 +msgid "PLUMgrid Library: delete_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:419 +msgid "Neutron PLUMgrid Director: add_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:439 +msgid "PLUMgrid Library: add_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:450 +msgid "Neutron PLUMgrid Director: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:472 +msgid "PLUMgrid Library: remove_router_interface() called" +msgstr "" + +#: 
neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:483 +msgid "Neutron PLUMgrid Director: create_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:490 +msgid "PLUMgrid Library: create_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:499 +msgid "Neutron PLUMgrid Director: update_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:508 +msgid "PLUMgrid Library: update_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:518 +msgid "Neutron PLUMgrid Director: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:528 +msgid "PLUMgrid Library: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:535 +msgid "Neutron PLUMgrid Director: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:542 +msgid "PLUMgrid Library: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:572 +msgid "" +"Networks with admin_state_up=False are not supported by PLUMgrid plugin " +"yet." +msgstr "" + +#: neutron/plugins/ryu/ryu_neutron_plugin.py:60 +#, python-format +msgid "get_ofp_rest_api: %s" +msgstr "" + +#: neutron/plugins/ryu/ryu_neutron_plugin.py:124 +msgid "Invalid configuration. check ryu.ini" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:95 +#, python-format +msgid "Could not get IPv4 address from %(nic)s: %(cfg)s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:161 +#, python-format +msgid "External port %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:169 +msgid "Get Ryu rest API address" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:217 +msgid "Ryu rest API port isn't specified" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:218 +#, python-format +msgid "Going to ofp controller mode %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:294 +#, python-format +msgid "tunnel_ip %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:296 +#, python-format +msgid "ovsdb_port %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:298 +#, python-format +msgid "ovsdb_ip %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:304 +#, python-format +msgid "Initialization failed: %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:307 +msgid "" +"Ryu initialization on the node is done. Agent initialized successfully, " +"now running..." 
+msgstr "" + +#: neutron/plugins/ryu/common/config.py:24 +msgid "OpenFlow REST API location" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:26 +msgid "Minimum tunnel ID to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:28 +msgid "Maximum tunnel ID to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:30 +msgid "Tunnel IP to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:32 +msgid "Tunnel interface to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:34 +msgid "OVSDB port to connect to" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:36 +msgid "OVSDB IP to connect to" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:38 +msgid "OVSDB interface to connect to" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:39 +#, python-format +msgid "get_port_from_device() called:port_id=%s" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:75 +#, python-format +msgid "" +"Invalid tunnel key options tunnel_key_min: %(key_min)d tunnel_key_max: " +"%(key_max)d. Using default value" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:155 +#, python-format +msgid "last_key %(last_key)s new_key %(new_key)s" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:158 +msgid "No key found" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:191 +#, python-format +msgid "Transaction retry exhausted (%d). Abandoned tunnel key allocation." +msgstr "" + +#: neutron/plugins/vmware/check_nsx_config.py:45 +#: neutron/plugins/vmware/check_nsx_config.py:80 +#, python-format +msgid "Error '%(err)s' when connecting to controller(s): %(ctl)s." +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:55 +#, python-format +msgid "Invalid agent_mode: %s" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:106 +msgid "network_auto_schedule has been disabled" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:127 +#, python-format +msgid "Unable to run Neutron with config option '%s', as NSX does not support it" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:130 +#, python-format +msgid "Unmet dependency for config option '%s'" +msgstr "" + +#: neutron/plugins/vmware/nsx_cluster.py:49 +#, python-format +msgid "" +"Attribute '%s' has been deprecated or moved to a new section. See new " +"configuration file for details." +msgstr "" + +#: neutron/plugins/vmware/nsx_cluster.py:61 +#, python-format +msgid "The following cluster attributes were not specified: %s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/__init__.py:28 +#, python-format +msgid "Invalid connection type: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:103 +#, python-format +msgid "[%d] no API providers currently available." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:106 +#, python-format +msgid "[%d] Waiting to acquire API client connection." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:110 +#, python-format +msgid "[%(rid)d] Connection %(conn)s idle for %(sec)0.2f seconds; reconnecting." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:119 +#, python-format +msgid "[%(rid)d] Acquired connection %(conn)s. %(qsize)d connection(s) available." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:140 +#, python-format +msgid "" +"[%(rid)d] Released connection %(conn)s is not an API provider for the " +"cluster" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:150 +#, python-format +msgid "[%(rid)d] Connection returned in bad state, reconnecting to %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:176 +#, python-format +msgid "[%(rid)d] Released connection %(conn)s. %(qsize)d connection(s) available." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:186 +#, python-format +msgid "Login request for an invalid connection: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:197 +msgid "Waiting for auth to complete" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:239 +#, python-format +msgid "Invalid conn_params value: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:93 +#, python-format +msgid "Request returns \"%s\"" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:106 +#, python-format +msgid "Request timed out: %(method)s to %(url)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:117 +#, python-format +msgid "Received error code: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:118 +#, python-format +msgid "Server Error Message: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:124 +#, python-format +msgid "" +"%(method)s to %(url)s, unexpected response code: %(status)d (content = " +"'%(body)s')" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:141 +msgid "Unable to determine NSX version. Plugin might not work as expected." +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_client.py:145 +#, python-format +msgid "Login error \"%s\"" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_client.py:150 +#, python-format +msgid "Saving new authentication cookie '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:102 +msgid "Joining an invalid green thread" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:122 +#, python-format +msgid "[%d] Request timeout." +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:123 +msgid "Request timeout" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:149 +#, python-format +msgid "[%(rid)d] Completed request '%(method)s %(url)s': %(status)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:156 +#, python-format +msgid "[%(rid)d] Error while handling request: %(req)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:212 +#, python-format +msgid "[%(rid)d] Failed to parse API provider: %(e)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:41 +msgid "Server denied session's authentication credentials." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:45 +msgid "An entity referenced in the request was not found." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:49 +msgid "Request conflicts with configuration on a different entity." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:54 +msgid "" +"Request could not completed because the associated resource could not be " +"reached." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:59 +msgid "The request is forbidden from accessing the referenced resource." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:64 +msgid "Create/Update actions are forbidden when in read-only mode." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:72 +msgid "The server is unable to fulfill the request due to a bad syntax" +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:77 +msgid "The backend received an invalid security certificate." +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:85 +msgid "No API connections available" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:90 +#, python-format +msgid "[%(rid)d] Issuing - request %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:116 +#, python-format +msgid "Setting X-Nvp-Wait-For-Config-Generation request header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:122 +#, python-format +msgid "[%(rid)d] Exception issuing request: %(e)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:130 +#, python-format +msgid "[%(rid)d] Completed request '%(conn)s': %(status)s (%(elapsed)s seconds)" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:139 +#, python-format +msgid "Reading X-Nvp-config-Generation response header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:168 +#, python-format +msgid "[%d] Maximum redirects exceeded, aborting request" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:178 +#, python-format +msgid "[%(rid)d] Redirecting request to: %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:191 +#, python-format +msgid "[%(rid)d] Request '%(method)s %(url)s' received: %(status)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:195 +#, python-format +msgid "Server error return: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:199 +msgid "Invalid server response" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:204 +#, python-format +msgid "[%(rid)d] Failed request '%(conn)s': '%(msg)s' (%(elapsed)s seconds)" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:238 +#, python-format +msgid "[%d] Received redirect status without location header field" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:255 +#, python-format +msgid "[%(rid)d] Received invalid redirect location: '%(url)s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:259 +#, python-format +msgid "[%(rid)d] Received malformed redirect location: %(url)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/version.py:30 +#, python-format +msgid "Unable to fetch NSX version from response headers :%s" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:39 +msgid "" +"Maximum number of ports of a logical switch on a bridged transport zone " +"(default 5000)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:43 +msgid "" +"Maximum number of ports of a logical switch on an overlay transport zone " +"(default 256)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:47 +msgid "Maximum concurrent connections to each NSX controller." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:52 +msgid "" +"Number of seconds a generation id should be valid for (default -1 meaning" +" do not time out)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:56 +msgid "" +"If set to access_network this enables a dedicated connection to the " +"metadata proxy for metadata server access via Neutron router. 
If set to " +"dhcp_host_route this enables host route injection via the dhcp agent. " +"This option is only useful if running on a host that does not support " +"namespaces otherwise access_network should be used." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:65 +msgid "" +"The default network tranport type to use (stt, gre, bridge, ipsec_gre, or" +" ipsec_stt)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:69 +msgid "The mode used to implement DHCP/metadata services." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:71 +msgid "" +"The default option leverages service nodes to perform packet replication " +"though one could set to this to 'source' to perform replication locally. " +"This is useful if one does not want to deploy a service node(s)." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:80 +msgid "" +"Interval in seconds between runs of the state synchronization task. Set " +"it to 0 to disable it" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:84 +msgid "" +"Maximum value for the additional random delay in seconds between runs of " +"the state synchronization task" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:89 +msgid "" +"Minimum delay, in seconds, between two state synchronization queries to " +"NSX. It must not exceed state_sync_interval" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:94 +msgid "" +"Minimum number of resources to be retrieved from NSX during state " +"synchronization" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:98 +msgid "" +"Always read operational status from backend on show operations. Enabling " +"this option might slow down the system." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:107 +msgid "User name for NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:112 +msgid "Password for NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:115 +msgid "Total time limit for a cluster request" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:118 +msgid "Time before aborting a request" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:121 +msgid "Number of time a request should be retried" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:124 +msgid "Number of times a redirect should be followed" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:127 +msgid "Lists the NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:132 +msgid "" +"This is uuid of the default NSX Transport zone that will be used for " +"creating tunneled isolated \"Neutron\" networks. It needs to be created " +"in NSX before starting Neutron with the nsx plugin." 
+msgstr "" + +#: neutron/plugins/vmware/common/config.py:137 +msgid "" +"Unique identifier of the NSX L3 Gateway service which will be used for " +"implementing routers and floating IPs" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:141 +msgid "" +"Unique identifier of the NSX L2 Gateway service which will be used by " +"default for network gateways" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:144 +msgid "" +"Unique identifier of the Service Cluster which will be used by logical " +"services like dhcp and metadata" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:147 +msgid "" +"Name of the interface on a L2 Gateway transport nodewhich should be used " +"by default when setting up a network connection" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:157 +msgid "User name for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:161 +msgid "Password for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:163 +msgid "uri for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:165 +msgid "Optional parameter identifying the ID of datacenter to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:168 +#: neutron/plugins/vmware/common/config.py:174 +msgid "Optional parameter identifying the ID of datastore to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:171 +msgid "Optional parameter identifying the ID of resource to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:177 +msgid "Network ID for physical network connectivity" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:180 +msgid "Task status check interval" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:194 +#, python-format +msgid "Invalid replication_mode: %s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:21 +#, python-format +msgid "An unexpected error occurred in the NSX Plugin: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:25 +#, python-format +msgid "Unable to fulfill request with version %(version)s." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:29 +#, python-format +msgid "Invalid NSX connection parameters: %(conn_params)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:33 +#, python-format +msgid "" +"Invalid cluster values: %(invalid_attrs)s. Please ensure that these " +"values are specified in the [DEFAULT] section of the NSX plugin ini file." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:39 +#, python-format +msgid "Unable to find cluster config entry for nova zone: %(nova_zone)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:44 +#, python-format +msgid "" +"Unable to create port on network %(network)s. Maximum number of ports " +"reached" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:49 +#, python-format +msgid "" +"While retrieving NAT rules, %(actual_rules)s were found whereas rules in " +"the (%(min_rules)s,%(max_rules)s) interval were expected" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:55 +#, python-format +msgid "Invalid NSX attachment type '%(attachment_type)s'" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:59 +msgid "" +"The networking backend is currently in maintenance mode and therefore " +"unable to accept requests which modify its state. Please try later." 
+msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:65 +#, python-format +msgid "Gateway Service %(gateway)s is already in use" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:69 +msgid "" +"An invalid security certificate was specified for the gateway device. " +"Certificates must be enclosed between '-----BEGIN CERTIFICATE-----' and '" +"-----END CERTIFICATE-----'" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:76 +#, python-format +msgid "Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:80 +#, python-format +msgid "Router %(router_id)s is in use by Loadbalancer Service %(vip_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:85 +#, python-format +msgid "Router %(router_id)s is in use by firewall Service %(firewall_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:90 +#, python-format +msgid "Error happened in NSX VCNS Driver: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:94 +#, python-format +msgid "" +"Service cluster: '%(cluster_id)s' is unavailable. Please, check NSX setup" +" and/or configuration" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:99 +#, python-format +msgid "" +"An error occurred while connecting LSN %(lsn_id)s and network %(net_id)s " +"via port %(port_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:108 +#, python-format +msgid "Unable to find LSN for %(entity)s %(entity_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:112 +#, python-format +msgid "Unable to find port for LSN %(lsn_id)s and %(entity)s %(entity_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:117 +#, python-format +msgid "Unable to migrate network '%(net_id)s' to LSN: %(reason)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:121 +#, python-format +msgid "Configuration conflict on Logical Service Node %(lsn_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:63 +#, python-format +msgid "Unable to find NSX switches for Neutron network %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:110 +#, python-format +msgid "Unable to find NSX port for Neutron port %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:150 +#, python-format +msgid "Unable to find NSX security profile for Neutron security group %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:154 +#, python-format +msgid "Multiple NSX security profiles found for Neutron security group %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:185 +#, python-format +msgid "Unable to find NSX router for Neutron router %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:243 +#, python-format +msgid "" +"Unable to retrieve operational status for gateway devices belonging to " +"tenant: %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:246 +msgid "Unable to retrieve operational status for gateway devices" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:231 +#, python-format +msgid "" +"Minimum request delay:%(req_delay)s must not exceed synchronization " +"interval:%(sync_interval)s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:264 +#, python-format +msgid "Logical switch for neutron network %s not found on NSX." 
+msgstr "" + +#: neutron/plugins/vmware/common/sync.py:299 +#: neutron/plugins/vmware/common/sync.py:381 +#: neutron/plugins/vmware/common/sync.py:476 +#, python-format +msgid "Updating status for neutron resource %(q_id)s to: %(status)s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:351 +#, python-format +msgid "Logical router for neutron router %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:401 +#, python-format +msgid "Unable to find Neutron router id for NSX logical router: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:443 +#, python-format +msgid "Logical switch port for neutron port %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:536 +#, python-format +msgid "" +"Requested page size is %(cur_chunk_size)d.It might be necessary to do " +"%(num_requests)d round-trips to NSX for fetching data. Please tune sync " +"parameters to ensure chunk size is less than %(max_page_size)d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:569 +#, python-format +msgid "Fetching up to %s resources from NSX backend" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:589 +#, python-format +msgid "Total data size: %d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:593 +#, python-format +msgid "" +"Fetched %(num_lswitches)d logical switches, %(num_lswitchports)d logical " +"switch ports,%(num_lrouters)d logical routers" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:609 +#, python-format +msgid "Running state synchronization task. Chunk: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:619 +#, python-format +msgid "" +"An error occurred while communicating with NSX backend. Will retry " +"synchronization in %d seconds" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:623 +#, python-format +msgid "Time elapsed querying NSX: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:630 +#, python-format +msgid "Number of chunks: %d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:648 +#, python-format +msgid "Time elapsed hashing data: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:660 +#, python-format +msgid "Synchronization for chunk %(chunk_num)d of %(total_chunks)d performed" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:672 +#, python-format +msgid "Time elapsed at end of sync: %s" +msgstr "" + +#: neutron/plugins/vmware/common/utils.py:64 +#, python-format +msgid "Specified name:'%s' exceeds maximum length. It will be truncated on NSX" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:76 +#, python-format +msgid "Port mapping for %s already available" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:123 +#, python-format +msgid "NSX identifiers for neutron port %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:134 +#, python-format +msgid "NSX identifiers for neutron router %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:149 +#, python-format +msgid "NSX identifiers for neutron security group %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/lsn_db.py:87 +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:71 +#, python-format +msgid "Unable to find Logical Service Node for network %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:45 +#, python-format +msgid "" +"Network Gateway '%(gateway_id)s' still has active mappings with one or " +"more neutron networks." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:50 +#, python-format +msgid "Network Gateway %(gateway_id)s could not be found" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:54 +#, python-format +msgid "" +"Network Gateway Device '%(device_id)s' is still used by one or more " +"network gateways." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:59 +#, python-format +msgid "Network Gateway Device %(device_id)s could not be found." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:63 +#, python-format +msgid "" +"Port '%(port_id)s' is owned by '%(device_owner)s' and therefore cannot be" +" deleted directly via the port API." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:68 +#, python-format +msgid "" +"The specified mapping '%(mapping)s' is already in use on network gateway " +"'%(gateway_id)s'." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:73 +#, python-format +msgid "" +"Multiple network connections found on '%(gateway_id)s' with provided " +"criteria." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:78 +#, python-format +msgid "" +"The connection %(network_mapping_info)s was not found on the network " +"gateway '%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:83 +#, python-format +msgid "The network gateway %(gateway_id)s cannot be updated or deleted" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:191 +msgid "" +"A network identifier must be specified when connecting a network to a " +"network gateway. Unable to complete operation" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:197 +#, python-format +msgid "" +"Invalid keys found among the ones provided in request body: " +"%(connection_attrs)s." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:203 +msgid "" +"In order to specify a segmentation id the segmentation type must be " +"specified as well" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:207 +msgid "Cannot specify a segmentation id when the segmentation type is flat" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:262 +#, python-format +msgid "Created network gateway with id:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:274 +#, python-format +msgid "Updated network gateway with id:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:289 +#, python-format +msgid "Network gateway '%s' was destroyed." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:306 +#, python-format +msgid "Connecting network '%(network_id)s' to gateway '%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:347 +#, python-format +msgid "" +"Requested network '%(network_id)s' not found.Unable to create network " +"connection on gateway '%(network_gateway_id)s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:355 +#, python-format +msgid "" +"Gateway port for '%(network_gateway_id)s' created on network " +"'%(network_id)s':%(port_id)s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:371 +#, python-format +msgid "Ensured no Ip addresses are configured on port %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:381 +#, python-format +msgid "" +"Disconnecting network '%(network_id)s' from gateway " +"'%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:475 +#, python-format +msgid "Created network gateway device: %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:486 +#, python-format +msgid "Updated network gateway device: %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:499 +#, python-format +msgid "Deleted network gateway device: %s." +msgstr "" + +#: neutron/plugins/vmware/dbexts/nsxrouter.py:61 +#, python-format +msgid "Nsx router extension successfully processed for router:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/qos_db.py:294 +#, python-format +msgid "DSCP value (%s) will be ignored with 'trusted' marking" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:77 +#, python-format +msgid "Rule Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:94 +msgid "Rule Resource binding not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:123 +#: neutron/plugins/vmware/dbexts/vcns_db.py:133 +#, python-format +msgid "VIP Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:162 +#, python-format +msgid "" +"Pool Resource binding with edge_id:%(edge_id)s pool_vseid:%(pool_vseid)s " +"not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:174 +#, python-format +msgid "Pool Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:200 +#, python-format +msgid "Monitor Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:40 +msgid "" +"Pull LSN information from NSX in case it is missing from the local data " +"store. This is useful to rebuild the local store in case of server " +"recovery." 
+msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:82 +#, python-format +msgid "Unable to create LSN for network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:90 +#, python-format +msgid "Unable to delete Logical Service Node %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:107 +#, python-format +msgid "" +"Unable to find Logical Service Node Port for LSN %(lsn_id)s and subnet " +"%(subnet_id)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:129 +#, python-format +msgid "" +"Unable to find Logical Service Node Port for LSN %(lsn_id)s and mac " +"address %(mac)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:149 +#, python-format +msgid "Unable to create port for LSN %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:157 +#, python-format +msgid "Unable to delete LSN Port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:174 +#, python-format +msgid "Metadata port not found while attempting to delete it from network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:177 +#, python-format +msgid "Unable to find Logical Services Node Port with MAC %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:262 +#, python-format +msgid "" +"Unable to configure dhcp for Logical Service Node %(lsn_id)s and port " +"%(lsn_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:283 +#, python-format +msgid "Unable to configure metadata for subnet %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:305 +#, python-format +msgid "Error while configuring LSN port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:377 +#, python-format +msgid "Unable to save LSN for network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:443 +#, python-format +msgid "Unable to save LSN port for subnet %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:81 +#, python-format +msgid "Port %s is already gone" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:112 +msgid "LSN already exist" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:116 +msgid "Cannot migrate an external network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:125 +msgid "Cannot migrate a 'metadata' network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:128 +msgid "Unable to support multiple subnets per network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:39 +msgid "Comma separated list of additional domain name servers" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:47 +msgid "Default DHCP lease time" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:55 +msgid "IP address used by Metadata server." +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:59 +msgid "TCP Port used by Metadata server." +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:136 +#, python-format +msgid "" +"Error while creating subnet %(cidr)s for network %(network)s. 
Please, " +"contact administrator" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:206 +#: neutron/plugins/vmware/dhcp_meta/nsx.py:224 +#, python-format +msgid "Performing DHCP %(action)s for resource: %(resource)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:211 +#, python-format +msgid "Network %s is external: no LSN to create" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:219 +#, python-format +msgid "Logical Services Node for network %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:241 +#, python-format +msgid "Error while configuring DHCP for port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:253 +#, python-format +msgid "DHCP is disabled for subnet %s: nothing to do" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:272 +#, python-format +msgid "DHCP for port %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:280 +#, python-format +msgid "Network %s is external: nothing to do" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:288 +#, python-format +msgid "Configuring metadata entry for port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:300 +#, python-format +msgid "Metadata for port %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:304 +#, python-format +msgid "Handle metadata access via router: %(r)s and interface %(i)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:321 +#, python-format +msgid "Metadata for router %s handled successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:75 +#, python-format +msgid "Subnet %s does not have a gateway, the metadata route will not be created" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:99 +msgid "Metadata access network is disabled" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:102 +msgid "" +"Overlapping IPs must be enabled in order to setup the metadata access " +"network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:122 +#, python-format +msgid "" +"No router interface found for router '%s'. No metadata access network " +"should be created or destroyed" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:130 +#, python-format +msgid "" +"An error occurred while operating on the metadata access network for " +"router:'%s'" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:84 +msgid "Cannot create a gateway with an empty device list" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:100 +#, python-format +msgid "Unexpected keys found in device description:%s" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:104 +#, python-format +msgid "%s: provided data are not iterable" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:111 +msgid "A connector type is required to create a gateway device" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:120 +#, python-format +msgid "Unknown connector type: %s" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:127 +msgid "Number of network gateways allowed per tenant, -1 for unlimited" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:34 +msgid "Need to be admin in order to create queue called default" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:38 +msgid "Default queue already exists." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:42 +#, python-format +msgid "Invalid value for dscp %(data)s must be integer value between 0 and 63." 
+msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:47 +msgid "The qos marking cannot be set to 'trusted' when the DSCP field is set" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:52 +msgid "Invalid bandwidth rate, min greater than max." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:56 +#, python-format +msgid "Invalid bandwidth rate, %(data)s must be a non negative integer." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:61 +#, python-format +msgid "Queue %(id)s does not exist" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:65 +msgid "Unable to delete queue attached to port." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:69 +msgid "Port is not associated with lqueue" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:80 +#, python-format +msgid "'%s' must be a non negative integer." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/__init__.py:77 +#, python-format +msgid "Error. %(type)s exception: %(exc)s." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/__init__.py:81 +#, python-format +msgid "locals=[%s]" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/lsn.py:171 +#, python-format +msgid "" +"Attempt to plug Logical Services Node %(lsn)s into network with port " +"%(port)s failed. PatchAttachment already exists with another port" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:252 +#, python-format +msgid "Cannot update NSX routes %(routes)s for router %(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:346 +#, python-format +msgid "Created logical port %(lport_uuid)s on logical router %(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:374 +#, python-format +msgid "Updated logical port %(lport_uuid)s on logical router %(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:385 +#, python-format +msgid "" +"Delete logical router port %(lport_uuid)s on logical router " +"%(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:449 +#, python-format +msgid "Invalid keys for NAT match: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:455 +#, python-format +msgid "Creating NAT rule: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:470 +msgid "" +"No SNAT rules cannot be applied as they are not available in this version" +" of the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:475 +msgid "" +"No DNAT rules cannot be applied as they are not available in this version" +" of the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:609 +#, python-format +msgid "Router Port %(lport_id)s not found on router %(lrouter_id)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:614 +#, python-format +msgid "" +"An exception occurred while updating IP addresses on a router logical " +"port:%s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:94 +#, python-format +msgid "Created Security Profile: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:120 +#, python-format +msgid "Updated Security Profile: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:140 +#, python-format +msgid "Unable to find security profile %s on NSX backend" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:130 +#, python-format +msgid "Created logical switch: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:150 +#: neutron/plugins/vmware/nsxlib/switch.py:165 +#, python-format +msgid "Network not found, Error: %s" +msgstr "" + +#: 
neutron/plugins/vmware/nsxlib/switch.py:188 +msgid "Port or Network not found" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:247 +#, python-format +msgid "Lswitch %s not found in NSX" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:256 +msgid "Unable to get ports" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:273 +#, python-format +msgid "" +"Looking for port with q_port_id tag '%(neutron_port_id)s' on: " +"'%(lswitch_uuid)s'" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:281 +#, python-format +msgid "" +"Found '%(num_ports)d' ports with q_port_id tag: '%(neutron_port_id)s'. " +"Only 1 was expected." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:290 +#, python-format +msgid "get_port() %(network)s %(port)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:298 +#: neutron/plugins/vmware/nsxlib/switch.py:329 +#, python-format +msgid "Port or Network not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:324 +#, python-format +msgid "Updated logical port %(result)s on logical switch %(uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:359 +#, python-format +msgid "Created logical port %(result)s on logical switch %(uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:371 +#, python-format +msgid "Port not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/versioning.py:56 +msgid "Operation may not be supported" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/versioning.py:64 +msgid "" +"NSX version is not set. Unable to complete request correctly. Check log " +"for NSX communication errors." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:211 +#, python-format +msgid "Unable to process default l2 gw service:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:246 +#, python-format +msgid "Created NSX router port:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:248 +#: neutron/plugins/vmware/plugins/service.py:438 +#, python-format +msgid "Unable to create port on NSX logical router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:251 +#, python-format +msgid "" +"Unable to create logical router port for neutron port id %(port_id)s on " +"router %(nsx_router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:329 +#, python-format +msgid "Attached %(att)s to NSX router port %(port)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:335 +#, python-format +msgid "" +"Unable to plug attachment in NSX logical router port %(r_port_id)s, " +"associated with Neutron %(q_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:341 +#, python-format +msgid "" +"Unable to plug attachment in router port %(r_port_id)s for neutron port " +"id %(q_port_id)s on router %(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:387 +msgid "An exception occurred while selecting logical switch for the port" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:427 +#, python-format +msgid "" +"An exception occurred while creating the neutron port %s on the NSX " +"plaform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:439 +#: neutron/plugins/vmware/plugins/base.py:491 +#: neutron/plugins/vmware/plugins/base.py:689 +#, python-format +msgid "" +"NSX plugin does not support regular VIF ports on external networks. Port " +"%s will be down." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:460 +#, python-format +msgid "" +"_nsx_create_port completed for port %(name)s on network %(network_id)s. 
" +"The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:471 +#, python-format +msgid "" +"Concurrent network deletion detected; Back-end Port %(nsx_id)s creation " +"to be rolled back for Neutron port: %(neutron_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:483 +#, python-format +msgid "NSX Port %s already gone" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:498 +#, python-format +msgid "Port '%s' was already deleted on NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:505 +#, python-format +msgid "_nsx_delete_port completed for port %(port_id)s on network %(net_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:510 +#, python-format +msgid "Port %s not found in NSX" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:519 +#, python-format +msgid "" +"Neutron port %(port_id)s not found on NSX backend. Terminating delete " +"operation. A dangling router port might have been left on router " +"%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:534 +#: neutron/plugins/vmware/plugins/base.py:1069 +#, python-format +msgid "" +"Ignoring exception as this means the peer for port '%s' has already been " +"deleted." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:546 +#, python-format +msgid "" +"It is not allowed to create router interface ports on external networks " +"as '%s'" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:571 +#, python-format +msgid "" +"_nsx_create_router_port completed for port %(name)s on network " +"%(network_id)s. The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:584 +#, python-format +msgid "" +"device_id field must be populated in order to create an external gateway " +"port for network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:594 +#, python-format +msgid "The gateway port for the NSX router %s was not found on the backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:635 +#, python-format +msgid "" +"_nsx_create_ext_gw_port completed on external network %(ext_net_id)s, " +"attached to router:%(router_id)s. NSX port id is %(nsx_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:670 +#: neutron/plugins/vmware/plugins/base.py:1806 +#, python-format +msgid "Logical router resource %s not found on NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:674 +#: neutron/plugins/vmware/plugins/base.py:1810 +msgid "Unable to update logical routeron NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:676 +#, python-format +msgid "" +"_nsx_delete_ext_gw_port completed on external network %(ext_net_id)s, " +"attached to NSX router:%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:718 +#, python-format +msgid "" +"_nsx_create_l2_gw_port completed for port %(name)s on network " +"%(network_id)s. The new port id is %(id)s." 
+msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:760 +#, python-format +msgid "%s required" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:765 +msgid "Segmentation ID cannot be specified with flat network type" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:769 +msgid "Segmentation ID must be specified with vlan network type" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:773 +#: neutron/plugins/vmware/plugins/base.py:789 +#, python-format +msgid "%(segmentation_id)s out of range (%(min_id)s through %(max_id)s)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:795 +#, python-format +msgid "%(net_type_param)s %(net_type_value)s not supported" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:840 +#, python-format +msgid "No switch has available ports (%d checked)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:874 +#, python-format +msgid "Maximum number of logical ports reached for logical network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:966 +#, python-format +msgid "" +"Network with admin_state_up=False are not yet supported by this plugin. " +"Ignoring setting for network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1058 +#, python-format +msgid "" +"A nsx lport identifier was not found for neutron port '%s'. Unable to " +"remove the peer router port for this switch port" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1077 +#, python-format +msgid "delete_network completed for tenant: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1080 +#: neutron/plugins/vmware/plugins/service.py:540 +#, python-format +msgid "Did not found lswitch %s in NSX" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1115 +msgid "admin_state_up=False networks are not supported." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1135 +#, python-format +msgid "Unable to find NSX mappings for neutron network:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1142 +#, python-format +msgid "" +"Logical switch update on NSX backend failed. Neutron network " +"id:%(net_id)s; NSX lswitch id:%(lswitch_id)s;Error:%(error)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1213 +#, python-format +msgid "port created on NSX backend for tenant %(tenant_id)s: (%(id)s)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1216 +#, python-format +msgid "Logical switch for network %s was not found in NSX." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1227 +msgid "Unable to create port or set port attachment in NSX." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1315 +#, python-format +msgid "Updating port: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1350 +#, python-format +msgid "Unable to update port id: %s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1433 +msgid "" +"Cannot create a distributed router with the NSX platform currently in " +"execution. Please, try without specifying the 'distributed' attribute." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1439 +msgid "Unable to create logical router on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1450 +#, python-format +msgid "" +"Unable to create L3GW port on logical router %(router_uuid)s. 
Verify " +"Default Layer-3 Gateway service %(def_l3_gw_svc)s id is correct" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1460 +#, python-format +msgid "Unable to create router %s on NSX backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1489 +#: neutron/plugins/vmware/plugins/base.py:1574 +#: neutron/plugins/vmware/plugins/service.py:200 +#: neutron/plugins/vmware/plugins/service.py:1235 +#, python-format +msgid "Network '%s' is not a valid external network" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1538 +#, python-format +msgid "Failed to set gateway info for router being created:%s - removing router" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1541 +#, python-format +msgid "" +"Create router failed while setting external gateway. Router:%s has been " +"removed from DB and backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1583 +msgid "" +"'routes' cannot contain route '0.0.0.0/0', this must be updated through " +"the default gateway attribute" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1599 +#, python-format +msgid "Logical router %s not found on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1603 +msgid "Unable to update logical router on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1605 +msgid "" +"Request cannot contain 'routes' with the NSX platform currently in " +"execution. Please, try without specifying the static routes." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1662 +#, python-format +msgid "Logical router '%s' not found on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1666 +#, python-format +msgid "Unable to delete logical router '%s' on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1676 +#, python-format +msgid "" +"Unable to remove NSX mapping for Neutron router %(router_id)s because of " +"the following exception:%(d_exc)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1744 +#, python-format +msgid "" +"Add_router_interface completed for subnet:%(subnet_id)s and " +"router:%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1842 +#, python-format +msgid "" +"An error occurred while removing NAT rules on the NSX platform for " +"floating ip:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1847 +msgid "An incorrect number of matching NAT rules was found on the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1979 +#, python-format +msgid "" +"An error occurred while creating NAT rules on the NSX platform for " +"floating ip:%(floating_ip)s mapped to internal ip:%(internal_ip)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1985 +msgid "Failed to update NAT rules for floatingip update" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2022 +#, python-format +msgid "The port '%s' is not associated with floating IPs" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2025 +#, python-format +msgid "Nat rules not found in nsx for port: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2061 +#, python-format +msgid "Unable to create l2_gw_service for: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2084 +msgid "" +"Unable to remove gateway service from NSX plaform - the resource was not " +"found" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2113 +#, python-format +msgid "Unable to update name on NSX backend for network gateway: %s" +msgstr "" + +#: 
neutron/plugins/vmware/plugins/base.py:2142 +#, python-format +msgid "" +"Rolling back database changes for gateway device %s because of an error " +"in the NSX backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2187 +#: neutron/plugins/vmware/plugins/base.py:2225 +#, python-format +msgid "" +"Neutron gateway device: %(neutron_id)s; NSX transport node identifier: " +"%(nsx_id)s; Operational status: %(status)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2322 +#, python-format +msgid "" +"Removal of gateway device: %(neutron_id)s failed on NSX backend (NSX " +"id:%(nsx_id)s) because the NSX resource was not found" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2329 +#, python-format +msgid "" +"Removal of gateway device: %(neutron_id)s failed on NSX backend (NSX " +"id:%(nsx_id)s). Neutron and NSX states have diverged." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2377 +#, python-format +msgid "" +"Error while updating security profile %(uuid)s with name %(name)s: " +"%(error)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2409 +#, python-format +msgid "" +"The NSX security profile %(sec_profile_id)s, associated with the Neutron " +"security group %(sec_group_id)s was not found on the backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2417 +#, python-format +msgid "" +"An exception occurred while removing the NSX security profile " +"%(sec_profile_id)s, associated with Netron security group " +"%(sec_group_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2424 +#, python-format +msgid "Unable to remove security group %s from backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2437 +#, python-format +msgid "Port values not valid for protocol: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:143 +#, python-format +msgid "EDGE: router = %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:174 +msgid "EDGE: _vcns_create_ext_gw_port" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:183 +msgid "EDGE: _vcns_delete_ext_gw_port" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:338 +#, python-format +msgid "VCNS: delete default gateway %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:423 +#, python-format +msgid "An exception occurred while creating a port on lswitch %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:471 +#, python-format +msgid "Unable to create integration logic switch for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:481 +#, python-format +msgid "Unable to add router interface to integration lswitch for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:491 +#, python-format +msgid "Unable to create advance service router for %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:882 +#, python-format +msgid "Failed to create firewall on vShield Edge bound on router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:890 +msgid "Bad Firewall request Input" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:905 +#: neutron/plugins/vmware/plugins/service.py:1221 +msgid "router_id is not provided!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:909 +#: neutron/plugins/vmware/plugins/service.py:1609 +#, python-format +msgid "router_id:%s is not an advanced router!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:914 +msgid "A firewall is already associated with the router" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1111 +#, python-format +msgid "Failed to find the edge with vip_id: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1148 +#, python-format +msgid "" +"Operation can't be performed, Since resource %(model)s : %(id)s is in " +"DELETEing status!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1157 +#, python-format +msgid "Resource %(model)s : %(id)s not found!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1182 +#, python-format +msgid "Failed to create healthmonitor associated with pool id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1199 +msgid "Failed to create pool on vshield edge" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1218 +msgid "create_vip() called" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1226 +#, python-format +msgid "router_id: %s is not an advanced router!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1259 +msgid "Failed to create vip!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1299 +#, python-format +msgid "Failed to update vip with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1316 +#, python-format +msgid "Failed to delete vip with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1372 +#, python-format +msgid "Failed to update pool with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1394 +#: neutron/plugins/vmware/plugins/service.py:1441 +#: neutron/plugins/vmware/plugins/service.py:1464 +msgid "Failed to update pool with the member" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1420 +msgid "Failed to update old pool with the member" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1484 +#, python-format +msgid "Failed to update monitor with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1494 +msgid "Vcns right now can only support one monitor per pool" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1523 +msgid "Failed to associate monitor with pool!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1554 +msgid "Failed to update pool with pool_monitor!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1571 +#, python-format +msgid "" +"Failed to update ipsec vpn configuration on edge, since the router: %s " +"does not have a gateway yet!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1596 +msgid "Bad or unsupported Input request!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1599 +#, python-format +msgid "" +"Failed to update ipsec VPN configuration with vpnservice: " +"%(vpnservice_id)s on vShield Edge: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1606 +msgid "create_vpnservice() called" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1614 +#, python-format +msgid "a vpnservice is already associated with the router: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1727 +#, python-format +msgid "Start deploying %(edge_id)s for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1733 +#: neutron/plugins/vmware/plugins/service.py:1770 +#, python-format +msgid "Failed to deploy Edge for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1751 +#, python-format +msgid "Router %s not found" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1754 +#, python-format +msgid "Successfully deployed %(edge_id)s for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1786 +#, python-format +msgid "interface_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1789 +#, python-format +msgid "snat_create_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1792 +#, python-format +msgid "snat_delete_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1795 +#, python-format +msgid "dnat_create_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1798 +#, python-format +msgid "dnat_delete_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1801 +#, python-format +msgid "routes_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1804 +#, python-format +msgid "nat_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:24 +#, python-format +msgid "" +"\n" +"Service type = %s\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:27 +#, python-format +msgid "Service uuids = %s\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:28 +#, python-format +msgid "" +"Port uuids = %s\n" +"\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:37 +msgid "ID or name of network to run report on" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:46 +msgid "Migration report is:\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:56 +msgid "ID or name of network to migrate" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:66 +msgid "Migration has been successful:\n" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:135 +#, python-format +msgid "" +"VCNS: Failed to get edge status:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:162 +#, python-format +msgid "VCNS: start updating vnic %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:167 +#, python-format +msgid "" +"VCNS: Failed to update vnic %(config)s:\n" +"%(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:173 +#, python-format +msgid "VCNS: Failed to update vnic %d" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:181 +#, python-format +msgid "VCNS: update vnic %(index)d: %(addr)s %(netmask)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:190 +#, python-format +msgid "Vnic %d currently not supported" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:211 +#, python-format +msgid "VCNS: 
start deploying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:219 +#, python-format +msgid "VCNS: deploying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:224 +#, python-format +msgid "VCNS: deploy edge failed for router %s." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:243 +#, python-format +msgid "VCNS: Edge %s status query failed." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:248 +#, python-format +msgid "VCNS: Unable to retrieve edge %(edge_id)s status. Retry %(retries)d." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:255 +#, python-format +msgid "VCNS: Unable to retrieve edge %s status. Abort." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:259 +#, python-format +msgid "VCNS: Edge %s status" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:266 +#, python-format +msgid "VCNS: Failed to deploy edge %(edge_id)s for %(name)s, status %(status)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:273 +#, python-format +msgid "VCNS: Edge %(edge_id)s deployed for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:280 +#, python-format +msgid "VCNS: start destroying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:288 +#, python-format +msgid "" +"VCNS: Failed to delete %(edge_id)s:\n" +"%(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:294 +#, python-format +msgid "VCNS: Failed to delete %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:304 +#, python-format +msgid "" +"VCNS: Failed to get edges:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:382 +#, python-format +msgid "" +"VCNS: Failed to get nat config:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:389 +#, python-format +msgid "VCNS: start creating nat rules: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:405 +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:530 +#, python-format +msgid "" +"VCNS: Failed to create snat rule:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:413 +#, python-format +msgid "VCNS: create snat rule %(src)s/%(translated)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:436 +#, python-format +msgid "VCNS: start deleting %(type)s rules: %(addr)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:447 +#, python-format +msgid "" +"VCNS: Failed to delete snat rule:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:454 +#, python-format +msgid "VCNS: delete snat rule %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:472 +#, python-format +msgid "VCNS: create dnat rule %(dst)s/%(translated)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:493 +#, python-format +msgid "VCNS: delete dnat rule %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:517 +#, python-format +msgid "VCNS: start updating nat rules: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:538 +#, python-format +msgid "" +"VCNS: update nat rule\n" +"SNAT:%(snat)s\n" +"DNAT:%(dnat)s\n" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:576 +#, python-format 
+msgid "VCNS: start updating routes for %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:600 +#, python-format +msgid "" +"VCNS: Failed to update routes:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:652 +msgid "Failed to get service config" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:664 +msgid "Failed to enable loadbalancer service config" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:41 +#, python-format +msgid "Invalid action value %s in a firewall rule" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:50 +#, python-format +msgid "Invalid action value %s in a vshield firewall rule" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:190 +#, python-format +msgid "Failed to get firewall with edge id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:208 +#, python-format +msgid "No rule id:%s found in the edge_firewall_binding" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:218 +#, python-format +msgid "Failed to get firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:234 +#, python-format +msgid "Failed to update firewall with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:246 +#, python-format +msgid "Failed to delete firewall with edge_id:%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:260 +#, python-format +msgid "Failed to update firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:273 +#, python-format +msgid "Failed to delete firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:290 +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:318 +#, python-format +msgid "Failed to add firewall rule above: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:329 +#, python-format +msgid "Failed to append a firewall rulewith edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:350 +msgid "Can't execute insert rule operation without reference rule_id" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:50 +#, python-format +msgid "" +"Unsupported ike_version: %s! Only 'v1' ike version is supported on " +"vshield Edge!" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:64 +msgid "" +"IKEPolicy and IPsecPolicy should have consistent auth_algorithm, " +"encryption_algorithm and pfs for VSE!" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:72 +#, python-format +msgid "" +"Unsupported encryption_algorithm: %s! '3des', 'aes-128' and 'aes-256' are" +" supported on VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:81 +#, python-format +msgid "Unsupported pfs: %s! 'group2' and 'group5' are supported on VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:89 +#, python-format +msgid "" +"Unsupported transform protocol: %s! 'esp' is supported by default on VSE " +"right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:98 +#, python-format +msgid "" +"Unsupported encapsulation mode: %s! 'tunnel' is supported by default on " +"VSE right now." 
+msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:136 +#, python-format +msgid "Failed to update ipsec vpn configuration with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:143 +#, python-format +msgid "IPsec config not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:146 +#, python-format +msgid "Failed to delete ipsec vpn configuration with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:154 +#, python-format +msgid "Invalid %(protocol)s persistence method: %(type)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:180 +#, python-format +msgid "Failed to create app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:191 +#, python-format +msgid "Failed to create vip on vshield edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:209 +#, python-format +msgid "vip_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:226 +msgid "Failed to get vip on edge" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:243 +#, python-format +msgid "Failed to update app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:251 +#, python-format +msgid "Failed to update vip on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:262 +#, python-format +msgid "vip not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:265 +#, python-format +msgid "Failed to delete vip on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:270 +#, python-format +msgid "app profile not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:273 +#, python-format +msgid "Failed to delete app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:284 +msgid "Failed to create pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:301 +#, python-format +msgid "pool_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:311 +msgid "Failed to get pool on edge" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:323 +msgid "Failed to update pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:333 +msgid "Failed to delete pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:343 +#, python-format +msgid "Failed to create monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:361 +#, python-format +msgid "monitor_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:371 +#, python-format +msgid "Failed to get monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:388 +#, python-format +msgid "Failed to update monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:399 +msgid "Failed to delete monitor" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:54 +#, python-format +msgid "VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')" +msgstr "" + +#: 
neutron/plugins/vmware/vshield/vcns.py:62 +#, python-format +msgid "Header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:63 +#, python-format +msgid "Content: '%s'" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:35 +#, python-format +msgid "%(resource)s not found: %(msg)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:39 +#, python-format +msgid "An unknown exception %(status)s occurred: %(response)s." +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:50 +#, python-format +msgid "Resource %(uri)s has been redirected" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:54 +#, python-format +msgid "Request %(uri)s is Bad, response %(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:58 +#, python-format +msgid "Forbidden: %(uri)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:62 +#, python-format +msgid "Resource %(uri)s not found" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:66 +#, python-format +msgid "Media Type %(uri)s is not supported" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:70 +#, python-format +msgid "Service Unavailable: %(uri)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:46 +#, python-format +msgid "Invalid state %(state)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:50 +#, python-format +msgid "State %(state)d skipped. Current state %(current)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:96 +#, python-format +msgid "Task %(task)s encountered exception in %(func)s at state %(state)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:182 +#, python-format +msgid "Start task %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:188 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:208 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:231 +#, python-format +msgid "Task %(task)s encountered exception in %(cb)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:194 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:213 +#, python-format +msgid "Task %(task)s return %(status)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:296 +msgid "Stopping TaskManager" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:321 +msgid "TaskManager terminating because of an exception" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:343 +msgid "TaskManager terminated" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:375 +msgid "Exception in _check_pending_tasks" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:51 +#, python-format +msgid "Agent %s already present" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:52 +#, python-format +msgid "" +"Network %(network_id)s is scheduled to be hosted by DHCP agent " +"%(agent_id)s" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:70 +#, python-format +msgid "Network %s is hosted already" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:79 +#: neutron/scheduler/dhcp_agent_scheduler.py:88 +msgid "No more DHCP agents" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:108 +msgid "No non-hosted networks" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:119 +#, python-format +msgid "DHCP agent %s is not active" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:63 +#, python-format +msgid "No enabled L3 agent on host %s" +msgstr "" + +#: 
neutron/scheduler/l3_agent_scheduler.py:68 +#, python-format +msgid "L3 agent %s is not active" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:76 +#: neutron/scheduler/l3_agent_scheduler.py:127 +#, python-format +msgid "Router %(router_id)s has already been hosted by L3 agent %(agent_id)s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:95 +msgid "No non-hosted routers" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:109 +#, python-format +msgid "No routers compatible with L3 agent configuration on host %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:135 +msgid "No active L3 agents" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:140 +#, python-format +msgid "No L3 agents can host the router %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:153 +#, python-format +msgid "Router %(router_id)s is scheduled to L3 agent %(agent_id)s" +msgstr "" + +#: neutron/server/__init__.py:42 +msgid "" +"ERROR: Unable to find configuration file via the default search paths " +"(~/.neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" +msgstr "" + +#: neutron/server/__init__.py:54 +msgid "RPC was already started in parent process by plugin." +msgstr "" + +#: neutron/server/__init__.py:66 +#, python-format +msgid "ERROR: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:27 +msgid "" +"Defines providers for advanced services using the format: " +"<service_type>:<name>:<driver>[:default]" +msgstr "" + +#: neutron/services/provider_configuration.py:45 +#, python-format +msgid "Provider name is limited by 255 characters: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:54 +msgid "Invalid service provider format" +msgstr "" + +#: neutron/services/provider_configuration.py:62 +#, python-format +msgid "Invalid provider format. 
Last part should be 'default' or empty: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:68 +#, python-format +msgid "Service type '%(svc_type)s' is not allowed, allowed types: %(allowed)s" +msgstr "" + +#: neutron/services/provider_configuration.py:82 +#, python-format +msgid "" +"Service provider '%(provider)s' could not be found for service type " +"%(service_type)s" +msgstr "" + +#: neutron/services/provider_configuration.py:87 +#, python-format +msgid "Service type %(service_type)s does not have a default service provider" +msgstr "" + +#: neutron/services/provider_configuration.py:92 +#, python-format +msgid "" +"Resource '%(resource_id)s' is already associated with provider " +"'%(provider)s' for service type '%(service_type)s'" +msgstr "" + +#: neutron/services/provider_configuration.py:105 +#, python-format +msgid "Driver %s is not unique across providers" +msgstr "" + +#: neutron/services/provider_configuration.py:115 +#, python-format +msgid "Multiple default providers for service %s" +msgstr "" + +#: neutron/services/provider_configuration.py:126 +#, python-format +msgid "Multiple providers specified for service %s" +msgstr "" + +#: neutron/services/service_base.py:70 +#, python-format +msgid "No providers specified for '%s' service, exiting" +msgstr "" + +#: neutron/services/service_base.py:81 +#, python-format +msgid "Loaded '%(provider)s' provider for service %(service_type)s" +msgstr "" + +#: neutron/services/service_base.py:87 +#, python-format +msgid "Error loading provider '%(provider)s' for service %(service_type)s" +msgstr "" + +#: neutron/services/service_base.py:98 +#, python-format +msgid "Default provider is not specified for service type %s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:43 +msgid "set_firewall_status() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:51 +#, python-format +msgid "Firewall %(fw_id)s in PENDING_DELETE state, not changing to %(status)s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:66 +msgid "firewall_deleted() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:74 +#, python-format +msgid "Firewall %(fw)s unexpectedly deleted by agent, status was %(status)s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:82 +msgid "get_firewalls_for_tenant() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:91 +msgid "get_firewalls_for_tenant_without_rules() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:97 +msgid "get_tenants_with_firewalls() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:145 +#, python-format +msgid "" +"Exceeded allowed count of firewalls for tenant %(tenant_id)s. Only one " +"firewall is supported per tenant." 
+msgstr "" + +#: neutron/services/firewall/agents/firewall_agent_api.py:31 +msgid "Name of the FWaaS Driver" +msgstr "" + +#: neutron/services/firewall/agents/firewall_agent_api.py:35 +msgid "Enable FWaaS" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:43 +msgid "Retrieve Firewall with rules from Plugin" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:52 +msgid "Retrieve Tenants with Firewalls configured from Plugin" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:64 +msgid "Initializing firewall agent" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:72 +#, python-format +msgid "FWaaS Driver Loaded: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:75 +#, python-format +msgid "Error importing FWaaS device driver: %s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:112 +#, python-format +msgid "%(func_name)s from agent for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:120 +#, python-format +msgid "No Routers on tenant: %s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:127 +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:260 +#, python-format +msgid "Apply fw on Router List: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:139 +#, python-format +msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:154 +#, python-format +msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:171 +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:187 +#, python-format +msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:199 +#, python-format +msgid "Process router add, router_id: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:210 +#, python-format +msgid "Process router add, fw_list: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:227 +#, python-format +msgid "FWaaS RPC info call failed for '%s'." +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:242 +#, python-format +msgid "Tenants with Firewalls: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:252 +#, python-format +msgid "Router List: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:254 +#, python-format +msgid "fw_list: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:271 +msgid "Failed fwaas process services sync" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:29 +msgid "vArmour director ip" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:31 +msgid "vArmour director port" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:33 +msgid "vArmour director username" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:35 +msgid "vArmour director password" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:45 +msgid "An unknown exception." 
+msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:59 +msgid "Invalid login credential." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:65 +msgid "vArmourRestAPI: started" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:98 +#, python-format +msgid "vArmourRestAPI: %(server)s %(port)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:104 +#, python-format +msgid "vArmourRestAPI Sending: %(method)s %(action)s %(headers)s %(body_data)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:115 +#, python-format +msgid "vArmourRestAPI Response: %(status)s %(resp_str)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:123 +msgid "vArmourRestAPI: Could not establish HTTP connection" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:49 +msgid "vArmourL3NATAgent: __init__" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:63 +#, python-format +msgid "_router_added: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:70 +#, python-format +msgid "_router_removed: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:111 +#, python-format +msgid "_va_unset_zone_interfaces: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:143 +#, python-format +msgid "_va_set_interface_ip: %(pif)s %(cidr)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:163 +#, python-format +msgid "_va_config_trusted_zone: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:194 +#, python-format +msgid "_va_config_untrusted_zone: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:209 +#, python-format +msgid "_va_config_untrusted_zone: gw=%r" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:222 +#, python-format +msgid "_va_config_router_snat_rules: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:254 +#, python-format +msgid "_va_config_floating_ips: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:276 +#, python-format +msgid "process_router: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:287 +msgid "Unable to parse interface mapping." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:290 +msgid "Unable to read interface mapping." 
+msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:309 +#, python-format +msgid "external_gateway_added: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:47 +msgid "Initializing fwaas iptables driver" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:50 +#, python-format +msgid "Creating firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:59 +#, python-format +msgid "Failed to create firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:63 +#, python-format +msgid "Deleting firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:75 +#, python-format +msgid "Failed to delete firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:79 +#, python-format +msgid "Updating firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:88 +#, python-format +msgid "Failed to update firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:92 +#, python-format +msgid "Applying firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:111 +#, python-format +msgid "Failed to apply default policy on firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:28 +msgid "Initializing fwaas vArmour driver" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:33 +#, python-format +msgid "create_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:38 +#, python-format +msgid "update_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:46 +#, python-format +msgid "delete_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:51 +#, python-format +msgid "apply_default_policy (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:61 +#, python-format +msgid "Updating firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:110 +msgid "Unsupported IP version rule." 
+msgstr "" + +#: neutron/services/l3_router/l3_apic.py:55 +msgid "L3 Router Service Plugin for basic L3 using the APIC" +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:96 +#, python-format +msgid "Error attaching subnet %(subnet_id)s to router %(router_id)s" +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:131 +#, python-format +msgid "Error detaching subnet %(subnet_id)s from router %(router_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:101 +#, python-format +msgid "Pool %(pool_id)s has already been hosted by lbaas agent %(agent_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:109 +#, python-format +msgid "No active lbaas agents for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:115 +#, python-format +msgid "No lbaas agent supporting device driver %s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:124 +#, python-format +msgid "Pool %(pool_id)s is scheduled to lbaas agent %(agent_id)s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:82 +#, python-format +msgid "Delete associated loadbalancer pools before removing providers %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:91 +#, python-format +msgid "Error retrieving driver for provider %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:99 +#, python-format +msgid "Error retrieving provider for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:190 +#, python-format +msgid "Failed to delete pool %s, putting it in ERROR state" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent.py:36 +msgid "Seconds between periodic task runs" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:39 +msgid "Drivers used to manage loadbalancing devices" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:45 +#, python-format +msgid "Unknown device with pool_id %(pool_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:94 +#, python-format +msgid "Error importing loadbalancer device driver: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:101 +#, python-format +msgid "Multiple device drivers with the same name found: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:141 +#, python-format +msgid "Error updating statistics on pool %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:157 +msgid "Unable to retrieve ready devices" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:174 +#: neutron/services/loadbalancer/agent/agent_manager.py:239 +#, python-format +msgid "No device driver on agent: %s." +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:184 +#, python-format +msgid "Unable to deploy instance for pool: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:194 +#, python-format +msgid "Unable to destroy device for pool: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:207 +#, python-format +msgid "%(operation)s %(obj)s %(id)s failed on device driver %(driver)s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:333 +#, python-format +msgid "Destroying pool %s due to agent disabling" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:336 +#, python-format +msgid "Agent_updated by server side %s!" 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:40 +msgid "Driver to use for scheduling pool to a default loadbalancer agent" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:48 +msgid "Device driver for agent should be specified in plugin driver." +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:72 +#, python-format +msgid "Multiple lbaas agents found on host %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:152 +#, python-format +msgid "Unknown object type: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:163 +#, python-format +msgid "" +"Cannot update status: %(obj_type)s %(obj_id)s not found in the DB, it was" +" probably deleted concurrently" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:186 +#, python-format +msgid "Unable to find port %s to plug." +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:210 +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:227 +#, python-format +msgid "" +"Unable to find port %s to unplug. This can occur when the Vip has been " +"deleted first." +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:32 +msgid "Load Balancer image id (Embrane LB)" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:34 +msgid "In band Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:36 +msgid "Out of band Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:38 +msgid "Management Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:40 +msgid "Dummy user traffic Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:44 +msgid "choose LB image flavor to use, accepted values: small, medium" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:47 +msgid "resource synchronization interval in seconds" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:51 +#, python-format +msgid "%s, probably was cancelled through the heleos UI" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:58 +#, python-format +msgid "" +"Failed to delete the backend load balancer for reason %s. 
Please remove " +"it manually through the heleos UI" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:61 +#, python-format +msgid "" +"No subnet is associated to member %s (required to identify the proper " +"load balancer port)" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:88 +msgid "Connection limit is not supported by Embrane LB" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:94 +#, python-format +msgid "Session persistence %s not supported by Embrane LBaaS" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:132 +#, python-format +msgid "Subnet assigned to pool %s doesn't exist, backend port can't be created" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py:111 +#, python-format +msgid "" +"The load balancer %s had no physical representation, likely already " +"deleted" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:46 +msgid "Location to store config and state files" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:52 +msgid "The user group" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:58 +msgid "" +"When deleting and re-adding the same vip, send this many gratuitous ARPs to " +"flush the ARP cache in the Router. Set it below or equal to 0 to disable " +"this feature." +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:75 +#, python-format +msgid "Error importing interface driver: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:171 +#, python-format +msgid "Stats socket not found for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:213 +#, python-format +msgid "Error while connecting to stats socket: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:392 +#, python-format +msgid "Unable to kill haproxy process: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:43 +#, python-format +msgid "NCC Error %d" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:54 +msgid "No NetScaler Control Center URI specified. Cannot connect." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:133 +#, python-format +msgid "Connection error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:138 +#, python-format +msgid "SSL error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:143 +#, python-format +msgid "Request to %s timed out" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:150 +msgid "Request did not specify a valid URL" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:154 +#, python-format +msgid "Too many redirects occurred for request to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:158 +#, python-format +msgid "A request error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:163 +#, python-format +msgid "An unknown error occurred during request to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:171 +#, python-format +msgid "Unable to login. 
Invalid credentials passed.for: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:175 +#, python-format +msgid "Failed %(method)s operation on %(url)s status code: %(response_status)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:29 +msgid "The URL to reach the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:33 +msgid "Username to login to the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:37 +msgid "Password to login to the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:75 +#, python-format +msgid "NetScaler driver vip creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:90 +#, python-format +msgid "NetScaler driver vip %(vip_id)s update: %(vip_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:105 +#, python-format +msgid "NetScaler driver vip removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:126 +#, python-format +msgid "NetScaler driver pool creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:141 +#, python-format +msgid "NetScaler driver pool %(pool_id)s update: %(pool_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:156 +#, python-format +msgid "NetScaler driver pool removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:173 +#, python-format +msgid "NetScaler driver poolmember creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:191 +#, python-format +msgid "NetScaler driver poolmember %(member_id)s update: %(member_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:208 +#, python-format +msgid "NetScaler driver poolmember removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:226 +#, python-format +msgid "" +"NetScaler driver healthmonitor creation for pool %(pool_id)s: " +"%(monitor_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:249 +#, python-format +msgid "NetScaler driver healthmonitor %(monitor_id)s update: %(monitor_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:270 +#, python-format +msgid "NetScaler driver healthmonitor %(monitor_id)sremoval for pool %(pool_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:290 +#, python-format +msgid "NetScaler driver pool stats retrieval: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:415 +#, python-format +msgid "" +"Filtering ports based on network_id=%(network_id)s, " +"tenant_id=%(tenant_id)s, device_id=%(device_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:430 +#, python-format +msgid "Found an existing SNAT port for subnet %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:433 +#, python-format +msgid "Found no SNAT ports for subnet %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:454 +#, python-format +msgid "Created SNAT port: %s" +msgstr "" + +#: 
neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:462 +#, python-format +msgid "Removed SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:469 +#, python-format +msgid "No SNAT port found for subnet %s. Creating one..." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:477 +#, python-format +msgid "SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:487 +#, python-format +msgid "Removing SNAT port for subnet %s as this is the last pool using it..." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:62 +msgid "IP address of vDirect server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:64 +msgid "IP address of secondary vDirect server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:67 +msgid "vDirect user name." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:70 +msgid "vDirect user password." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:73 +msgid "Service ADC type. Default: VA." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:76 +msgid "Service ADC version." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:79 +msgid "Enables or disables the Service HA pair. Default: False." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:83 +msgid "Service throughput. Default: 1000." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:86 +msgid "Service SSL throughput. Default: 100." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:89 +msgid "Service compression throughput. Default: 100." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:92 +msgid "Size of service cache. Default: 20." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:95 +msgid "Name of l2_l3 workflow. Default: openstack_l2_l3." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:99 +msgid "Name of l4 workflow. Default: openstack_l4." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:107 +msgid "Parameter for l2_l3 workflow constructor." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:114 +msgid "Parameter for l2_l3 workflow setup." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:117 +msgid "List of actions that are not pushed to the completion queue." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:121 +msgid "Name of the l4 workflow action. Default: BaseCreate." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:125 +msgid "Resource pool IDs." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:128 +msgid "A required VLAN for the interswitch link to use." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:131 +msgid "" +"Enable or disable Alteon interswitch link for stateful session failover. " +"Default: False." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:231 +#, python-format +msgid "" +"vip: %(vip)s, extended_vip: %(extended_vip)s, service_name: " +"%(service_name)s, " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:265 +#, python-format +msgid "Retrieved pip nport: %(port)r for vip: %(vip)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:273 +#, python-format +msgid "Found no pip nports associated with vip: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:282 +#, python-format +msgid "Failed to remove workflow %s. Going to set vip to ERROR status" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:296 +#, python-format +msgid "pip nport id: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:300 +#, python-format +msgid "pip nport delete failed: %r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:388 +#, python-format +msgid "" +"_handle_pool_health_monitor. health_monitor = %(hm_id)s pool_id = " +"%(pool_id)s delete = %(delete)s vip_id = %(vip_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:419 +msgid "Starting operation completion handling thread" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:449 +#, python-format +msgid "_update_workflow response: %s " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:458 +#: neutron/services/loadbalancer/drivers/radware/driver.py:489 +#, python-format +msgid "Pushing operation %s to the queue" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:466 +#, python-format +msgid "Remove the workflow %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:474 +#, python-format +msgid "Post-remove workflow function %r completed" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:478 +#, python-format +msgid "Post-remove workflow function %r failed" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:595 +#, python-format +msgid "create_workflow response: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:664 +#, python-format +msgid "" +"vDirectRESTClient:init server=%(server)s, secondary " +"server=%(sec_server)s, port=%(port)d, ssl=%(ssl)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:670 +#, python-format +msgid "Fliping servers. Current is: %(server)s, switching to %(secondary)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:683 +msgid "" +"REST client is not able to recover since only one vDirect server is " +"configured." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:691 +#, python-format +msgid "vDirect server is not responding (%s)." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:695 +#, python-format +msgid "vDirect server is not active (%s)." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:723 +msgid "vdirectRESTClient: Could not establish HTTPS connection" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:730 +msgid "vdirectRESTClient: Could not establish HTTP connection" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:747 +#, python-format +msgid "vdirectRESTClient: %(action)s failure, %(e)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:815 +#, python-format +msgid "" +"Operation %(oper)s is completed after %(sec_to_completion)d sec with " +"success status: %(success)s :" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:827 +#, python-format +msgid "Operation %(operation)s failed. Reason: %(msg)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:859 +#, python-format +msgid "Operation %s is not completed yet.." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:874 +msgid "Exception was thrown inside OperationCompletionHandler" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:883 +#, python-format +msgid "Post-operation function %(func)r completed after operation %(oper)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:889 +#, python-format +msgid "Post-operation function %(func)r failed after operation %(oper)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:930 +#, python-format +msgid "_update: %s " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:970 +#, python-format +msgid "_remove_object_from_db %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:22 +msgid "An unknown exception occurred in Radware LBaaS provider." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:26 +msgid "" +"vDirect user/password missing. Specify in configuration file, under " +"[radware] section" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:31 +#, python-format +msgid "" +"Workflow %(workflow)s is missing on vDirect server. Upload missing " +"workflow" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:36 +#, python-format +msgid "" +"REST request failed with status %(status)s. Reason: %(reason)s, " +"Description: %(description)s. Success status codes are %(success_codes)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:42 +#, python-format +msgid "%(operation)s operation is not supported for %(entity)s." 
+msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:70 +msgid "Metering driver" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:72 +msgid "Interval between two metering measures" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:74 +msgid "Interval between two metering reports" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:98 +#, python-format +msgid "Loading Metering driver %s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:100 +msgid "A metering driver must be specified" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:115 +#, python-format +msgid "Send metering report: %s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:179 +#, python-format +msgid "Driver %(driver)s does not implement %(func)s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:183 +#, python-format +msgid "Driver %(driver)s:%(func)s runtime error" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:218 +msgid "Get router traffic counters" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:222 +msgid "Update metering rules from agent" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:227 +msgid "Creating a metering label from agent" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:234 +msgid "Delete a metering label from agent" +msgstr "" + +#: neutron/services/metering/drivers/iptables/iptables_driver.py:90 +#, python-format +msgid "Loading interface driver %s" +msgstr "" + +#: neutron/services/vpn/agent.py:26 +msgid "The vpn device drivers Neutron will use" +msgstr "" + +#: neutron/services/vpn/plugin.py:46 +#, python-format +msgid "VPN plugin using service driver: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:71 +#, python-format +msgid "RESPONSE: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:84 +#, python-format +msgid "%(method)s: Request for %(resource)s payload: %(payload)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:91 +#, python-format +msgid "%(method)s Took %(time).2f seconds to process" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:97 +#, python-format +msgid "%(method)s: Request timeout%(ssl)s (%(timeout).3f sec) for CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:106 +#, python-format +msgid "%(method)s: Unable to connect to CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:110 +#, python-format +msgid "%(method)s: Unexpected error for CSR (%(host)s): %(error)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:116 +#, python-format +msgid "%(method)s: Completed [%(status)s]" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:131 +#, python-format +msgid "%(auth)s with CSR %(host)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:138 +#, python-format +msgid "Successfully authenticated with CSR %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:140 +#, python-format +msgid "Failed authentication with CSR %(host)s [%(status)s]" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:175 +#, python-format +msgid "%(method)s: Request timeout for CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:43 
+msgid "Status check interval for Cisco CSR IPSec connections" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:54 +#, python-format +msgid "Cisco CSR failed to create %(resource)s (%(which)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:58 +#, python-format +msgid "Cisco CSR failed to change %(tunnel)s admin state to %(state)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:63 +#, python-format +msgid "" +"Required %(resource)s attribute %(attr)s mapping for Cisco CSR is missing" +" in device driver" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:68 +#, python-format +msgid "" +"Device driver does not have a mapping of '%(value)s' for attribute " +"%(attr)s of %(resource)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:83 +#, python-format +msgid "Scanning config files %s for Cisco CSR configurations" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:88 +#, python-format +msgid "Config file parse error: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:92 +#, python-format +msgid "Unable to parse config files %s for Cisco CSR info" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:102 +#, python-format +msgid "Ignoring Cisco CSR configuration entry - router IP %s is not valid" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:113 +#, python-format +msgid "Ignoring Cisco CSR for router %(router)s - missing %(field)s setting" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:121 +#, python-format +msgid "Ignoring Cisco CSR for router %s - timeout is not a floating point number" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:130 +#, python-format +msgid "Ignoring Cisco CSR for subnet %s - REST management is not an IP address" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:137 +#, python-format +msgid "Ignoring Cisco CSR for router %s - local tunnel is not an IP address" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:147 +#, python-format +msgid "Found CSR for router %(router)s: %(info)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:213 +#, python-format +msgid "Loaded %(num)d Cisco CSR configuration%(plural)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:217 +#, python-format +msgid "No Cisco CSR configurations found in: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:228 +#, python-format +msgid "Handling VPN service update notification '%s'" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:250 +#, python-format +msgid "Update: Existing connection %s changed" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:257 +#, python-format +msgid "Update: Connection %s no longer admin down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:263 +#, python-format +msgid "Update: Connection %s forced to admin down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:271 +#, python-format +msgid "Update: Created new connection %s in admin down state" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:276 +#, python-format +msgid "Update: Created new connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:288 +#, python-format +msgid "" +"Update: Skipping VPN service %(service)s as its router (%(csr_id)s) is " +"not associated with a Cisco CSR" +msgstr "" + +#: 
neutron/services/vpn/device_drivers/cisco_ipsec.py:294 +#, python-format +msgid "Update: Existing VPN service %s detected" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:298 +#, python-format +msgid "Update: New VPN service %s detected" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:307 +msgid "Update: Completed update processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:337 +#, python-format +msgid "Mark: %(service)d VPN services and %(conn)d IPSec connections marked dirty" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:359 +#, python-format +msgid "" +"Sweep: Removed %(service)d dirty VPN service%(splural)s and %(conn)d " +"dirty IPSec connection%(cplural)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:374 +#, python-format +msgid "Report: Collecting status for IPSec connections on VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:380 +#, python-format +msgid "Connection %s forced down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:384 +#, python-format +msgid "Connection %(conn)s reported %(status)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:389 +#, python-format +msgid "Report: Adding info for IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:409 +#, python-format +msgid "Report: Adding info for VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:431 +msgid "Report: Starting status report processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:433 +#, python-format +msgid "Report: Collecting status for VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:439 +msgid "Sending status report update to plugin" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:441 +msgid "Report: Completed status report processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:736 +#, python-format +msgid "Unable to create %(resource)s %(which)s: %(status)d" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:749 +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:777 +#, python-format +msgid "Internal error - '%s' is not defined" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:762 +#, python-format +msgid "Unable to delete %(resource)s %(which)s: %(status)d" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:771 +#, python-format +msgid "Performing rollback action %(action)s for resource %(resource)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:797 +#, python-format +msgid "Creating IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:830 +#, python-format +msgid "FAILED: Create of IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:833 +#, python-format +msgid "SUCCESS: Created IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:842 +#, python-format +msgid "Deleting IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:844 +#, python-format +msgid "Unable to find connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:848 +#, python-format +msgid "SUCCESS: Deleted IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:856 +#, 
python-format +msgid "Unable to change %(tunnel)s admin state to %(state)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:46 +msgid "Location to store ipsec server config files" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:49 +msgid "Interval for checking ipsec status" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:59 +msgid "Template file for ipsec configuration" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:65 +msgid "Template file for ipsec secret configuration" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:247 +#, python-format +msgid "Failed to enable vpn process on router %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:258 +#, python-format +msgid "Failed to disable vpn process on router %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/__init__.py:76 +#, python-format +msgid "Notify agent at %(topic)s.%(host)s the message %(method)s %(args)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:46 +#, python-format +msgid "Fatal - %(reason)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:80 +#, python-format +msgid "No available Cisco CSR %(type)s IDs from %(min)d..%(max)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:135 +#, python-format +msgid "" +"Database inconsistency between IPSec connection and Cisco CSR mapping " +"table (%s)" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:161 +#, python-format +msgid "Reserved new CSR ID %(csr_id)d for %(policy)s ID %(policy_id)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:188 +#, python-format +msgid "" +"Mappings for IPSec connection %(conn)s - tunnel=%(tunnel)s " +"ike_policy=%(csr_ike)d ipsec_policy=%(csr_ipsec)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:197 +#, python-format +msgid "" +"Existing entry for IPSec connection %s not found in Cisco CSR mapping " +"table" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:224 +#, python-format +msgid "" +"Attempt to create duplicate entry in Cisco CSR mapping table for " +"connection %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:227 +#, python-format +msgid "" +"Mapped connection %(conn_id)s to Tunnel%(tunnel_id)d using IKE policy ID " +"%(ike_id)d and IPSec policy ID %(ipsec_id)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:239 +#, python-format +msgid "Removed mapping for connection %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_ipsec.py:39 +#, python-format +msgid "" +"Cisco CSR does not support %(resource)s attribute %(key)s with value " +"'%(value)s'" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_ipsec.py:160 +#, python-format +msgid "IPSec connection %s validated for Cisco CSR" +msgstr "" + +#: neutron/tests/unit/test_api_v2_resource.py:174 +#: neutron/tests/unit/test_api_v2_resource.py:244 +msgid "Unmapped error" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:72 +#, python-format +msgid "" +"Request: action=%(action)s, uri=%(uri)r, body=%(body)s, " +"headers=%(headers)s" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:124 +#, python-format +msgid "No floating IPs in requesturi=%(uri)s, body=%(body)s" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:133 +#, python-format +msgid "Expected floating IPs from multiple tenants.uri=%(uri)s, body=%(body)s" +msgstr "" + +#: 
neutron/tests/unit/bigswitch/fake_server.py:178 +#, python-format +msgid "No host cert for %(server)s in cert %(cert)s" +msgstr "" + +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:217 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:239 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:258 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:281 +#, python-format +msgid "Unexpected error code: %s" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:32 +#, python-format +msgid "" +"%(method)s called with network settings %(current)s (original settings " +"%(original)s) and network segments %(segments)s" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:59 +#, python-format +msgid "" +"%(method)s called with subnet settings %(current)s (original settings " +"%(original)s)" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:85 +#, python-format +msgid "" +"%(method)s called with port settings %(current)s (original settings " +"%(original)s) bound to segment %(segment)s (original segment " +"%(original_segment)s) using driver %(driver)s (original driver " +"%(original_driver)s) on network %(network)s" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:65 +#, python-format +msgid "(create_tenant) OFC tenant %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:77 +#, python-format +msgid "(delete_tenant) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:79 +msgid "delete_tenant: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:86 +#, python-format +msgid "(create_network) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:89 +#, python-format +msgid "(create_network) OFC network %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:100 +#, python-format +msgid "(update_network) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:104 +msgid "update_network: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:112 +#, python-format +msgid "(delete_network) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:114 +msgid "delete_network: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:121 +#, python-format +msgid "(create_port) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:124 +#, python-format +msgid "(create_port) OFC port %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:138 +#, python-format +msgid "(delete_port) OFC port %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:140 +msgid "delete_port: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:173 +#, python-format +msgid "(create_router) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:176 +#, python-format +msgid "(create_router) OFC router %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:179 +msgid "Operation on OFC is failed" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:193 +#: neutron/tests/unit/nec/stub_ofc_driver.py:283 +#, python-format +msgid "(delete_router) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:195 +msgid "delete_router: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:203 +#, python-format +msgid "(add_router_interface) ip_address %s is not a 
valid format (a.b.c.d/N)." +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:208 +#, python-format +msgid "(add_router_interface) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:211 +#, python-format +msgid "(add_router_interface) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:218 +#, python-format +msgid "add_router_interface: SUCCEED (if_id=%s)" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:226 +#: neutron/tests/unit/nec/stub_ofc_driver.py:243 +#, python-format +msgid "(delete_router_interface) OFC router interface %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:235 +msgid "update_router_route: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:246 +msgid "delete_router_interface: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:256 +#, python-format +msgid "(add_router_route) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:261 +#, python-format +msgid "(add_router_route) route to \"%s\" already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:266 +#, python-format +msgid "add_router_route: SUCCEED (route_id=%s)" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:275 +#, python-format +msgid "(delete_router_route) OFC router route %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:277 +msgid "delete_router_route: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:290 +#, python-format +msgid "list_router_routes: routes=%s" +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:86 +msgid "The specified OFC resource (/somewhere) is not found." +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:90 +#: neutron/tests/unit/nec/test_ofc_client.py:96 +#: neutron/tests/unit/nec/test_ofc_client.py:105 +msgid "An OFC exception has occurred: Operation on OFC failed" +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:112 +msgid "An OFC exception has occurred: Failed to connect OFC : " +msgstr "" + +#: neutron/tests/unit/vmware/apiclient/fake.py:406 +#, python-format +msgid "lswitch:%s not found" +msgstr "" + +#: neutron/tests/unit/vmware/apiclient/fake.py:415 +#, python-format +msgid "lrouter:%s not found" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:104 +#, python-format +msgid "Job %s does not nexist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:116 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:127 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:144 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:162 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:184 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:206 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:290 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:304 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:318 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:360 +#, python-format +msgid "Edge %s does not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:194 +#, python-format +msgid "Rule id %d doest not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:257 +#, python-format +msgid "Lswitch %s does not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/test_edge_router.py:128 +msgid "Tasks not completed" +msgstr "" + diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/es/LC_MESSAGES/neutron-log-error.po 
b/icehouse-patches/neutron/dvr-patch/neutron/locale/es/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..4c722b4f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/es/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,170 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Spanish (http://www.transifex.com/projects/p/neutron/language/" +"es/)\n" +"Language: es\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Se está descartando excepción original: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "La excepción inesperada ha ocurrido %d vez(veces)... reintentando." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "Excepción durante limpieza de rpc." + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Excepción no controlada" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "Excepción de base de datos recortada." + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Excepción durante el manejo de mensajes" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Excepción en la operación de formato de cadena" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Devolviendo excepción %s al interlocutor" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Fallo al procesar mensaje ... omitiendo." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Fallo al procesar mensaje ... se encolará nuevamente." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"El servidor AMQP en %(hostname)s:%(port)d es inalcanzable: %(err_str)s. Se " +"volverá a intentar en %(sleep_time)d segundos." 
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" +"No se ha podido declarar consumidor para el tema '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "No se ha podido consumir el mensaje de la cola: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" +"No se ha podido publicar el mensaje para el tema '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "No se ha podido procesar el mensaje... saltándoselo." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"No se puede conectar con el servidor AMQP: %(e)s. En reposo durante " +"%(delay)s segundos" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Error al procesar el mensaje. Saltándolo." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "Ha fallado la serialización JSON." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "No se puede cerrar el socket ZeroMQ." + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "El mensaje de RPC no incluía método." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Ha fallado la creación de archivo de socket de tema." + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Almacenamiento intermedio de retraso por tema local para el tema %(topic)s. " +"Descartando mensaje." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "El directorio IPC requerido no existe en %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permiso denegado para el directorio IPC en %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"No se ha podido crear el daemon de destinatario ZeroMQ. Es posible que ya se " +"esté utilizando el socket." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "Versión de sobre de ZMQ no soportada o desconocida." diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/es/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/es/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..71c8f145 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/es/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Spanish (http://www.transifex.com/projects/p/neutron/language/" +"es/)\n" +"Language: es\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "Se ha captado %s, saliendo" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "El proceso padre se ha detenido inesperadamente, saliendo" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Bifurcación demasiado rápida, en reposo" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Se ha iniciado el hijo %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Iniciando %d trabajadores" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Hijo %(pid)d matado por señal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "El hijo %(pid)s ha salido con el estado %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Se ha captado %s, deteniendo hijos" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "En espera de %d hijos para salir" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Eliminando registro duplicado con id: %(id)s de la tabla: %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Volviendo a conectar con el servidor AMQP en %(hostname)s:%(port)d " + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Conectado al servidor AMQP en %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Conectado con el servidor AMQP en %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registrando reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "Registrado en reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consumiendo socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creando proxy para el tema: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Se va a saltar el registro del tema. Ya está registrado." + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker no registrado: %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-critical.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-critical.po new file mode 100644 index 00000000..52ae6541 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-critical.po @@ -0,0 +1,23 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-20 06:09+0000\n" +"PO-Revision-Date: 2014-05-30 06:24+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" +"fr/)\n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#~ msgid "Dummy message for transifex setup." +#~ msgstr "message fictif pour la configuration transifex" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-error.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..c3538790 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,171 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-20 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" +"fr/)\n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Exception d'origine en cours de suppression : %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Exception inattendue survenue %d fois... Nouvel essai." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:190 +msgid "Exception during rpc cleanup." +msgstr "Exception pendant le nettoyage rpc." + +#: neutron/openstack/common/service.py:279 +msgid "Unhandled exception" +msgstr "Exception non gérée" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "DB dépassé limite de tentatives" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "erreurs de connexion DB" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "Exception BD encapsulée." + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exception pendant le traitement des messages" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Exception dans l'opération de format de chaîne" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Renvoi de l'exception %s à l'appelant" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Echec de traitement du message... Message ignoré." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Échec de traitement du message... Message remis en file d'attente." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"Le serveur AMQP sur %(hostname)s:%(port)d est inaccessible : %(err_str)s. " +"Nouvelle tentative dans %(sleep_time)d secondes." 
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" +"Echec de la déclaration du consommateur pour la rubrique '%(topic)s' : " +"%(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Echec de la consommation du message depuis la file d'attente : %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" +"Echec de la publication du message dans la rubrique '%(topic)s' : %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Echec du traitement du message... Message ignoré." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"Impossible de se connecter au serveur AMQP : %(e)s. En veille pendant " +"%(delay)s secondes" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Erreur lors du traitement du message. Message ignoré." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "Echec de la sérialisation JSON." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "Le message d'appel de procédure distante n'a pas inclus la méthode." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "La création du fichier socket de la rubrique a échoué." + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Mémoire tampon de commandes en attente par rubrique locale saturée pour la " +"rubrique %(topic)s. Suppression du message." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "Répertoire IPC requis n'existe pas à %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permission refusée au répertoire IPC à %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"Impossible de créer le démon récepteur ZeroMQ. Le socket est sans doute déjà " +"en cours d'utilisation." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "Version de l'enveloppe ZMQ non prise en charge ou inconnue." diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..2324a446 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" +"fr/)\n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s interceptée, sortie" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Processus parent arrêté de manière inattendue, sortie" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Bifurcation trop rapide, pause" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Enfant démarré %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Démarrage des travailleurs %d" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Enfant %(pid)d arrêté par le signal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Processus fils %(pid)s terminé avec le status %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s interceptée, arrêt de l'enfant" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "En attente %d enfants pour sortie" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Suppression ligne en double avec l'ID : %(id)s de la table : %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconnexion au serveur AMQP sur %(hostname)s : %(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connecté au serveur AMQP sur %(hostname)s : %(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connecté au serveur AMQP sur %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Enregistrement de Reactor" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "Reactor entrant enregistré" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consommation de socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Création du proxy pour la rubrique : %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Passez l'enregistrement de la rubrique. Rubrique déjà enregistrée." + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker non enregistré : %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/it/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/it/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..c8692b2e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/it/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Italian (http://www.transifex.com/projects/p/neutron/language/" +"it/)\n" +"Language: it\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "Rilevato %s, esistente" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Il processo principale è stato interrotto inaspettatamente, uscire" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Sblocco troppo veloce, attendere" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Child avviato %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Avvio %d operatori" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d interrotto dal segnale %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s terminato con stato %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Intercettato %s, arresto in corso dei children" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "In attesa %d degli elementi secondari per uscire" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Riconnessione al server AMQP su %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Connesso al server AMQP su %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Connesso al serve AMQP su %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registrazione del reattore" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "Reactor interno registrato" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Utilizzo socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Creazione del proxy per il topic: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "La registrazione dell'argomento viene ignorata. È già registrata." + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/ja/LC_MESSAGES/neutron-log-error.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/ja/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..9c2cf037 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/ja/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,170 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-19 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Japanese (http://www.transifex.com/projects/p/neutron/" +"language/ja/)\n" +"Language: ja\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "除去される元の例外: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "予期せぬ例外が、%d回()発生しました。再試行中。" + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "RPCクリーンアップ中に例外が発生" + +#: neutron/openstack/common/service.py:277 +msgid "Unhandled exception" +msgstr "未処理例外" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "DBへのリトライが上限に達しました。" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." 
+msgstr "DB接続エラーが発生しました。" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "DB 例外がラップされました。" + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "メッセージ処理中の例外" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "ストリング・フォーマットの操作で例外が発生しました" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "例外 %s を呼び出し元に返しています" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "メッセージの処理に失敗しました... スキップしています。" + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "メッセージ処理が失敗しました…キューに再登録します。" + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"%(hostname)s:%(port)d 上の AMQP サーバーは到達不能です: " +"%(err_str)s。%(sleep_time)d 秒後に再試行します。" + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" +"トピック '%(topic)s' のコンシューマーを宣言できませんでした: %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "キューからのメッセージのコンシュームに失敗しました: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" +"トピック '%(topic)s' に対してメッセージをパブリッシュできませんでした: " +"%(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "メッセージの処理に失敗しました... スキップしています。" + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"AMQP サーバーに接続できません: %(e)s。%(delay)s 秒間スリープ状態になります" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "メッセージの処理中にエラーが発生しました。スキップしています。" + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON の直列化が失敗しました。" + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "ZeroMQソケットをクローズできませんでした。" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC メッセージにメソッドが含まれていませんでした。" + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "トピック・ソケット・ファイルの作成に失敗しました。" + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." 
+msgstr "" +"トピック %(topic)s のトピック単位のローカル・バックログ・バッファーがフルで" +"す。メッセージを除去しています。" + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "必要な IPC ディレクトリが %s に存在しません" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "IPC ディレクトリ %s へのアクセス許可がありません" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"ZeroMQ 受信側デーモンを作成できませんでした。ソケットが既に使用中である可能性" +"があります。" + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ エンベロープのバージョンがサポートされないか、不明です。" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..4340ef30 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Japanese (http://www.transifex.com/projects/p/neutron/" +"language/ja/)\n" +"Language: ja\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s が見つかりました。終了しています" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "親プロセスが予期せずに停止しました。終了しています" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "fork が早すぎます。スリープ状態にしています" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "子 %d を開始しました" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "%d ワーカーを開始しています" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "子 %(pid)d がシグナル %(sig)d によって強制終了されました" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "子 %(pid)s が状況 %(code)d で終了しました" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s が見つかりました。子を停止しています" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "%d 個の子で終了を待機しています" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "%(hostname)s:%(port)d 上の AMQP サーバーに再接続しています" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "%(hostname)s:%(port)d 上の AMQP サーバーに接続しました" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "%s 上の AMQP サーバーに接続しました" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "リアクターの登録中" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "入力リアクターが登録されました" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "ソケットの消費中" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "トピック用プロキシーの作成中: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "トピックの登録をスキップします。既に登録されています。" + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "matchmaker が登録されていません: %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..a594504b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,165 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/neutron/" +"language/ko_KR/)\n" +"Language: ko_KR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "기존 예외가 삭제됨: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "예기치 않은 예외 %d 번 발생하였습니다... 다시 시도중." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "" + +#: neutron/openstack/common/service.py:277 +msgid "Unhandled exception" +msgstr "처리되지 않은 예외" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "데이터 베이스 연결 에러." + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." 
+msgstr "DB 예외가 랩핑되었습니다." + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "메시지 처리 중 예외" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "문자열 형식화 오퍼레이션의 예외" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "%s 예외를 호출자에게 리턴 중" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "메시지 처리 실패 ... 건너뜁니다." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "메시지 처리 실패 ... 큐에 다시 넣습니다." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"%(hostname)s:%(port)d의 AMQP 서버에 접근할 수 없음: %(err_str)s. " +"%(sleep_time)d초 내에 다시 시도하십시오. " + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "'%(topic)s' 주제에 대한 이용자를 선언하지 못했음: %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "큐의 메시지를 이용하지 못했음: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "'%(topic)s' 주제에 메시지를 공개하지 못했음: %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "메시지를 처리하지 못했습니다. 건너뛰는 중입니다. " + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "AMQP 서버 %(e)s에 연결할 수 없습니다. %(delay)s 초 휴면 상태입니다. " + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "메시지 처리 오류입니다. 건너뛰는 중입니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON 직렬화에 실패했습니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC 메시지에 메소드가 없습니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "주제 소켓 파일 작성에 실패했습니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"%(topic)s 주제에 대한 로컬 주제별 백로그 버퍼가 가득 찼습니다. 메시지 삭제 중" +"입니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "%s 에서 필요한 IPC 디렉터리가 없습니다" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "%s에서 IPC 디렉터리에 대한 권한을 거부했습니다" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"ZeroMQ 수신기 디먼을 작성할 수 없습니다. 
소켓이 이미 사용 중일 수 있습니다. " + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ Envelope 버전을 지원하지 않거나 알 수 없습니다. " diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..ab4e1ebe --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/neutron/" +"language/ko_KR/)\n" +"Language: ko_KR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s 발견, 종료 중" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "상위 프로세스가 예기치 않게 정지했습니다. 종료 중" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "포크가 너무 빠름. 정지 중" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "%d 하위를 시작했음" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "%d 작업자 시작 중" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "%(pid)d 하위가 %(sig)d 신호에 의해 강제 종료됨" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "%(pid)s 하위가 %(code)d 상태와 함께 종료했음" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s 발견, 하위 중지 중" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "%d 하위에서 종료하기를 대기 중임" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "%(hostname)s:%(port)d에서 AMQP 서버에 다시 연결 중" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "%(hostname)s:%(port)d에서 AMQP 서버에 연결되었음" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "%s의 AMQP 서버에 연결했음" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "등록 리액터" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "인 리액터 등록" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "소켓 이용 중" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "주제에 대한 프록시 작성: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "주제 등록을 건너뜁니다. 이미 등록되었습니다. " + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "등록되지 않은 중개자: %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-critical.pot b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-critical.pot new file mode 100644 index 00000000..f93eeb4f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-critical.pot @@ -0,0 +1,19 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev608.g787bba2\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-error.pot b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-error.pot new file mode 100644 index 00000000..a43e0106 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-error.pot @@ -0,0 +1,158 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev608.g787bba2\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "" + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "" + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "" + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "" + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." 
+msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "" + diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-info.pot b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-info.pot new file mode 100644 index 00000000..7ca982b4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-info.pot @@ -0,0 +1,127 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev32.g043f04c\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "" + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "" + diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-warning.pot b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-warning.pot new file mode 100644 index 00000000..bb272314 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron-log-warning.pot @@ -0,0 +1,53 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev608.g787bba2\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/openstack/common/service.py:363 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:506 +#, python-format +msgid "Database server has gone away: %s" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:559 +msgid "Unable to detect effective SQL mode" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:567 +#, python-format +msgid "MySQL SQL mode is '%s', consider enabling TRADITIONAL or STRICT_ALL_TABLES" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:673 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:97 +msgid "Id not in sort_keys; is sort_keys unique?" 
+msgstr "" + +#: neutron/openstack/common/rpc/matchmaker_ring.py:75 +#: neutron/openstack/common/rpc/matchmaker_ring.py:93 +#, python-format +msgid "No key defining hosts for topic '%s', see ringfile" +msgstr "" + diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron.pot b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron.pot new file mode 100644 index 00000000..357c1052 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/neutron.pot @@ -0,0 +1,16172 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: neutron 2014.2.dev208.gfe57b96\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-30 06:06+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: neutron/auth.py:35 +msgid "X_USER_ID is not found in request" +msgstr "" + +#: neutron/context.py:81 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: neutron/context.py:109 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: neutron/manager.py:71 +#, python-format +msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." +msgstr "" + +#: neutron/manager.py:83 +msgid "Neutron core_plugin not configured!" +msgstr "" + +#: neutron/manager.py:112 +#, python-format +msgid "Loading core plugin: %s" +msgstr "" + +#: neutron/manager.py:137 +#, python-format +msgid "Error loading plugin by name, %s" +msgstr "" + +#: neutron/manager.py:138 +#, python-format +msgid "Error loading plugin by class, %s" +msgstr "" + +#: neutron/manager.py:139 +msgid "Plugin not found." +msgstr "" + +#: neutron/manager.py:144 +msgid "Loading services supported by the core plugin" +msgstr "" + +#: neutron/manager.py:152 +#, python-format +msgid "Service %s is supported by the core plugin" +msgstr "" + +#: neutron/manager.py:165 +#, python-format +msgid "Loading service plugins: %s" +msgstr "" + +#: neutron/manager.py:170 +#, python-format +msgid "Loading Plugin: %s" +msgstr "" + +#: neutron/manager.py:178 +#, python-format +msgid "Multiple plugins for service %s were configured" +msgstr "" + +#: neutron/manager.py:190 +#, python-format +msgid "Successfully loaded %(type)s plugin. Description: %(desc)s" +msgstr "" + +#: neutron/policy.py:86 +#, python-format +msgid "Loading policies from file: %s" +msgstr "" + +#: neutron/policy.py:93 +#, python-format +msgid "" +"Found deprecated policy rule:%s. Please consider upgrading your policy " +"configuration file" +msgstr "" + +#: neutron/policy.py:105 +#, python-format +msgid "" +"Inserting policy:%(new_policy)s in place of deprecated " +"policy:%(old_policy)s" +msgstr "" + +#: neutron/policy.py:113 +#, python-format +msgid "" +"Backward compatibility unavailable for deprecated policy %s. The policy " +"will not be enforced" +msgstr "" + +#: neutron/policy.py:135 +#, python-format +msgid "Unable to find data type descriptor for attribute %s" +msgstr "" + +#: neutron/policy.py:140 +#, python-format +msgid "" +"Attribute type descriptor is not a dict. Unable to generate any sub-attr " +"policy rule for %s." 
+msgstr "" + +#: neutron/policy.py:213 +#, python-format +msgid "" +"Unable to identify a target field from:%s.match should be in the form " +"%%()s" +msgstr "" + +#: neutron/policy.py:239 +#, python-format +msgid "Unable to find ':' as separator in %s." +msgstr "" + +#: neutron/policy.py:243 +#, python-format +msgid "Unable to find resource name in %s" +msgstr "" + +#: neutron/policy.py:252 +#, python-format +msgid "" +"Unable to verify match:%(match)s as the parent resource: %(res)s was not " +"found" +msgstr "" + +#: neutron/policy.py:278 +#, python-format +msgid "Policy check error while calling %s!" +msgstr "" + +#: neutron/policy.py:309 +#, python-format +msgid "Unable to find requested field: %(field)s in target: %(target_dict)s" +msgstr "" + +#: neutron/policy.py:367 +#, python-format +msgid "Failed policy check for '%s'" +msgstr "" + +#: neutron/quota.py:34 +msgid "Resource name(s) that are supported in quota features" +msgstr "" + +#: neutron/quota.py:38 +msgid "" +"Default number of resource allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/quota.py:42 +msgid "Number of networks allowed per tenant.A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:46 +msgid "Number of subnets allowed per tenant, A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:50 +msgid "Number of ports allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/quota.py:54 +msgid "Default driver to use for quota checks" +msgstr "" + +#: neutron/quota.py:148 neutron/quota.py:153 +msgid "Access to this resource was denied." +msgstr "" + +#: neutron/quota.py:226 +msgid "" +"ConfDriver is used as quota_driver because the loaded plugin does not " +"support 'quotas' table." +msgstr "" + +#: neutron/quota.py:231 +#, python-format +msgid "Loaded quota_driver: %s." +msgstr "" + +#: neutron/quota.py:240 +#, python-format +msgid "%s is already registered." +msgstr "" + +#: neutron/service.py:40 +msgid "Seconds between running periodic tasks" +msgstr "" + +#: neutron/service.py:43 +msgid "Number of separate worker processes for service" +msgstr "" + +#: neutron/service.py:46 +msgid "Number of RPC worker processes for service" +msgstr "" + +#: neutron/service.py:49 +msgid "" +"Range of seconds to randomly delay when starting the periodic task " +"scheduler to reduce stampeding. (Disable by setting to 0)" +msgstr "" + +#: neutron/service.py:105 neutron/service.py:163 +msgid "Unrecoverable error: please check log for details." +msgstr "" + +#: neutron/service.py:144 +msgid "Active plugin doesn't implement start_rpc_listeners" +msgstr "" + +#: neutron/service.py:146 +#, python-format +msgid "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." +msgstr "" + +#: neutron/service.py:170 +msgid "No known API applications configured." +msgstr "" + +#: neutron/service.py:177 +#, python-format +msgid "Neutron service started, listening on %(host)s:%(port)s" +msgstr "" + +#: neutron/service.py:278 +msgid "Exception occurs when timer stops" +msgstr "" + +#: neutron/service.py:288 +msgid "Exception occurs when waiting for timer" +msgstr "" + +#: neutron/wsgi.py:51 +msgid "Number of backlog requests to configure the socket with" +msgstr "" + +#: neutron/wsgi.py:55 +msgid "" +"Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not " +"supported on OS X." 
+msgstr "" + +#: neutron/wsgi.py:59 +msgid "Number of seconds to keep retrying to listen" +msgstr "" + +#: neutron/wsgi.py:62 +msgid "Max header line to accommodate large tokens" +msgstr "" + +#: neutron/wsgi.py:65 +msgid "Enable SSL on the API server" +msgstr "" + +#: neutron/wsgi.py:67 +msgid "CA certificate file to use to verify connecting clients" +msgstr "" + +#: neutron/wsgi.py:70 +msgid "Certificate file to use when starting the server securely" +msgstr "" + +#: neutron/wsgi.py:73 +msgid "Private key file to use when starting the server securely" +msgstr "" + +#: neutron/wsgi.py:132 +#, python-format +msgid "Unable to listen on %(host)s:%(port)s" +msgstr "" + +#: neutron/wsgi.py:138 +#, python-format +msgid "Unable to find ssl_cert_file : %s" +msgstr "" + +#: neutron/wsgi.py:144 +#, python-format +msgid "Unable to find ssl_key_file : %s" +msgstr "" + +#: neutron/wsgi.py:149 +#, python-format +msgid "Unable to find ssl_ca_file : %s" +msgstr "" + +#: neutron/wsgi.py:182 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" +msgstr "" + +#: neutron/wsgi.py:342 +msgid "Missing Content-Type" +msgstr "" + +#: neutron/wsgi.py:531 +#, python-format +msgid "Data %(data)s type is %(type)s" +msgstr "" + +#: neutron/wsgi.py:614 +msgid "Cannot understand JSON" +msgstr "" + +#: neutron/wsgi.py:627 neutron/wsgi.py:630 +msgid "Inline DTD forbidden" +msgstr "" + +#: neutron/wsgi.py:711 +msgid "Cannot understand XML" +msgstr "" + +#: neutron/wsgi.py:820 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: neutron/wsgi.py:824 +msgid "No Content-Type provided in request" +msgstr "" + +#: neutron/wsgi.py:828 +msgid "Empty body provided in request" +msgstr "" + +#: neutron/wsgi.py:835 +msgid "Unable to deserialize body as provided Content-Type" +msgstr "" + +#: neutron/wsgi.py:931 +msgid "You must implement __call__" +msgstr "" + +#: neutron/wsgi.py:1024 neutron/api/v2/base.py:190 neutron/api/v2/base.py:331 +#: neutron/api/v2/base.py:471 neutron/api/v2/base.py:522 +#: neutron/extensions/l3agentscheduler.py:49 +#: neutron/extensions/l3agentscheduler.py:87 +msgid "The resource could not be found." +msgstr "" + +#: neutron/wsgi.py:1071 +#, python-format +msgid "%(method)s %(url)s" +msgstr "" + +#: neutron/wsgi.py:1077 +msgid "Unsupported Content-Type" +msgstr "" + +#: neutron/wsgi.py:1078 +#, python-format +msgid "InvalidContentType: %s" +msgstr "" + +#: neutron/wsgi.py:1082 +msgid "Malformed request body" +msgstr "" + +#: neutron/wsgi.py:1083 +#, python-format +msgid "MalformedRequestBody: %s" +msgstr "" + +#: neutron/wsgi.py:1090 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: neutron/wsgi.py:1095 +msgid "Internal error" +msgstr "" + +#: neutron/wsgi.py:1110 neutron/wsgi.py:1212 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: neutron/wsgi.py:1113 +#, python-format +msgid "%(url)s returned a fault: %(exception)s" +msgstr "" + +#: neutron/wsgi.py:1231 +#, python-format +msgid "The requested content type %s is invalid." +msgstr "" + +#: neutron/wsgi.py:1290 +msgid "Could not deserialize data" +msgstr "" + +#: neutron/agent/dhcp_agent.py:51 +msgid "Interval to resync." +msgstr "" + +#: neutron/agent/dhcp_agent.py:54 +msgid "The driver used to manage the DHCP server." +msgstr "" + +#: neutron/agent/dhcp_agent.py:56 +msgid "Support Metadata requests on isolated networks." +msgstr "" + +#: neutron/agent/dhcp_agent.py:58 +msgid "" +"Allows for serving metadata requests from a dedicated network. 
Requires " +"enable_isolated_metadata = True" +msgstr "" + +#: neutron/agent/dhcp_agent.py:62 +msgid "Number of threads to use during sync process." +msgstr "" + +#: neutron/agent/dhcp_agent.py:65 neutron/agent/l3_agent.py:190 +#: neutron/agent/metadata/namespace_proxy.py:165 +msgid "Location of Metadata Proxy UNIX domain socket" +msgstr "" + +#: neutron/agent/dhcp_agent.py:102 +#, python-format +msgid "" +"The '%s' DHCP-driver does not support retrieving of a list of existing " +"networks" +msgstr "" + +#: neutron/agent/dhcp_agent.py:109 neutron/agent/dhcp_agent.py:598 +msgid "DHCP agent started" +msgstr "" + +#: neutron/agent/dhcp_agent.py:118 +#, python-format +msgid "Calling driver for network: %(net)s action: %(action)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:134 +#, python-format +msgid "" +"Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its " +"current state; please check that the network and/or its subnet(s) still " +"exist." +msgstr "" + +#: neutron/agent/dhcp_agent.py:143 neutron/agent/dhcp_agent.py:201 +#, python-format +msgid "Network %s has been deleted." +msgstr "" + +#: neutron/agent/dhcp_agent.py:145 +#, python-format +msgid "Unable to %(action)s dhcp for %(net_id)s." +msgstr "" + +#: neutron/agent/dhcp_agent.py:155 +msgid "Synchronizing state" +msgstr "" + +#: neutron/agent/dhcp_agent.py:167 +#, python-format +msgid "Unable to sync network state on deleted network %s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:173 +msgid "Synchronizing state complete" +msgstr "" + +#: neutron/agent/dhcp_agent.py:177 +msgid "Unable to sync network state." +msgstr "" + +#: neutron/agent/dhcp_agent.py:189 +#, python-format +msgid "resync: %(reason)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:205 +#, python-format +msgid "Network %s info call failed." +msgstr "" + +#: neutron/agent/dhcp_agent.py:217 +#, python-format +msgid "" +"Network %s may have been deleted and its resources may have already been " +"disposed." +msgstr "" + +#: neutron/agent/dhcp_agent.py:342 +#, python-format +msgid "" +"%(port_num)d router ports found on the metadata access network. Only the " +"port %(port_id)s, for router %(router_id)s will be considered" +msgstr "" + +#: neutron/agent/dhcp_agent.py:580 neutron/agent/l3_agent.py:961 +#: neutron/agent/metadata/agent.py:362 +#: neutron/services/metering/agents/metering_agent.py:272 +msgid "" +"Neutron server does not support state report. State report for this agent" +" will be disabled." +msgstr "" + +#: neutron/agent/dhcp_agent.py:586 neutron/agent/l3_agent.py:966 +#: neutron/agent/metadata/agent.py:367 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:111 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:794 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:248 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:182 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:266 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:244 +#: neutron/services/loadbalancer/agent/agent_manager.py:121 +#: neutron/services/metering/agents/metering_agent.py:277 +msgid "Failed reporting state!" +msgstr "" + +#: neutron/agent/dhcp_agent.py:593 +#, python-format +msgid "Agent updated: %(payload)s" +msgstr "" + +#: neutron/agent/dhcp_agent.py:595 neutron/agent/l3_agent.py:971 +#: neutron/services/metering/agents/metering_agent.py:280 +#, python-format +msgid "agent_updated by server side %s!" 
+msgstr "" + +#: neutron/agent/l3_agent.py:164 neutron/debug/debug_agent.py:41 +msgid "Name of bridge used for external network traffic." +msgstr "" + +#: neutron/agent/l3_agent.py:168 +msgid "TCP Port used by Neutron metadata namespace proxy." +msgstr "" + +#: neutron/agent/l3_agent.py:172 +msgid "" +"Send this many gratuitous ARPs for HA setup, if less than or equal to 0, " +"the feature is disabled" +msgstr "" + +#: neutron/agent/l3_agent.py:175 +msgid "" +"If namespaces is disabled, the l3 agent can only configure a router that " +"has the matching router ID." +msgstr "" + +#: neutron/agent/l3_agent.py:180 +msgid "Agent should implement routers with no gateway" +msgstr "" + +#: neutron/agent/l3_agent.py:182 +msgid "UUID of external network for routers implemented by the agents." +msgstr "" + +#: neutron/agent/l3_agent.py:185 +msgid "Allow running metadata proxy." +msgstr "" + +#: neutron/agent/l3_agent.py:187 +msgid "Delete namespace after removing a router." +msgstr "" + +#: neutron/agent/l3_agent.py:210 +#, python-format +msgid "Error importing interface driver '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:238 neutron/agent/linux/dhcp.py:729 +#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 +msgid "An interface driver must be specified" +msgstr "" + +#: neutron/agent/l3_agent.py:243 +msgid "Router id is required if not using namespaces." +msgstr "" + +#: neutron/agent/l3_agent.py:264 +msgid "RuntimeError in obtaining router list for namespace cleanup." +msgstr "" + +#: neutron/agent/l3_agent.py:284 +#, python-format +msgid "Failed to destroy stale router namespace %s" +msgstr "" + +#: neutron/agent/l3_agent.py:305 neutron/agent/linux/dhcp.py:225 +#, python-format +msgid "Failed trying to delete namespace: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:335 +msgid "" +"The 'gateway_external_network_id' option must be configured for this " +"agent as Neutron has more than one external network." +msgstr "" + +#: neutron/agent/l3_agent.py:359 +#, python-format +msgid "Info for router %s were not found. 
Skipping router removal" +msgstr "" + +#: neutron/agent/l3_agent.py:408 +#: neutron/services/firewall/agents/varmour/varmour_router.py:102 +#, python-format +msgid "Router port %s has no IP address" +msgstr "" + +#: neutron/agent/l3_agent.py:410 neutron/db/l3_db.py:974 +#: neutron/services/firewall/agents/varmour/varmour_router.py:105 +#, python-format +msgid "Ignoring multiple IPs on router port %s" +msgstr "" + +#: neutron/agent/l3_agent.py:450 +#, python-format +msgid "Deleting stale internal router device: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:479 +#, python-format +msgid "Deleting stale external router device: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:598 +#, python-format +msgid "Unable to configure IP address for floating IP: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:628 +#, python-format +msgid "Failed sending gratuitous ARP: %s" +msgstr "" + +#: neutron/agent/l3_agent.py:737 +#, python-format +msgid "Got router deleted notification for %s" +msgstr "" + +#: neutron/agent/l3_agent.py:742 +#, python-format +msgid "Got routers updated notification :%s" +msgstr "" + +#: neutron/agent/l3_agent.py:750 +#, python-format +msgid "Got router removed from agent :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:754 +#, python-format +msgid "Got router added to agent :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:761 +#, python-format +msgid "The external network bridge '%s' does not exist" +msgstr "" + +#: neutron/agent/l3_agent.py:811 +#, python-format +msgid "Starting RPC loop for %d updated routers" +msgstr "" + +#: neutron/agent/l3_agent.py:829 +msgid "RPC loop successfully completed" +msgstr "" + +#: neutron/agent/l3_agent.py:831 neutron/agent/l3_agent.py:869 +#: neutron/services/metering/agents/metering_agent.py:61 +msgid "Failed synchronizing routers" +msgstr "" + +#: neutron/agent/l3_agent.py:849 +#, python-format +msgid "Starting _sync_routers_task - fullsync:%s" +msgstr "" + +#: neutron/agent/l3_agent.py:860 +#, python-format +msgid "Processing :%r" +msgstr "" + +#: neutron/agent/l3_agent.py:863 +msgid "_sync_routers_task successfully completed" +msgstr "" + +#: neutron/agent/l3_agent.py:865 +msgid "Failed synchronizing routers due to RPC error" +msgstr "" + +#: neutron/agent/l3_agent.py:878 +msgid "L3 agent started" +msgstr "" + +#: neutron/agent/l3_agent.py:893 +#, python-format +msgid "Added route entry is '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:901 +#, python-format +msgid "Removed route entry is '%s'" +msgstr "" + +#: neutron/agent/l3_agent.py:934 +msgid "Report state task started" +msgstr "" + +#: neutron/agent/l3_agent.py:958 +msgid "Report state task successfully completed" +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:59 +msgid "Delete the namespace by removing all devices." +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:116 +#, python-format +msgid "Unable to find bridge for device: %s" +msgstr "" + +#: neutron/agent/netns_cleanup_util.py:140 +#, python-format +msgid "Error unable to destroy namespace: %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:39 +msgid "" +"True to delete all ports on all the OpenvSwitch bridges. False to delete " +"ports created by Neutron on integration and external network bridges." 
+msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:73 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:664 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:281 +#, python-format +msgid "Delete %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:103 +#, python-format +msgid "Cleaning %s" +msgstr "" + +#: neutron/agent/ovs_cleanup_util.py:110 +msgid "OVS cleanup completed successfully" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:29 +msgid "Driver for security groups firewall in the L2 agent" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:33 +msgid "" +"Controls whether the neutron security group API is enabled in the server." +" It should be false when using no security groups or using the nova " +"security group API." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:56 +#: neutron/agent/securitygroups_rpc.py:140 +msgid "Driver configuration doesn't match with enable_security_group" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:69 +msgid "Disabled security-group extension." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:71 +msgid "Disabled allowed-address-pairs extension." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:78 +#, python-format +msgid "Get security group rules for devices via rpc %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:95 +msgid "" +"Security group agent binding currently not set. This should be set by the" +" end of the init process." +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:106 +#, python-format +msgid "Security group rule updated on remote: %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:118 +#, python-format +msgid "Security group member updated on remote: %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:125 +#: neutron/agent/securitygroups_rpc.py:194 +msgid "Provider rule updated" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:138 +#, python-format +msgid "Init firewall settings (driver=%s)" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:157 +#, python-format +msgid "Preparing filters for devices %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:165 +#, python-format +msgid "Security group rule updated %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:172 +#, python-format +msgid "Security group member updated %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:186 +#, python-format +msgid "" +"Adding %s devices to the list of devices for which firewall needs to be " +"refreshed" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:206 +#, python-format +msgid "Remove device filter for %r" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:215 +msgid "Refresh firewall rules" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:219 +msgid "No ports here to refresh firewall" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:225 +#, python-format +msgid "Update port filter for %s" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:243 +#, python-format +msgid "Preparing device filters for %d new devices" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:256 +msgid "Refreshing firewall for all filtered devices" +msgstr "" + +#: neutron/agent/securitygroups_rpc.py:264 +#, python-format +msgid "Refreshing firewall for %d devices" +msgstr "" + +#: neutron/agent/common/config.py:29 +msgid "Root helper application." +msgstr "" + +#: neutron/agent/common/config.py:34 +msgid "" +"Seconds between nodes reporting state to server; should be less than " +"agent_down_time, best if it is half or less than agent_down_time." 
+msgstr "" + +#: neutron/agent/common/config.py:41 +msgid "The driver used to manage the virtual interface." +msgstr "" + +#: neutron/agent/common/config.py:46 +msgid "Allow overlapping IP." +msgstr "" + +#: neutron/agent/common/config.py:102 +msgid "" +"DEFAULT.root_helper is deprecated! Please move root_helper configuration " +"to [AGENT] section." +msgstr "" + +#: neutron/agent/common/config.py:113 +msgid "Top-level directory for maintaining dhcp state" +msgstr "" + +#: neutron/agent/linux/async_process.py:66 +msgid "respawn_interval must be >= 0 if provided." +msgstr "" + +#: neutron/agent/linux/async_process.py:80 +msgid "Process is already started" +msgstr "" + +#: neutron/agent/linux/async_process.py:82 +#, python-format +msgid "Launching async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:88 +#, python-format +msgid "Halting async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:91 +msgid "Process is not running." +msgstr "" + +#: neutron/agent/linux/async_process.py:163 +#, python-format +msgid "An error occurred while killing [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:170 +#, python-format +msgid "Halting async process [%s] in response to an error." +msgstr "" + +#: neutron/agent/linux/async_process.py:176 +#, python-format +msgid "Respawning async process [%s]." +msgstr "" + +#: neutron/agent/linux/async_process.py:185 +#, python-format +msgid "An error occurred while communicating with async process [%s]." +msgstr "" + +#: neutron/agent/linux/daemon.py:37 +#, python-format +msgid "Error while handling pidfile: %s" +msgstr "" + +#: neutron/agent/linux/daemon.py:45 +msgid "Unable to unlock pid file" +msgstr "" + +#: neutron/agent/linux/daemon.py:94 +msgid "Fork failed" +msgstr "" + +#: neutron/agent/linux/daemon.py:136 +#, python-format +msgid "Pidfile %s already exist. Daemon already running?" +msgstr "" + +#: neutron/agent/linux/dhcp.py:43 +msgid "Location to store DHCP server config files" +msgstr "" + +#: neutron/agent/linux/dhcp.py:46 neutron/plugins/vmware/dhcp_meta/nsx.py:44 +msgid "Domain to use for building the hostnames" +msgstr "" + +#: neutron/agent/linux/dhcp.py:49 +msgid "Override the default dnsmasq settings with this file" +msgstr "" + +#: neutron/agent/linux/dhcp.py:51 +msgid "Comma-separated list of the DNS servers which will be used as forwarders." +msgstr "" + +#: neutron/agent/linux/dhcp.py:55 +msgid "Delete namespace after removing a dhcp server." +msgstr "" + +#: neutron/agent/linux/dhcp.py:59 +msgid "Limit number of leases to prevent a denial-of-service." +msgstr "" + +#: neutron/agent/linux/dhcp.py:207 +#, python-format +msgid "" +"DHCP for %(net_id)s is stale, pid %(pid)d does not exist, performing " +"cleanup" +msgstr "" + +#: neutron/agent/linux/dhcp.py:214 +#, python-format +msgid "No DHCP started for %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:246 neutron/agent/linux/external_process.py:78 +#, python-format +msgid "Error while reading %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:253 neutron/agent/linux/external_process.py:86 +#, python-format +msgid "Unable to convert value in %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:255 neutron/agent/linux/external_process.py:84 +#, python-format +msgid "Unable to access %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:316 +#, python-format +msgid "" +"FAILED VERSION REQUIREMENT FOR DNSMASQ. DHCP AGENT MAY NOT RUN CORRECTLY!" +" Please ensure that its version is %s or above!" 
+msgstr "" + +#: neutron/agent/linux/dhcp.py:321 +#, python-format +msgid "" +"Unable to determine dnsmasq version. Please ensure that its version is %s" +" or above!" +msgstr "" + +#: neutron/agent/linux/dhcp.py:424 +#, python-format +msgid "Killing dhcpmasq for network since all subnets have turned off DHCP: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:436 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: neutron/agent/linux/dhcp.py:437 +#, python-format +msgid "Reloading allocations for network: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:487 +#, python-format +msgid "Building host file: %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:497 +#, python-format +msgid "Adding %(mac)s : %(name)s : %(ip)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:513 +#, python-format +msgid "Done building host file %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:736 +#, python-format +msgid "Error importing interface driver '%(driver)s': %(inner)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:776 +#, python-format +msgid "Setting gateway for dhcp netns on net %(n)s to %(ip)s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:786 +#, python-format +msgid "Removing gateway for dhcp netns on net %s" +msgstr "" + +#: neutron/agent/linux/dhcp.py:830 +#, python-format +msgid "" +"DHCP port %(device_id)s on network %(network_id)s does not yet exist. " +"Checking for a reserved port." +msgstr "" + +#: neutron/agent/linux/dhcp.py:844 +#, python-format +msgid "DHCP port %(device_id)s on network %(network_id)s does not yet exist." +msgstr "" + +#: neutron/agent/linux/dhcp.py:879 neutron/debug/debug_agent.py:67 +#, python-format +msgid "Reusing existing device: %s." +msgstr "" + +#: neutron/agent/linux/external_process.py:30 +msgid "Location to store child pid files" +msgstr "" + +#: neutron/agent/linux/external_process.py:61 +#, python-format +msgid "Process for %(uuid)s pid %(pid)d is stale, ignoring command" +msgstr "" + +#: neutron/agent/linux/external_process.py:64 +#, python-format +msgid "No process started for %s" +msgstr "" + +#: neutron/agent/linux/interface.py:37 +msgid "Name of Open vSwitch bridge to use" +msgstr "" + +#: neutron/agent/linux/interface.py:40 +msgid "Uses veth for an interface or not" +msgstr "" + +#: neutron/agent/linux/interface.py:42 +msgid "MTU setting for device." 
+msgstr "" + +#: neutron/agent/linux/interface.py:44 +msgid "Mapping between flavor and LinuxInterfaceDriver" +msgstr "" + +#: neutron/agent/linux/interface.py:46 +msgid "Admin username" +msgstr "" + +#: neutron/agent/linux/interface.py:48 neutron/agent/metadata/agent.py:54 +#: neutron/plugins/metaplugin/common/config.py:65 +msgid "Admin password" +msgstr "" + +#: neutron/agent/linux/interface.py:51 neutron/agent/metadata/agent.py:57 +#: neutron/plugins/metaplugin/common/config.py:68 +msgid "Admin tenant name" +msgstr "" + +#: neutron/agent/linux/interface.py:53 neutron/agent/metadata/agent.py:59 +#: neutron/plugins/metaplugin/common/config.py:70 +msgid "Authentication URL" +msgstr "" + +#: neutron/agent/linux/interface.py:55 neutron/agent/metadata/agent.py:61 +#: neutron/common/config.py:47 neutron/plugins/metaplugin/common/config.py:72 +msgid "The type of authentication to use" +msgstr "" + +#: neutron/agent/linux/interface.py:57 neutron/agent/metadata/agent.py:63 +#: neutron/plugins/metaplugin/common/config.py:74 +msgid "Authentication region" +msgstr "" + +#: neutron/agent/linux/interface.py:214 neutron/agent/linux/interface.py:268 +#: neutron/agent/linux/interface.py:330 neutron/agent/linux/interface.py:379 +#, python-format +msgid "Device %s already exists" +msgstr "" + +#: neutron/agent/linux/interface.py:232 neutron/agent/linux/interface.py:279 +#: neutron/agent/linux/interface.py:342 neutron/agent/linux/interface.py:386 +#, python-format +msgid "Unplugged interface '%s'" +msgstr "" + +#: neutron/agent/linux/interface.py:234 neutron/agent/linux/interface.py:278 +#: neutron/agent/linux/interface.py:344 neutron/agent/linux/interface.py:388 +#, python-format +msgid "Failed unplugging interface '%s'" +msgstr "" + +#: neutron/agent/linux/interface.py:446 +#, python-format +msgid "Driver location: %s" +msgstr "" + +#: neutron/agent/linux/ip_lib.py:26 +msgid "Force ip_lib calls to use the root helper" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:56 +#, python-format +msgid "Preparing device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:64 +#, python-format +msgid "Updating device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:66 +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:75 +#, python-format +msgid "Removing device (%s) filter" +msgstr "" + +#: neutron/agent/linux/iptables_firewall.py:77 +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:157 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:199 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:234 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:372 +#, python-format +msgid "Got semaphore / lock \"%s\"" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:375 +#, python-format +msgid "Semaphore / lock released \"%s\"" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:424 +#: neutron/tests/unit/test_iptables_manager.py:558 +#: neutron/tests/unit/test_iptables_manager.py:592 +#, python-format +msgid "" +"IPTablesManager.apply failed to apply the following set of iptables " +"rules:\n" +"%s" +msgstr "" + +#: 
neutron/agent/linux/iptables_manager.py:427 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:437 +#, python-format +msgid "Unable to find table %s" +msgstr "" + +#: neutron/agent/linux/iptables_manager.py:637 +#, python-format +msgid "Attempted to get traffic counters of chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:34 +msgid "Timeout in seconds for ovs-vsctl commands" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:68 neutron/agent/linux/ovs_lib.py:168 +#: neutron/agent/linux/ovs_lib.py:315 +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:55 +#, python-format +msgid "Unable to execute %(cmd)s. Exception: %(exception)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:223 +msgid "defer_apply_on" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:227 +msgid "defer_apply_off" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:237 +#, python-format +msgid "Applying following deferred flows to bridge %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:240 +#, python-format +msgid "%(action)s: %(flow)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:266 +msgid "" +"Unable to create VXLAN tunnel port. Please ensure that an openvswitch " +"version that supports VXLAN is installed." +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:363 +#, python-format +msgid "Found not yet ready openvswitch port: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:378 +#, python-format +msgid "Found failed openvswitch port: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:433 +#, python-format +msgid "Port: %(port_name)s is on %(switch)s, not on %(br_name)s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:441 +#, python-format +msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:451 +#, python-format +msgid "Unable to parse interface details. Exception: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:469 +#, python-format +msgid "Unable to determine mac address for %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:486 +#, python-format +msgid "Interface %s not found." +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:497 +#, python-format +msgid "Unable to retrieve bridges. Exception: %s" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:506 +#, python-format +msgid "Bridge %s not found." 
+msgstr "" + +#: neutron/agent/linux/ovs_lib.py:522 +msgid "Cannot match priority on flow deletion or modification" +msgstr "" + +#: neutron/agent/linux/ovs_lib.py:527 +msgid "Must specify one or more actions on flow addition or modification" +msgstr "" + +#: neutron/agent/linux/ovsdb_monitor.py:44 +#, python-format +msgid "Output received from ovsdb monitor: %s" +msgstr "" + +#: neutron/agent/linux/ovsdb_monitor.py:50 +#, python-format +msgid "Error received from ovsdb monitor: %s" +msgstr "" + +#: neutron/agent/linux/utils.py:47 +#, python-format +msgid "Running command: %s" +msgstr "" + +#: neutron/agent/linux/utils.py:70 +#, python-format +msgid "" +"\n" +"Command: %(cmd)s\n" +"Exit code: %(code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: neutron/agent/metadata/agent.py:52 +#: neutron/plugins/metaplugin/common/config.py:63 +msgid "Admin user" +msgstr "" + +#: neutron/agent/metadata/agent.py:66 +msgid "Turn off verification of the certificate for ssl" +msgstr "" + +#: neutron/agent/metadata/agent.py:69 +msgid "Certificate Authority public key (CA cert) file for ssl" +msgstr "" + +#: neutron/agent/metadata/agent.py:73 +msgid "Network service endpoint type to pull from the keystone catalog" +msgstr "" + +#: neutron/agent/metadata/agent.py:76 +msgid "IP address used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/agent.py:79 +msgid "TCP Port used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/agent.py:82 +#: neutron/plugins/vmware/dhcp_meta/nsx.py:63 +msgid "Shared secret to sign instance-id request" +msgstr "" + +#: neutron/agent/metadata/agent.py:87 +msgid "Protocol to access nova metadata, http or https" +msgstr "" + +#: neutron/agent/metadata/agent.py:89 +msgid "Allow to perform insecure SSL (https) requests to nova metadata" +msgstr "" + +#: neutron/agent/metadata/agent.py:93 +msgid "Client certificate for nova metadata api server." +msgstr "" + +#: neutron/agent/metadata/agent.py:96 +msgid "Private key of client certificate." +msgstr "" + +#: neutron/agent/metadata/agent.py:126 +#: neutron/agent/metadata/namespace_proxy.py:68 +#, python-format +msgid "Request: %s" +msgstr "" + +#: neutron/agent/metadata/agent.py:135 +#: neutron/agent/metadata/namespace_proxy.py:76 +msgid "Unexpected error." +msgstr "" + +#: neutron/agent/metadata/agent.py:136 +#: neutron/agent/metadata/namespace_proxy.py:77 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: neutron/agent/metadata/agent.py:178 +msgid "" +"Either one of parameter network_id or router_id must be passed to " +"_get_ports method." +msgstr "" + +#: neutron/agent/metadata/agent.py:230 +msgid "" +"The remote metadata server responded with Forbidden. This response " +"usually occurs when shared secrets do not match." +msgstr "" + +#: neutron/agent/metadata/agent.py:241 +#: neutron/agent/metadata/namespace_proxy.py:120 +msgid "Remote metadata server experienced an internal server error." 
+msgstr "" + +#: neutron/agent/metadata/agent.py:247 +#: neutron/agent/metadata/namespace_proxy.py:126 +#, python-format +msgid "Unexpected response code: %s" +msgstr "" + +#: neutron/agent/metadata/agent.py:307 +msgid "Location for Metadata Proxy UNIX domain socket" +msgstr "" + +#: neutron/agent/metadata/agent.py:310 +msgid "Number of separate worker processes for metadata server" +msgstr "" + +#: neutron/agent/metadata/agent.py:314 +msgid "Number of backlog requests to configure the metadata server socket with" +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:63 +msgid "network_id and router_id are None. One must be provided." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:149 +msgid "Network that will have instance metadata proxied." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:152 +msgid "Router that will have connected instances' metadata proxied." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:155 +msgid "Location of pid file of this process." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:158 +msgid "Run as daemon." +msgstr "" + +#: neutron/agent/metadata/namespace_proxy.py:161 +msgid "TCP Port to listen for metadata server requests." +msgstr "" + +#: neutron/api/api_common.py:101 +#, python-format +msgid "" +"Invalid value for pagination_max_limit: %s. It should be an integer " +"greater to 0" +msgstr "" + +#: neutron/api/api_common.py:115 +#, python-format +msgid "Limit must be an integer 0 or greater and not '%d'" +msgstr "" + +#: neutron/api/api_common.py:132 +msgid "The number of sort_keys and sort_dirs must be same" +msgstr "" + +#: neutron/api/api_common.py:137 +#, python-format +msgid "%s is invalid attribute for sort_keys" +msgstr "" + +#: neutron/api/api_common.py:141 +#, python-format +msgid "" +"%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s'" +" and '%(desc)s'" +msgstr "" + +#: neutron/api/api_common.py:315 neutron/api/v2/base.py:592 +#, python-format +msgid "Unable to find '%s' in request body" +msgstr "" + +#: neutron/api/api_common.py:322 +#, python-format +msgid "Failed to parse request. Parameter '%s' not specified" +msgstr "" + +#: neutron/api/extensions.py:253 +#, python-format +msgid "Extension with alias %s does not exist" +msgstr "" + +#: neutron/api/extensions.py:257 neutron/api/extensions.py:261 +msgid "Resource not found." +msgstr "" + +#: neutron/api/extensions.py:283 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: neutron/api/extensions.py:305 +#, python-format +msgid "Extended action: %s" +msgstr "" + +#: neutron/api/extensions.py:313 +#, python-format +msgid "Extended request: %s" +msgstr "" + +#: neutron/api/extensions.py:403 +msgid "Initializing extension manager." +msgstr "" + +#: neutron/api/extensions.py:486 +#, python-format +msgid "Error fetching extended attributes for extension '%s'" +msgstr "" + +#: neutron/api/extensions.py:492 +#, python-format +msgid "" +"Extension '%s' provides no backward compatibility map for extended " +"attributes" +msgstr "" + +#: neutron/api/extensions.py:502 +#, python-format +msgid "" +"It was impossible to process the following extensions: %s because of " +"missing requirements." 
+msgstr "" + +#: neutron/api/extensions.py:513 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: neutron/api/extensions.py:514 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: neutron/api/extensions.py:515 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: neutron/api/extensions.py:516 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: neutron/api/extensions.py:517 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: neutron/api/extensions.py:519 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: neutron/api/extensions.py:537 +#, python-format +msgid "Extension path '%s' doesn't exist!" +msgstr "" + +#: neutron/api/extensions.py:545 +#, python-format +msgid "Loading extension file: %s" +msgstr "" + +#: neutron/api/extensions.py:553 +#, python-format +msgid "Did not find expected name \"%(ext_name)s\" in %(file)s" +msgstr "" + +#: neutron/api/extensions.py:561 +#, python-format +msgid "Extension file %(f)s wasn't loaded due to %(exception)s" +msgstr "" + +#: neutron/api/extensions.py:570 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: neutron/api/extensions.py:601 +#, python-format +msgid "Extension %s not supported by any of loaded plugins" +msgstr "" + +#: neutron/api/extensions.py:612 +#, python-format +msgid "Loaded plugins do not implement extension %s interface" +msgstr "" + +#: neutron/api/versions.py:45 +msgid "Unknown API version specified" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:65 +#, python-format +msgid "" +"Unable to schedule network %s: no agents available; will retry on " +"subsequent port creation events." +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:78 +#, python-format +msgid "" +"Only %(active)d of %(total)d DHCP agents associated with network " +"'%(net_id)s' are marked as active, so notifications may be sent to " +"inactive agents." +msgstr "" + +#: neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py:90 +#, python-format +msgid "" +"Will not send event %(method)s for network %(net_id)s: no agent " +"available. Payload: %(payload)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:38 +#, python-format +msgid "Nofity agent at %(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:58 +#, python-format +msgid "Notify agent at %(topic)s.%(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:74 +#, python-format +msgid "" +"No plugin for L3 routing registered. Cannot notify agents with the " +"message %s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py:92 +#, python-format +msgid "" +"Fanout notify agent at %(topic)s the message %(method)s on router " +"%(router_id)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py:49 +#, python-format +msgid "Notify metering agent at %(topic)s.%(host)s the message %(method)s" +msgstr "" + +#: neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py:64 +#, python-format +msgid "" +"Fanout notify metering agent at %(topic)s the message %(method)s on " +"router %(router_id)s" +msgstr "" + +#: neutron/api/v2/attributes.py:44 +#, python-format +msgid "" +"Invalid input. 
'%(target_dict)s' must be a dictionary with keys: " +"%(expected_keys)s" +msgstr "" + +#: neutron/api/v2/attributes.py:55 +#, python-format +msgid "" +"Validation of dictionary's keys failed.Expected keys: %(expected_keys)s " +"Provided keys: %(provided_keys)s" +msgstr "" + +#: neutron/api/v2/attributes.py:69 +#, python-format +msgid "'%(data)s' is not in %(valid_values)s" +msgstr "" + +#: neutron/api/v2/attributes.py:85 +#, python-format +msgid "'%s' Blank strings are not permitted" +msgstr "" + +#: neutron/api/v2/attributes.py:95 +#, python-format +msgid "'%s' is not a valid string" +msgstr "" + +#: neutron/api/v2/attributes.py:100 +#, python-format +msgid "'%(data)s' exceeds maximum length of %(max_len)s" +msgstr "" + +#: neutron/api/v2/attributes.py:110 +#, python-format +msgid "'%s' is not a valid boolean value" +msgstr "" + +#: neutron/api/v2/attributes.py:129 neutron/api/v2/attributes.py:454 +#, python-format +msgid "'%s' is not an integer" +msgstr "" + +#: neutron/api/v2/attributes.py:133 +#, python-format +msgid "'%(data)s' is too small - must be at least '%(limit)d'" +msgstr "" + +#: neutron/api/v2/attributes.py:138 +#, python-format +msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" +msgstr "" + +#: neutron/api/v2/attributes.py:147 +#, python-format +msgid "'%s' contains whitespace" +msgstr "" + +#: neutron/api/v2/attributes.py:164 +#, python-format +msgid "'%s' is not a valid MAC address" +msgstr "" + +#: neutron/api/v2/attributes.py:179 +#, python-format +msgid "'%s' is not a valid IP address" +msgstr "" + +#: neutron/api/v2/attributes.py:190 +#, python-format +msgid "Invalid data format for IP pool: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:209 neutron/api/v2/attributes.py:216 +#, python-format +msgid "Invalid data format for fixed IP: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:224 +#, python-format +msgid "Duplicate IP address '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:240 +#, python-format +msgid "Invalid data format for nameserver: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:251 +#, python-format +msgid "'%s' is not a valid nameserver" +msgstr "" + +#: neutron/api/v2/attributes.py:255 +#, python-format +msgid "Duplicate nameserver '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:263 +#, python-format +msgid "Invalid data format for hostroute: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:283 +#, python-format +msgid "Duplicate hostroute '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:300 neutron/tests/unit/test_attributes.py:460 +#: neutron/tests/unit/test_attributes.py:474 +#: neutron/tests/unit/test_attributes.py:482 +#, python-format +msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" +msgstr "" + +#: neutron/api/v2/attributes.py:306 +#, python-format +msgid "'%s' is not a valid IP subnet" +msgstr "" + +#: neutron/api/v2/attributes.py:314 neutron/api/v2/attributes.py:367 +#, python-format +msgid "'%s' is not a list" +msgstr "" + +#: neutron/api/v2/attributes.py:319 neutron/api/v2/attributes.py:378 +#, python-format +msgid "Duplicate items in the list: '%s'" +msgstr "" + +#: neutron/api/v2/attributes.py:342 +#, python-format +msgid "'%s' is not a valid input" +msgstr "" + +#: neutron/api/v2/attributes.py:355 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:549 +#, python-format +msgid "'%s' is not a valid UUID" +msgstr "" + +#: neutron/api/v2/attributes.py:398 +#, python-format +msgid "Validator '%s' does not exist." 
+msgstr "" + +#: neutron/api/v2/attributes.py:408 +#, python-format +msgid "'%s' is not a dictionary" +msgstr "" + +#: neutron/api/v2/attributes.py:459 +#, python-format +msgid "'%s' should be non-negative" +msgstr "" + +#: neutron/api/v2/attributes.py:478 +#, python-format +msgid "'%s' cannot be converted to boolean" +msgstr "" + +#: neutron/api/v2/attributes.py:486 +#: neutron/plugins/nec/extensions/packetfilter.py:75 +#, python-format +msgid "'%s' is not a integer" +msgstr "" + +#: neutron/api/v2/attributes.py:499 +#, python-format +msgid "'%s' is not of the form =[value]" +msgstr "" + +#: neutron/api/v2/base.py:86 +msgid "Native pagination depend on native sorting" +msgstr "" + +#: neutron/api/v2/base.py:89 +msgid "Allow sorting is enabled because native pagination requires native sorting" +msgstr "" + +#: neutron/api/v2/base.py:360 +#, python-format +msgid "Unable to undo add for %(resource)s %(id)s" +msgstr "" + +#: neutron/api/v2/base.py:492 +#, python-format +msgid "Invalid format: %s" +msgstr "" + +#: neutron/api/v2/base.py:545 +msgid "" +"Specifying 'tenant_id' other than authenticated tenant in request " +"requires admin privileges" +msgstr "" + +#: neutron/api/v2/base.py:553 +msgid "Running without keystone AuthN requires that tenant_id is specified" +msgstr "" + +#: neutron/api/v2/base.py:571 +msgid "Resource body required" +msgstr "" + +#: neutron/api/v2/base.py:573 +#, python-format +msgid "Request body: %(body)s" +msgstr "" + +#: neutron/api/v2/base.py:583 +msgid "Bulk operation not supported" +msgstr "" + +#: neutron/api/v2/base.py:587 +msgid "Resources required" +msgstr "" + +#: neutron/api/v2/base.py:603 +#, python-format +msgid "Failed to parse request. Required attribute '%s' not specified" +msgstr "" + +#: neutron/api/v2/base.py:610 +#, python-format +msgid "Attribute '%s' not allowed in POST" +msgstr "" + +#: neutron/api/v2/base.py:615 +#, python-format +msgid "Cannot update read-only attribute %s" +msgstr "" + +#: neutron/api/v2/base.py:633 +#, python-format +msgid "Invalid input for %(attr)s. Reason: %(reason)s." +msgstr "" + +#: neutron/api/v2/base.py:642 neutron/extensions/allowedaddresspairs.py:57 +#: neutron/extensions/multiprovidernet.py:51 +#, python-format +msgid "Unrecognized attribute(s) '%s'" +msgstr "" + +#: neutron/api/v2/base.py:661 +#, python-format +msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" +msgstr "" + +#: neutron/api/v2/resource.py:97 +#, python-format +msgid "%(action)s failed (client error): %(exc)s" +msgstr "" + +#: neutron/api/v2/resource.py:100 neutron/api/v2/resource.py:110 +#: neutron/api/v2/resource.py:129 +#, python-format +msgid "%s failed" +msgstr "" + +#: neutron/api/v2/resource.py:131 +#: neutron/tests/unit/test_api_v2_resource.py:275 +#: neutron/tests/unit/test_api_v2_resource.py:291 +msgid "Request Failed: internal server error while processing your request." +msgstr "" + +#: neutron/cmd/sanity_check.py:38 +msgid "" +"Check for Open vSwitch VXLAN support failed. Please ensure that the " +"version of openvswitch being used has VXLAN support." +msgstr "" + +#: neutron/cmd/sanity_check.py:47 +msgid "" +"Check for Open vSwitch patch port support failed. Please ensure that the " +"version of openvswitch being used has patch port support or disable " +"features requiring patch ports (gre/vxlan, etc.)." +msgstr "" + +#: neutron/cmd/sanity_check.py:57 +msgid "" +"Nova notifcations are enabled, but novaclient is not installed. Either " +"disable nova notifications or install python-novaclient." 
+msgstr "" + +#: neutron/cmd/sanity_check.py:66 +msgid "Check for vxlan support" +msgstr "" + +#: neutron/cmd/sanity_check.py:68 +msgid "Check for patch port support" +msgstr "" + +#: neutron/cmd/sanity_check.py:70 +msgid "Check for nova notification support" +msgstr "" + +#: neutron/common/config.py:37 +msgid "The host IP to bind to" +msgstr "" + +#: neutron/common/config.py:39 +msgid "The port to bind to" +msgstr "" + +#: neutron/common/config.py:41 +msgid "The API paste config file to use" +msgstr "" + +#: neutron/common/config.py:43 +msgid "The path for API extensions" +msgstr "" + +#: neutron/common/config.py:45 +msgid "The policy file to use" +msgstr "" + +#: neutron/common/config.py:49 +msgid "The core plugin Neutron will use" +msgstr "" + +#: neutron/common/config.py:51 neutron/db/migration/cli.py:35 +msgid "The service plugins Neutron will use" +msgstr "" + +#: neutron/common/config.py:53 +msgid "The base MAC address Neutron will use for VIFs" +msgstr "" + +#: neutron/common/config.py:55 +msgid "How many times Neutron will retry MAC generation" +msgstr "" + +#: neutron/common/config.py:57 +msgid "Allow the usage of the bulk API" +msgstr "" + +#: neutron/common/config.py:59 +msgid "Allow the usage of the pagination" +msgstr "" + +#: neutron/common/config.py:61 +msgid "Allow the usage of the sorting" +msgstr "" + +#: neutron/common/config.py:63 +msgid "" +"The maximum number of items returned in a single response, value was " +"'infinite' or negative integer means no limit" +msgstr "" + +#: neutron/common/config.py:67 +msgid "Maximum number of DNS nameservers" +msgstr "" + +#: neutron/common/config.py:69 +msgid "Maximum number of host routes per subnet" +msgstr "" + +#: neutron/common/config.py:71 +msgid "Maximum number of fixed ips per port" +msgstr "" + +#: neutron/common/config.py:74 +msgid "" +"DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " +"lease times." +msgstr "" + +#: neutron/common/config.py:77 +msgid "Allow sending resource operation notification to DHCP agent" +msgstr "" + +#: neutron/common/config.py:80 +msgid "Allow overlapping IP support in Neutron" +msgstr "" + +#: neutron/common/config.py:82 +msgid "The hostname Neutron is running on" +msgstr "" + +#: neutron/common/config.py:84 +msgid "Ensure that configured gateway is on subnet" +msgstr "" + +#: neutron/common/config.py:86 +msgid "Send notification to nova when port status changes" +msgstr "" + +#: neutron/common/config.py:88 +msgid "" +"Send notification to nova when port data (fixed_ips/floatingip) changes " +"so nova can update its cache." +msgstr "" + +#: neutron/common/config.py:92 +msgid "URL for connection to nova" +msgstr "" + +#: neutron/common/config.py:94 +msgid "Username for connecting to nova in admin context" +msgstr "" + +#: neutron/common/config.py:96 +msgid "Password for connection to nova in admin context" +msgstr "" + +#: neutron/common/config.py:99 +msgid "The uuid of the admin nova tenant" +msgstr "" + +#: neutron/common/config.py:102 +msgid "Authorization URL for connecting to nova in admin context" +msgstr "" + +#: neutron/common/config.py:105 +msgid "CA file for novaclient to verify server certificates" +msgstr "" + +#: neutron/common/config.py:107 +msgid "If True, ignore any SSL validation issues" +msgstr "" + +#: neutron/common/config.py:109 +msgid "" +"Name of nova region to use. Useful if keystone manages more than one " +"region." 
+msgstr "" + +#: neutron/common/config.py:112 +msgid "" +"Number of seconds between sending events to nova if there are any events " +"to send." +msgstr "" + +#: neutron/common/config.py:119 +msgid "" +"Where to store Neutron state files. This directory must be writable by " +"the agent." +msgstr "" + +#: neutron/common/config.py:151 +#, python-format +msgid "Base MAC: %s" +msgstr "" + +#: neutron/common/config.py:162 +msgid "Logging enabled!" +msgstr "" + +#: neutron/common/config.py:178 +#, python-format +msgid "Config paste file: %s" +msgstr "" + +#: neutron/common/config.py:183 +#, python-format +msgid "Unable to load %(app_name)s from configuration file %(config_path)s." +msgstr "" + +#: neutron/common/exceptions.py:30 +#: neutron/plugins/vmware/api_client/exception.py:27 +msgid "An unknown exception occurred." +msgstr "" + +#: neutron/common/exceptions.py:51 +#, python-format +msgid "Bad %(resource)s request: %(msg)s" +msgstr "" + +#: neutron/common/exceptions.py:63 +msgid "Not authorized." +msgstr "" + +#: neutron/common/exceptions.py:67 +msgid "The service is unavailable" +msgstr "" + +#: neutron/common/exceptions.py:71 +#, python-format +msgid "User does not have admin privileges: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:75 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: neutron/common/exceptions.py:79 +#, python-format +msgid "Network %(net_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:83 +#, python-format +msgid "Subnet %(subnet_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:87 +#, python-format +msgid "Port %(port_id)s could not be found" +msgstr "" + +#: neutron/common/exceptions.py:91 +#, python-format +msgid "Port %(port_id)s could not be found on network %(net_id)s" +msgstr "" + +#: neutron/common/exceptions.py:96 +msgid "Policy configuration policy.json could not be found" +msgstr "" + +#: neutron/common/exceptions.py:100 +#, python-format +msgid "Failed to init policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:104 +#, python-format +msgid "Failed to check policy %(policy)s because %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:108 +#, python-format +msgid "Unsupported port state: %(port_state)s" +msgstr "" + +#: neutron/common/exceptions.py:112 +msgid "The resource is inuse" +msgstr "" + +#: neutron/common/exceptions.py:116 +#, python-format +msgid "" +"Unable to complete operation on network %(net_id)s. There are one or more" +" ports still in use on the network." +msgstr "" + +#: neutron/common/exceptions.py:121 +#, python-format +msgid "" +"Unable to complete operation on subnet %(subnet_id)s. One or more ports " +"have an IP allocation from this subnet." +msgstr "" + +#: neutron/common/exceptions.py:126 +#, python-format +msgid "" +"Unable to complete operation on port %(port_id)s for network %(net_id)s. " +"Port already has an attacheddevice %(device_id)s." +msgstr "" + +#: neutron/common/exceptions.py:132 +#, python-format +msgid "" +"Unable to complete operation for network %(net_id)s. The mac address " +"%(mac)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:138 +#, python-format +msgid "" +"Unable to complete operation for %(subnet_id)s. The number of host routes" +" exceeds the limit %(quota)s." +msgstr "" + +#: neutron/common/exceptions.py:144 +#, python-format +msgid "" +"Unable to complete operation for %(subnet_id)s. The number of DNS " +"nameservers exceeds the limit %(quota)s." 
+msgstr "" + +#: neutron/common/exceptions.py:149 +#, python-format +msgid "" +"Unable to complete operation for network %(net_id)s. The IP address " +"%(ip_address)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:154 +#, python-format +msgid "" +"Unable to create the network. The VLAN %(vlan_id)s on physical network " +"%(physical_network)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:160 +#, python-format +msgid "" +"Unable to create the flat network. Physical network %(physical_network)s " +"is in use." +msgstr "" + +#: neutron/common/exceptions.py:165 +#, python-format +msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use." +msgstr "" + +#: neutron/common/exceptions.py:170 +msgid "Tenant network creation is not enabled." +msgstr "" + +#: neutron/common/exceptions.py:178 +msgid "" +"Unable to create the network. No tenant network is available for " +"allocation." +msgstr "" + +#: neutron/common/exceptions.py:183 +#, python-format +msgid "" +"Subnet on port %(port_id)s does not match the requested subnet " +"%(subnet_id)s" +msgstr "" + +#: neutron/common/exceptions.py:188 +#, python-format +msgid "Malformed request body: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:198 +#, python-format +msgid "Invalid input for operation: %(error_message)s." +msgstr "" + +#: neutron/common/exceptions.py:202 +#, python-format +msgid "The allocation pool %(pool)s is not valid." +msgstr "" + +#: neutron/common/exceptions.py:206 +#, python-format +msgid "" +"Found overlapping allocation pools:%(pool_1)s %(pool_2)s for subnet " +"%(subnet_cidr)s." +msgstr "" + +#: neutron/common/exceptions.py:211 +#, python-format +msgid "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." +msgstr "" + +#: neutron/common/exceptions.py:216 +#, python-format +msgid "Unable to generate unique mac on network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:220 +#, python-format +msgid "No more IP addresses available on network %(net_id)s." +msgstr "" + +#: neutron/common/exceptions.py:224 +#, python-format +msgid "Bridge %(bridge)s does not exist." +msgstr "" + +#: neutron/common/exceptions.py:228 +#, python-format +msgid "Creation failed. %(dev_name)s already exists." +msgstr "" + +#: neutron/common/exceptions.py:232 +msgid "Sudo privilege is required to run this command." +msgstr "" + +#: neutron/common/exceptions.py:236 +#, python-format +msgid "Unknown quota resources %(unknown)s." +msgstr "" + +#: neutron/common/exceptions.py:240 +#, python-format +msgid "Quota exceeded for resources: %(overs)s" +msgstr "" + +#: neutron/common/exceptions.py:244 +msgid "Tenant-id was missing from Quota request" +msgstr "" + +#: neutron/common/exceptions.py:248 +#, python-format +msgid "" +"Change would make usage less than 0 for the following resources: " +"%(unders)s" +msgstr "" + +#: neutron/common/exceptions.py:253 +#, python-format +msgid "" +"Unable to reconfigure sharing settings for network %(network)s. Multiple " +"tenants are using it" +msgstr "" + +#: neutron/common/exceptions.py:258 +#, python-format +msgid "Invalid extension environment: %(reason)s" +msgstr "" + +#: neutron/common/exceptions.py:262 +#, python-format +msgid "Extensions not found: %(extensions)s" +msgstr "" + +#: neutron/common/exceptions.py:266 +#, python-format +msgid "Invalid content type %(content_type)s" +msgstr "" + +#: neutron/common/exceptions.py:270 +#, python-format +msgid "Unable to find any IP address on external network %(net_id)s." 
+msgstr "" + +#: neutron/common/exceptions.py:275 +msgid "More than one external network exists" +msgstr "" + +#: neutron/common/exceptions.py:279 +#, python-format +msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s" +msgstr "" + +#: neutron/common/exceptions.py:284 +#, python-format +msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s" +msgstr "" + +#: neutron/common/exceptions.py:289 +#, python-format +msgid "" +"Current gateway ip %(ip_address)s already in use by port %(port_id)s. " +"Unable to update." +msgstr "" + +#: neutron/common/exceptions.py:294 +#, python-format +msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'" +msgstr "" + +#: neutron/common/exceptions.py:304 +#, python-format +msgid "Invalid network VXLAN port range: '%(vxlan_range)s'" +msgstr "" + +#: neutron/common/exceptions.py:308 +msgid "VXLAN Network unsupported." +msgstr "" + +#: neutron/common/exceptions.py:312 +#, python-format +msgid "Found duplicate extension: %(alias)s" +msgstr "" + +#: neutron/common/exceptions.py:316 +#, python-format +msgid "" +"The following device_id %(device_id)s is not owned by your tenant or " +"matches another tenants router." +msgstr "" + +#: neutron/common/exceptions.py:321 +#, python-format +msgid "Invalid CIDR %(input)s given as IP prefix" +msgstr "" + +#: neutron/common/ipv6_utils.py:27 +msgid "Unable to generate IP address by EUI64 for IPv4 prefix" +msgstr "" + +#: neutron/common/ipv6_utils.py:34 +#, python-format +msgid "" +"Bad prefix or mac format for generating IPv6 address by EUI-64: " +"%(prefix)s, %(mac)s:" +msgstr "" + +#: neutron/common/ipv6_utils.py:38 +#, python-format +msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" +msgstr "" + +#: neutron/common/log.py:32 +#, python-format +msgid "" +"%(class_name)s method %(method_name)s called with arguments %(args)s " +"%(kwargs)s" +msgstr "" + +#: neutron/common/utils.py:68 +#, python-format +msgid "" +"Method %(func_name)s cannot be cached due to unhashable parameters: args:" +" %(args)s, kwargs: %(kwargs)s" +msgstr "" + +#: neutron/common/utils.py:91 +#, python-format +msgid "" +"Instance of class %(module)s.%(class)s doesn't contain attribute _cache " +"therefore results cannot be cached for %(func_name)s." +msgstr "" + +#: neutron/common/utils.py:117 neutron/openstack/common/fileutils.py:63 +#, python-format +msgid "Reloading cached file %s" +msgstr "" + +#: neutron/common/utils.py:200 +#, python-format +msgid "Invalid mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:203 +#, python-format +msgid "Missing key in mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:206 +#, python-format +msgid "Missing value in mapping: '%s'" +msgstr "" + +#: neutron/common/utils.py:208 +#, python-format +msgid "Key %(key)s in mapping: '%(mapping)s' not unique" +msgstr "" + +#: neutron/common/utils.py:211 +#, python-format +msgid "Value %(value)s in mapping: '%(mapping)s' not unique" +msgstr "" + +#: neutron/db/agents_db.py:36 +msgid "" +"Seconds to regard the agent is down; should be at least twice " +"report_interval, to be sure the agent is down for good." +msgstr "" + +#: neutron/db/agents_db.py:93 +#, python-format +msgid "Configuration for agent %(agent_type)s on host %(host)s is invalid." 
+msgstr "" + +#: neutron/db/agents_db.py:214 +msgid "Message with invalid timestamp received" +msgstr "" + +#: neutron/db/agentschedulers_db.py:37 +msgid "Driver to use for scheduling network to DHCP agent" +msgstr "" + +#: neutron/db/agentschedulers_db.py:39 +msgid "Allow auto scheduling networks to DHCP agent." +msgstr "" + +#: neutron/db/agentschedulers_db.py:41 +msgid "Number of DHCP agents scheduled to host a network." +msgstr "" + +#: neutron/db/api.py:77 +#, python-format +msgid "Database registration exception: %s" +msgstr "" + +#: neutron/db/api.py:89 +msgid "Database exception" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:148 +msgid "Cannot create resource for another tenant" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:317 +#, python-format +msgid "Generated mac for network %(network_id)s is %(mac_address)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:323 +#, python-format +msgid "Generated mac %(mac_address)s exists. Remaining attempts %(max_retries)s." +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:327 +#, python-format +msgid "Unable to generate mac address after %s attempts" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:345 +#, python-format +msgid "Delete allocated IP %(ip_address)s (%(network_id)s/%(subnet_id)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:383 +#, python-format +msgid "All IPs from subnet %(subnet_id)s (%(cidr)s) allocated" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:388 +#, python-format +msgid "Allocated IP - %(ip_address)s from %(first_ip)s to %(last_ip)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:395 +msgid "No more free IP's in slice. Deleting allocation pool." +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:414 +#, python-format +msgid "Rebuilding availability ranges for subnet %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:546 +msgid "IP allocation requires subnet_id or ip_address" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:558 +#, python-format +msgid "IP address %s is not a valid IP for the defined networks subnets" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:564 +#, python-format +msgid "" +"Failed to create port on network %(network_id)s, because fixed_ips " +"included invalid subnet %(subnet_id)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:584 +#, python-format +msgid "IP address %s is not a valid IP for the defined subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:593 neutron/db/db_base_plugin_v2.py:626 +msgid "Exceeded maximim amount of fixed ips per port" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:641 +#, python-format +msgid "Port update. Hold %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:648 +#, python-format +msgid "Port update. 
Adding %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:717 +#, python-format +msgid "" +"Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps" +" with another subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:722 +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:742 +msgid "Performing IP validity checks on allocation pools" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:749 +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:756 +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:760 +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:765 +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:778 +msgid "Checking for overlaps among allocation pools and gateway ip" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:789 +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:802 neutron/db/db_base_plugin_v2.py:806 +#, python-format +msgid "Invalid route: %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:879 +#, python-format +msgid "" +"ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " +"'%(addr_mode)s' is not valid. If both attributes are set, they must be " +"the same value" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:887 +msgid "" +"ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set " +"to False." +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:893 +msgid "Cannot disable enable_dhcp with ipv6 attributes set" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:983 +#, python-format +msgid "An exception occurred while creating the %(resource)s:%(item)s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1080 +#, python-format +msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1104 +msgid "Gateway is not valid on subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1124 neutron/db/db_base_plugin_v2.py:1138 +msgid "new subnet" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1131 +#, python-format +msgid "Error parsing dns address %s" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1147 +msgid "ipv6_ra_mode is not valid when ip_version is 4" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1151 +msgid "ipv6_address_mode is not valid when ip_version is 4" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1452 +#, python-format +msgid "Allocated IP %(ip_address)s (%(network_id)s/%(subnet_id)s/%(port_id)s)" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:1531 +#, python-format +msgid "" +"Ignoring PortNotFound when deleting port '%s'. The port has already been " +"deleted." 
+msgstr "" + +#: neutron/db/dhcp_rpc_base.py:58 +msgid "Unrecognized action" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:75 +#, python-format +msgid "" +"Action %(action)s for network %(net_id)s could not complete successfully:" +" %(reason)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:85 +#, python-format +msgid "get_active_networks requested from %s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:92 +#, python-format +msgid "get_active_networks_info from %s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:112 +#, python-format +msgid "Network %(network_id)s requested from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:119 neutron/db/dhcp_rpc_base.py:183 +#, python-format +msgid "Network %s could not be found, it might have been deleted concurrently." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:141 +#, python-format +msgid "Port %(device_id)s for %(network_id)s requested from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:175 +#, python-format +msgid "" +"DHCP port %(device_id)s on network %(network_id)s does not exist on " +"%(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:215 +#, python-format +msgid "DHCP port deletion for %(network_id)s request from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:228 +#, python-format +msgid "DHCP port remove fixed_ip for %(subnet_id)s request from %(host)s" +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:252 +#, python-format +msgid "Updating lease expiration is now deprecated. Issued from host %s." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:263 +#, python-format +msgid "Create dhcp port %(port)s from %(host)s." +msgstr "" + +#: neutron/db/dhcp_rpc_base.py:280 +#, python-format +msgid "Update dhcp port %(port)s from %(host)s." +msgstr "" + +#: neutron/db/extraroute_db.py:36 +msgid "Maximum number of routes" +msgstr "" + +#: neutron/db/extraroute_db.py:91 +msgid "the nexthop is not connected with router" +msgstr "" + +#: neutron/db/extraroute_db.py:96 +msgid "the nexthop is used by router" +msgstr "" + +#: neutron/db/extraroute_db.py:125 +#, python-format +msgid "Added routes are %s" +msgstr "" + +#: neutron/db/extraroute_db.py:133 +#, python-format +msgid "Removed routes are %s" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:34 +msgid "Driver to use for scheduling router to a default L3 agent" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:37 +msgid "Allow auto scheduling of routers to L3 agent." 
+msgstr "" + +#: neutron/db/l3_db.py:240 +#, python-format +msgid "No eligible l3 agent associated with external network %s found" +msgstr "" + +#: neutron/db/l3_db.py:261 +#, python-format +msgid "No IPs available for external network %s" +msgstr "" + +#: neutron/db/l3_db.py:275 +#, python-format +msgid "Network %s is not an external network" +msgstr "" + +#: neutron/db/l3_db.py:389 +#, python-format +msgid "Router already has a port on subnet %s" +msgstr "" + +#: neutron/db/l3_db.py:403 +#, python-format +msgid "" +"Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s " +"of subnet %(sub_id)s" +msgstr "" + +#: neutron/db/l3_db.py:417 neutron/db/l3_db.py:543 +#: neutron/plugins/bigswitch/plugin.py:1009 +#: neutron/plugins/bigswitch/plugin.py:1018 +msgid "Either subnet_id or port_id must be specified" +msgstr "" + +#: neutron/db/l3_db.py:422 +msgid "Cannot specify both subnet-id and port-id" +msgstr "" + +#: neutron/db/l3_db.py:435 +msgid "Router port must have exactly one fixed IP" +msgstr "" + +#: neutron/db/l3_db.py:449 +msgid "Subnet for router interface must have a gateway IP" +msgstr "" + +#: neutron/db/l3_db.py:597 neutron/plugins/nec/nec_router.py:197 +#, python-format +msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" +msgstr "" + +#: neutron/db/l3_db.py:633 +#, python-format +msgid "" +"Port %(port_id)s is associated with a different tenant than Floating IP " +"%(floatingip_id)s and therefore cannot be bound." +msgstr "" + +#: neutron/db/l3_db.py:637 +#, python-format +msgid "" +"Cannot create floating IP and bind it to Port %s, since that port is " +"owned by a different tenant." +msgstr "" + +#: neutron/db/l3_db.py:649 +#, python-format +msgid "Port %(id)s does not have fixed ip %(address)s" +msgstr "" + +#: neutron/db/l3_db.py:656 +#, python-format +msgid "Cannot add floating IP to port %s that hasno fixed IP addresses" +msgstr "" + +#: neutron/db/l3_db.py:660 +#, python-format +msgid "" +"Port %s has multiple fixed IPs. Must provide a specific IP when " +"assigning a floating IP" +msgstr "" + +#: neutron/db/l3_db.py:703 neutron/plugins/vmware/plugins/base.py:1871 +msgid "fixed_ip_address cannot be specified without a port_id" +msgstr "" + +#: neutron/db/l3_db.py:738 +#, python-format +msgid "Network %s is not a valid external network" +msgstr "" + +#: neutron/db/l3_db.py:875 +#, python-format +msgid "" +"Port %(port_id)s has owner %(port_owner)s, but no IP address, so it can " +"be deleted" +msgstr "" + +#: neutron/db/l3_db.py:980 +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "" + +#: neutron/db/l3_rpc_base.py:50 +msgid "" +"No plugin for L3 routing registered! Will reply to l3 agent with empty " +"router dictionary." +msgstr "" + +#: neutron/db/l3_rpc_base.py:64 +#, python-format +msgid "" +"Routers returned to l3 agent:\n" +" %s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:70 +#, python-format +msgid "Checking router: %(id)s for host: %(host)s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:95 +#, python-format +msgid "External network ID returned to l3 agent: %s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:105 +#, python-format +msgid "New status for floating IP %(floatingip_id)s: %(status)s" +msgstr "" + +#: neutron/db/l3_rpc_base.py:113 +#, python-format +msgid "Floating IP: %s no longer present." 
+msgstr "" + +#: neutron/db/routedserviceinsertion_db.py:36 +#, python-format +msgid "Resource type '%(resource_type)s' is longer than %(maxlen)d characters" +msgstr "" + +#: neutron/db/securitygroups_rpc_base.py:277 +#, python-format +msgid "No valid gateway port on subnet %s is found for IPv6 RA" +msgstr "" + +#: neutron/db/sqlalchemyutils.py:73 +#, python-format +msgid "%s is invalid attribute for sort_key" +msgstr "" + +#: neutron/db/sqlalchemyutils.py:76 +#, python-format +msgid "" +"The attribute '%(attr)s' is reference to other resource, can't used by " +"sort '%(resource)s'" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:239 +#: neutron/plugins/vmware/plugins/service.py:902 +#: neutron/services/firewall/fwaas_plugin.py:227 +msgid "create_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:255 +#: neutron/plugins/vmware/plugins/service.py:936 +#: neutron/services/firewall/fwaas_plugin.py:242 +msgid "update_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:265 +#: neutron/plugins/vmware/plugins/service.py:962 +#: neutron/services/firewall/fwaas_plugin.py:257 +msgid "delete_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:275 +msgid "get_firewall() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:280 +msgid "get_firewalls() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:286 +msgid "get_firewalls_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:291 +msgid "create_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:307 +#: neutron/plugins/vmware/plugins/service.py:1024 +#: neutron/services/firewall/fwaas_plugin.py:266 +msgid "update_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:319 +msgid "delete_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:331 +msgid "get_firewall_policy() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:336 +msgid "get_firewall_policies() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:342 +msgid "get_firewall_policies_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:347 +msgid "create_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:376 +#: neutron/plugins/vmware/plugins/service.py:998 +#: neutron/services/firewall/fwaas_plugin.py:274 +msgid "update_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:400 +msgid "delete_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:408 +msgid "get_firewall_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:413 +msgid "get_firewall_rules() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:419 +msgid "get_firewall_rules_count() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:428 +#: neutron/plugins/vmware/plugins/service.py:1051 +#: neutron/services/firewall/fwaas_plugin.py:284 +msgid "insert_rule() called" +msgstr "" + +#: neutron/db/firewall/firewall_db.py:468 +#: neutron/plugins/vmware/plugins/service.py:1080 +#: neutron/services/firewall/fwaas_plugin.py:292 +msgid "remove_rule() called" +msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:68 +#, python-format +msgid "The %(key)s field can not have negative value. Current value is %(value)d." +msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:272 +msgid "'cookie_name' should be specified for this type of session persistence." 
+msgstr "" + +#: neutron/db/loadbalancer/loadbalancer_db.py:276 +msgid "'cookie_name' is not allowed for this type of session persistence" +msgstr "" + +#: neutron/db/metering/metering_rpc.py:46 +#, python-format +msgid "Unable to find agent %s." +msgstr "" + +#: neutron/db/migration/cli.py:32 +msgid "Neutron plugin provider module" +msgstr "" + +#: neutron/db/migration/cli.py:41 +msgid "Neutron quota driver class" +msgstr "" + +#: neutron/db/migration/cli.py:49 +msgid "URL to database" +msgstr "" + +#: neutron/db/migration/cli.py:52 +msgid "Database engine" +msgstr "" + +#: neutron/db/migration/cli.py:75 +msgid "You must provide a revision or relative delta" +msgstr "" + +#: neutron/db/migration/cli.py:105 neutron/db/migration/cli.py:118 +msgid "Timeline branches unable to generate timeline" +msgstr "" + +#: neutron/db/migration/cli.py:112 +msgid "HEAD file does not match migration timeline head" +msgstr "" + +#: neutron/db/migration/cli.py:154 +msgid "Available commands" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:88 +msgid "Missing version in alembic_versions table" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:90 +#, python-format +msgid "Multiple versions in alembic_versions table: %s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:94 +#, python-format +msgid "" +"Unsupported database schema %(current)s. Please migrate your database to " +"one of following versions: %(supported)s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:414 +#, python-format +msgid "Unknown tunnel type: %s" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:428 +msgid "The plugin type whose database will be migrated" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:431 +msgid "The connection url for the target db" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:434 +#, python-format +msgid "The %s tunnel type to migrate from" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:437 +#: neutron/plugins/openvswitch/common/config.py:78 +msgid "The UDP port to use for VXLAN tunnels." 
+msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:440 +msgid "Retain the old plugin's tables" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:446 +#, python-format +msgid "" +"Tunnel args (tunnel-type and vxlan-udp-port) are not valid for the %s " +"plugin" +msgstr "" + +#: neutron/db/migration/migrate_to_ml2.py:453 +#, python-format +msgid "" +"Support for migrating %(plugin)s for release %(release)s is not yet " +"implemented" +msgstr "" + +#: neutron/db/vpn/vpn_db.py:678 +#, python-format +msgid "vpnservice %s in db is already deleted" +msgstr "" + +#: neutron/debug/commands.py:32 +msgid "Unimplemented commands" +msgstr "" + +#: neutron/debug/commands.py:44 +msgid "ID of network to probe" +msgstr "" + +#: neutron/debug/commands.py:48 +msgid "Owner type of the device: network/compute" +msgstr "" + +#: neutron/debug/commands.py:56 +#, python-format +msgid "Probe created : %s " +msgstr "" + +#: neutron/debug/commands.py:68 +msgid "ID of probe port to delete" +msgstr "" + +#: neutron/debug/commands.py:75 +#, python-format +msgid "Probe %s deleted" +msgstr "" + +#: neutron/debug/commands.py:106 +msgid "All Probes deleted " +msgstr "" + +#: neutron/debug/commands.py:118 +msgid "ID of probe port to execute command" +msgstr "" + +#: neutron/debug/commands.py:123 +msgid "Command to execute" +msgstr "" + +#: neutron/debug/commands.py:143 +msgid "Ping timeout" +msgstr "" + +#: neutron/debug/commands.py:147 +msgid "ID of network" +msgstr "" + +#: neutron/debug/debug_agent.py:120 +#, python-format +msgid "Failed to delete namespace %s" +msgstr "" + +#: neutron/debug/shell.py:62 +msgid "Config file for interface driver (You may also use l3_agent.ini)" +msgstr "" + +#: neutron/debug/shell.py:70 +msgid "" +"You must provide a config file for bridge - either --config-file or " +"env[NEUTRON_TEST_CONFIG_FILE]" +msgstr "" + +#: neutron/extensions/agent.py:61 +#, python-format +msgid "Agent %(id)s could not be found" +msgstr "" + +#: neutron/extensions/agent.py:65 +#, python-format +msgid "Agent with agent_type=%(agent_type)s and host=%(host)s could not be found" +msgstr "" + +#: neutron/extensions/agent.py:70 +#, python-format +msgid "Multiple agents with agent_type=%(agent_type)s and host=%(host)s found" +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:22 +msgid "AllowedAddressPair must contain ip_address" +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:26 +msgid "" +"Port Security must be enabled in order to have allowed address pairs on a" +" port." +msgstr "" + +#: neutron/extensions/allowedaddresspairs.py:31 +#, python-format +msgid "" +"Request contains duplicate address pair: mac_address %(mac_address)s " +"ip_address %(ip_address)s." +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:119 +#, python-format +msgid "Agent %(id)s is not a valid DHCP Agent or has been disabled" +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:123 +#, python-format +msgid "" +"The network %(network_id)s has been already hosted by the DHCP Agent " +"%(agent_id)s." +msgstr "" + +#: neutron/extensions/dhcpagentscheduler.py:128 +#, python-format +msgid "The network %(network_id)s is not hosted by the DHCP agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/external_net.py:23 +#, python-format +msgid "" +"External network %(net_id)s cannot be updated to be made non-external, " +"since it has existing gateway ports" +msgstr "" + +#: neutron/extensions/external_net.py:51 +msgid "Adds external network attribute to network resource." 
+msgstr "" + +#: neutron/extensions/extra_dhcp_opt.py:25 +#, python-format +msgid "ExtraDhcpOpt %(id)s could not be found" +msgstr "" + +#: neutron/extensions/extra_dhcp_opt.py:29 +#, python-format +msgid "Invalid data format for extra-dhcp-opt: %(data)s" +msgstr "" + +#: neutron/extensions/extraroute.py:23 +#, python-format +msgid "Invalid format for routes: %(routes)s, %(reason)s" +msgstr "" + +#: neutron/extensions/extraroute.py:27 +#, python-format +msgid "" +"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot " +"be deleted, as it is required by one or more routes." +msgstr "" + +#: neutron/extensions/extraroute.py:33 +#, python-format +msgid "" +"Unable to complete operation for %(router_id)s. The number of routes " +"exceeds the maximum %(quota)s." +msgstr "" + +#: neutron/extensions/firewall.py:37 +#, python-format +msgid "Firewall %(firewall_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:41 +#, python-format +msgid "Firewall %(firewall_id)s is still active." +msgstr "" + +#: neutron/extensions/firewall.py:45 +#, python-format +msgid "" +"Operation cannot be performed since associated Firewall %(firewall_id)s " +"is in %(pending_state)s." +msgstr "" + +#: neutron/extensions/firewall.py:50 +#, python-format +msgid "Firewall Policy %(firewall_policy_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:54 +#, python-format +msgid "Firewall Policy %(firewall_policy_id)s is being used." +msgstr "" + +#: neutron/extensions/firewall.py:58 +#, python-format +msgid "Firewall Rule %(firewall_rule_id)s could not be found." +msgstr "" + +#: neutron/extensions/firewall.py:62 +#, python-format +msgid "Firewall Rule %(firewall_rule_id)s is being used." +msgstr "" + +#: neutron/extensions/firewall.py:66 +#, python-format +msgid "" +"Firewall Rule %(firewall_rule_id)s is not associated with Firewall " +"Policy %(firewall_policy_id)s." +msgstr "" + +#: neutron/extensions/firewall.py:71 +#, python-format +msgid "" +"Firewall Rule protocol %(protocol)s is not supported. Only protocol " +"values %(values)s and their integer representation (0 to 255) are " +"supported." +msgstr "" + +#: neutron/extensions/firewall.py:77 +#, python-format +msgid "" +"Firewall rule action %(action)s is not supported. Only action values " +"%(values)s are supported." +msgstr "" + +#: neutron/extensions/firewall.py:82 +#, python-format +msgid "%(param)s are not allowed when protocol is set to ICMP." +msgstr "" + +#: neutron/extensions/firewall.py:87 +#, python-format +msgid "Invalid value for port %(port)s." +msgstr "" + +#: neutron/extensions/firewall.py:91 +msgid "Missing rule info argument for insert/remove rule operation." +msgstr "" + +#: neutron/extensions/firewall.py:101 +#, python-format +msgid "%(driver)s: Internal driver error." +msgstr "" + +#: neutron/extensions/firewall.py:150 +#, python-format +msgid "Port '%s' is not a valid number" +msgstr "" + +#: neutron/extensions/firewall.py:154 +#, python-format +msgid "Invalid port '%s'" +msgstr "" + +#: neutron/extensions/firewall.py:168 +#, python-format +msgid "%(msg_ip)s and %(msg_subnet)s" +msgstr "" + +#: neutron/extensions/firewall.py:289 +msgid "Number of firewalls allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/firewall.py:293 +msgid "" +"Number of firewall policies allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/firewall.py:297 +msgid "" +"Number of firewall rules allowed per tenant. 
A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/l3.py:29 +#, python-format +msgid "Router %(router_id)s could not be found" +msgstr "" + +#: neutron/extensions/l3.py:33 +#, python-format +msgid "Router %(router_id)s still has ports" +msgstr "" + +#: neutron/extensions/l3.py:37 +#, python-format +msgid "Router %(router_id)s does not have an interface with id %(port_id)s" +msgstr "" + +#: neutron/extensions/l3.py:42 +#, python-format +msgid "Router %(router_id)s has no interface on subnet %(subnet_id)s" +msgstr "" + +#: neutron/extensions/l3.py:47 +#, python-format +msgid "" +"Router interface for subnet %(subnet_id)s on router %(router_id)s cannot " +"be deleted, as it is required by one or more floating IPs." +msgstr "" + +#: neutron/extensions/l3.py:53 +#, python-format +msgid "Floating IP %(floatingip_id)s could not be found" +msgstr "" + +#: neutron/extensions/l3.py:57 +#, python-format +msgid "" +"External network %(external_network_id)s is not reachable from subnet " +"%(subnet_id)s. Therefore, cannot associate Port %(port_id)s with a " +"Floating IP." +msgstr "" + +#: neutron/extensions/l3.py:63 +#, python-format +msgid "" +"Cannot associate floating IP %(floating_ip_address)s (%(fip_id)s) with " +"port %(port_id)s using fixed IP %(fixed_ip)s, as that fixed IP already " +"has a floating IP on external network %(net_id)s." +msgstr "" + +#: neutron/extensions/l3.py:70 +#, python-format +msgid "" +"Port %(port_id)s has owner %(device_owner)s and therefore cannot be " +"deleted directly via the port API." +msgstr "" + +#: neutron/extensions/l3.py:75 +#, python-format +msgid "" +"Gateway cannot be updated for router %(router_id)s, since a gateway to " +"external network %(net_id)s is required by one or more floating IPs." +msgstr "" + +#: neutron/extensions/l3.py:138 +msgid "Number of routers allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/l3.py:142 +msgid "" +"Number of floating IPs allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:47 +#: neutron/extensions/l3agentscheduler.py:85 +msgid "No plugin for L3 routing registered to handle router scheduling" +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:151 +#, python-format +msgid "Agent %(id)s is not a L3 Agent or has been disabled" +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:155 +#, python-format +msgid "" +"The router %(router_id)s has been already hosted by the L3 Agent " +"%(agent_id)s." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:160 +#, python-format +msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:165 +#, python-format +msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." +msgstr "" + +#: neutron/extensions/l3agentscheduler.py:170 +#, python-format +msgid "The router %(router_id)s is not hosted by L3 agent %(agent_id)s." +msgstr "" + +#: neutron/extensions/lbaas_agentscheduler.py:116 +#, python-format +msgid "No eligible loadbalancer agent found for pool %(pool_id)s." +msgstr "" + +#: neutron/extensions/lbaas_agentscheduler.py:121 +#, python-format +msgid "No active loadbalancer agent found for pool %(pool_id)s." 
+msgstr "" + +#: neutron/extensions/loadbalancer.py:33 +msgid "Delay must be greater than or equal to timeout" +msgstr "" + +#: neutron/extensions/loadbalancer.py:37 +#, python-format +msgid "No eligible backend for pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:41 +#, python-format +msgid "Vip %(vip_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:45 +#, python-format +msgid "Another Vip already exists for pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:49 +#, python-format +msgid "Pool %(pool_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:53 +#, python-format +msgid "Member %(member_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:57 +#, python-format +msgid "Health_monitor %(monitor_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:61 +#, python-format +msgid "Monitor %(monitor_id)s is not associated with Pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:66 +#, python-format +msgid "health_monitor %(monitor_id)s is already associated with pool %(pool_id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:71 +#, python-format +msgid "Invalid state %(state)s of Loadbalancer resource %(id)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:75 +#, python-format +msgid "Pool %(pool_id)s is still in use" +msgstr "" + +#: neutron/extensions/loadbalancer.py:79 +#, python-format +msgid "Health monitor %(monitor_id)s still has associations with pools" +msgstr "" + +#: neutron/extensions/loadbalancer.py:84 +#, python-format +msgid "Statistics of Pool %(pool_id)s could not be found" +msgstr "" + +#: neutron/extensions/loadbalancer.py:88 +#, python-format +msgid "Protocol %(vip_proto)s does not match pool protocol %(pool_proto)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:93 +#, python-format +msgid "" +"Member with address %(address)s and port %(port)s already present in pool" +" %(pool)s" +msgstr "" + +#: neutron/extensions/loadbalancer.py:309 +msgid "Number of vips allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:313 +msgid "Number of pools allowed per tenant. A negative value means unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:317 +msgid "" +"Number of pool members allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/loadbalancer.py:321 +msgid "" +"Number of health monitors allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/metering.py:33 +#, python-format +msgid "Metering label %(label_id)s does not exist" +msgstr "" + +#: neutron/extensions/metering.py:37 +msgid "Duplicate Metering Rule in POST." +msgstr "" + +#: neutron/extensions/metering.py:41 +#, python-format +msgid "Metering label rule %(rule_id)s does not exist" +msgstr "" + +#: neutron/extensions/metering.py:45 +#, python-format +msgid "" +"Metering label rule with remote_ip_prefix %(remote_ip_prefix)s overlaps " +"another" +msgstr "" + +#: neutron/extensions/multiprovidernet.py:27 +msgid "Segments and provider values cannot both be set." +msgstr "" + +#: neutron/extensions/multiprovidernet.py:31 +msgid "Duplicate segment entry in request." +msgstr "" + +#: neutron/extensions/portsecurity.py:20 +msgid "" +"Port has security group associated. 
Cannot disable port security or ip " +"address until security group is removed" +msgstr "" + +#: neutron/extensions/portsecurity.py:25 +msgid "" +"Port security must be enabled and port must have an IP address in order " +"to use security groups." +msgstr "" + +#: neutron/extensions/portsecurity.py:30 +msgid "Port does not have port security binding." +msgstr "" + +#: neutron/extensions/providernet.py:54 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:287 +msgid "Plugin does not support updating provider attributes" +msgstr "" + +#: neutron/extensions/quotasv2.py:67 +msgid "POST requests are not supported on this resource." +msgstr "" + +#: neutron/extensions/quotasv2.py:86 +msgid "Only admin is authorized to access quotas for another tenant" +msgstr "" + +#: neutron/extensions/quotasv2.py:91 +msgid "Only admin can view or configure quota" +msgstr "" + +#: neutron/extensions/securitygroup.py:34 +msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" +msgstr "" + +#: neutron/extensions/securitygroup.py:39 +#, python-format +msgid "Invalid value for port %(port)s" +msgstr "" + +#: neutron/extensions/securitygroup.py:43 +#, python-format +msgid "" +"Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to " +"255." +msgstr "" + +#: neutron/extensions/securitygroup.py:48 +#, python-format +msgid "" +"ICMP code (port-range-max) %(value)s is provided but ICMP type (port-" +"range-min) is missing." +msgstr "" + +#: neutron/extensions/securitygroup.py:53 +#, python-format +msgid "Security Group %(id)s in use." +msgstr "" + +#: neutron/extensions/securitygroup.py:57 +msgid "Removing default security group not allowed." +msgstr "" + +#: neutron/extensions/securitygroup.py:61 +msgid "Updating default security group not allowed." +msgstr "" + +#: neutron/extensions/securitygroup.py:65 +msgid "Default security group already exists." +msgstr "" + +#: neutron/extensions/securitygroup.py:69 +#, python-format +msgid "" +"Security group rule protocol %(protocol)s not supported. Only protocol " +"values %(values)s and their integer representation (0 to 255) are " +"supported." +msgstr "" + +#: neutron/extensions/securitygroup.py:75 +msgid "Multiple tenant_ids in bulk security group rule create not allowed" +msgstr "" + +#: neutron/extensions/securitygroup.py:80 +msgid "Only remote_ip_prefix or remote_group_id may be provided." +msgstr "" + +#: neutron/extensions/securitygroup.py:85 +msgid "Must also specifiy protocol if port range is given." +msgstr "" + +#: neutron/extensions/securitygroup.py:89 +msgid "Only allowed to update rules for one security profile at a time" +msgstr "" + +#: neutron/extensions/securitygroup.py:94 +#, python-format +msgid "Security group %(id)s does not exist" +msgstr "" + +#: neutron/extensions/securitygroup.py:98 +#, python-format +msgid "Security group rule %(id)s does not exist" +msgstr "" + +#: neutron/extensions/securitygroup.py:102 +msgid "Duplicate Security Group Rule in POST." +msgstr "" + +#: neutron/extensions/securitygroup.py:106 +#, python-format +msgid "Security group rule already exists. Group id is %(id)s." +msgstr "" + +#: neutron/extensions/securitygroup.py:110 +#, python-format +msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" +msgstr "" + +#: neutron/extensions/securitygroup.py:158 +#, python-format +msgid "'%s' is not an integer or uuid" +msgstr "" + +#: neutron/extensions/securitygroup.py:247 +msgid "" +"Number of security groups allowed per tenant. A negative value means " +"unlimited." 
+msgstr "" + +#: neutron/extensions/securitygroup.py:251 +msgid "" +"Number of security rules allowed per tenant. A negative value means " +"unlimited." +msgstr "" + +#: neutron/extensions/servicetype.py:52 +msgid "Neutron Service Type Management" +msgstr "" + +#: neutron/extensions/servicetype.py:60 +msgid "API for retrieving service providers for Neutron advanced services" +msgstr "" + +#: neutron/extensions/vpnaas.py:31 +#, python-format +msgid "VPNService %(vpnservice_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:35 +#, python-format +msgid "ipsec_site_connection %(ipsecsite_conn_id)s not found" +msgstr "" + +#: neutron/extensions/vpnaas.py:39 +#, python-format +msgid "ipsec_site_connection %(attr)s is equal to or less than dpd_interval" +msgstr "" + +#: neutron/extensions/vpnaas.py:44 +#, python-format +msgid "ipsec_site_connection MTU %(mtu)d is too small for ipv%(version)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:49 +#, python-format +msgid "IKEPolicy %(ikepolicy_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:53 +#, python-format +msgid "IPsecPolicy %(ipsecpolicy_id)s could not be found" +msgstr "" + +#: neutron/extensions/vpnaas.py:57 +#, python-format +msgid "" +"IKEPolicy %(ikepolicy_id)s is in use by existing IPsecSiteConnection and " +"can't be updated or deleted" +msgstr "" + +#: neutron/extensions/vpnaas.py:62 +#, python-format +msgid "VPNService %(vpnservice_id)s is still in use" +msgstr "" + +#: neutron/extensions/vpnaas.py:66 +#, python-format +msgid "Router %(router_id)s is used by VPNService %(vpnservice_id)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:70 +#, python-format +msgid "Invalid state %(state)s of vpnaas resource %(id)s for updating" +msgstr "" + +#: neutron/extensions/vpnaas.py:75 +#, python-format +msgid "" +"IPsecPolicy %(ipsecpolicy_id)s is in use by existing IPsecSiteConnection " +"and can't be updated or deleted" +msgstr "" + +#: neutron/extensions/vpnaas.py:80 +#, python-format +msgid "Can not load driver :%(device_driver)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:84 +#, python-format +msgid "Subnet %(subnet_id)s is not connected to Router %(router_id)s" +msgstr "" + +#: neutron/extensions/vpnaas.py:89 +#, python-format +msgid "Router %(router_id)s has no external network gateway set" +msgstr "" + +#: neutron/notifiers/nova.py:165 +msgid "device_id is not set on port yet." +msgstr "" + +#: neutron/notifiers/nova.py:169 +msgid "Port ID not set! Nova will not be notified of port status change." 
+msgstr "" + +#: neutron/notifiers/nova.py:194 +#, python-format +msgid "" +"Ignoring state change previous_port_status: %(pre_status)s " +"current_port_status: %(cur_status)s port_id %(id)s" +msgstr "" + +#: neutron/notifiers/nova.py:220 +#, python-format +msgid "Sending events: %s" +msgstr "" + +#: neutron/notifiers/nova.py:225 +#, python-format +msgid "Nova returned NotFound for event: %s" +msgstr "" + +#: neutron/notifiers/nova.py:228 +#, python-format +msgid "Failed to notify nova on events: %s" +msgstr "" + +#: neutron/notifiers/nova.py:232 neutron/notifiers/nova.py:248 +#, python-format +msgid "Error response returned from nova: %s" +msgstr "" + +#: neutron/notifiers/nova.py:243 +#, python-format +msgid "Nova event: %s returned with failed status" +msgstr "" + +#: neutron/notifiers/nova.py:246 +#, python-format +msgid "Nova event response: %s" +msgstr "" + +#: neutron/openstack/common/eventlet_backdoor.py:140 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: neutron/openstack/common/gettextutils.py:320 +msgid "Message objects do not support addition." +msgstr "" + +#: neutron/openstack/common/gettextutils.py:330 +msgid "" +"Message objects do not support str() because they may contain non-ascii " +"characters. Please use unicode() or translate() instead." +msgstr "" + +#: neutron/openstack/common/lockutils.py:103 +#, python-format +msgid "Could not release the acquired lock `%s`" +msgstr "" + +#: neutron/openstack/common/lockutils.py:168 +#, python-format +msgid "Got semaphore \"%(lock)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:177 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:187 +#, python-format +msgid "Created lock path: %s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:205 +#, python-format +msgid "Got file lock \"%(lock)s\" at %(path)s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:209 +#, python-format +msgid "Released file lock \"%(lock)s\" at %(path)s" +msgstr "" + +#: neutron/openstack/common/lockutils.py:247 +#, python-format +msgid "Got semaphore / lock \"%(function)s\"" +msgstr "" + +#: neutron/openstack/common/lockutils.py:251 +#, python-format +msgid "Semaphore / lock released \"%(function)s\"" +msgstr "" + +#: neutron/openstack/common/log.py:327 +#, python-format +msgid "Deprecated: %s" +msgstr "" + +#: neutron/openstack/common/log.py:436 +#, python-format +msgid "Error loading logging config %(log_config)s: %(err_msg)s" +msgstr "" + +#: neutron/openstack/common/log.py:486 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: neutron/openstack/common/log.py:729 +#, python-format +msgid "Fatal call to deprecated config: %(msg)s" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:82 +#, python-format +msgid "task run outlasted interval by %s sec" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:89 +msgid "in fixed duration looping call" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:129 +#, python-format +msgid "Dynamic looping call sleeping for %.02f seconds" +msgstr "" + +#: neutron/openstack/common/loopingcall.py:136 +msgid "in dynamic looping call" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:39 +#, python-format +msgid "Unexpected argument for periodic task creation: %(arg)s." 
+msgstr "" + +#: neutron/openstack/common/policy.py:395 +#, python-format +msgid "Failed to understand rule %(rule)s" +msgstr "" + +#: neutron/openstack/common/policy.py:405 +#, python-format +msgid "No handler for matches of kind %s" +msgstr "" + +#: neutron/openstack/common/policy.py:680 +#, python-format +msgid "Failed to understand rule %(rule)r" +msgstr "" + +#: neutron/openstack/common/processutils.py:130 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: neutron/openstack/common/processutils.py:145 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:169 +#: neutron/openstack/common/processutils.py:241 +#, python-format +msgid "Result was %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:181 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: neutron/openstack/common/processutils.py:220 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: neutron/openstack/common/processutils.py:222 +msgid "Environment not supported over SSH" +msgstr "" + +#: neutron/openstack/common/processutils.py:226 +msgid "process_input not supported over SSH" +msgstr "" + +#: neutron/openstack/common/sslutils.py:50 +#, python-format +msgid "Unable to find cert_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:53 +#, python-format +msgid "Unable to find ca_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:56 +#, python-format +msgid "Unable to find key_file : %s" +msgstr "" + +#: neutron/openstack/common/sslutils.py:59 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: neutron/openstack/common/sslutils.py:98 +#, python-format +msgid "Invalid SSL version : %s" +msgstr "" + +#: neutron/openstack/common/strutils.py:92 +#, python-format +msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" +msgstr "" + +#: neutron/openstack/common/strutils.py:197 +#, python-format +msgid "Invalid unit system: \"%s\"" +msgstr "" + +#: neutron/openstack/common/strutils.py:206 +#, python-format +msgid "Invalid string format: %s" +msgstr "" + +#: neutron/openstack/common/versionutils.py:69 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and " +"may be removed in %(remove_in)s." +msgstr "" + +#: neutron/openstack/common/versionutils.py:73 +#, python-format +msgid "" +"%(what)s is deprecated as of %(as_of)s and may be removed in " +"%(remove_in)s. It will not be superseded." +msgstr "" + +#: neutron/openstack/common/db/exception.py:44 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:58 +msgid "Sort key supplied was not valid." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:119 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:162 +#, python-format +msgid "" +"There is no `deleted` column in `%s` table. Project doesn't use soft-" +"deleted feature." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:174 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:181 +#, python-format +msgid "There is no `project_id` column in `%s` table." 
+msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:243 +msgid "model should be a subclass of ModelBase" +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:286 +#, python-format +msgid "" +"Please specify column %s in col_name_col_instance param. It is required " +"because column has unsupported type by sqlite)." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:292 +#, python-format +msgid "" +"col_name_col_instance param has wrong type of column instance for column " +"%s It should be instance of sqlalchemy.Column." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:400 +msgid "Unsupported id columns type" +msgstr "" + +#: neutron/openstack/common/middleware/catch_errors.py:40 +#, python-format +msgid "An error occurred during processing the request: %s" +msgstr "" + +#: neutron/openstack/common/middleware/sizelimit.py:55 +#: neutron/openstack/common/middleware/sizelimit.py:64 +#: neutron/openstack/common/middleware/sizelimit.py:75 +msgid "Request is too large." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:32 +msgid "" +"A comma separated list of Big Switch or Floodlight servers and port " +"numbers. The plugin proxies the requests to the Big Switch/Floodlight " +"server, which performs the networking configuration. Only oneserver is " +"needed per deployment, but you may wish todeploy multiple servers to " +"support failover." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:39 +msgid "" +"The username and password for authenticating against the Big Switch or " +"Floodlight controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:42 +msgid "" +"If True, Use SSL when connecting to the Big Switch or Floodlight " +"controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:45 +msgid "" +"Trust and store the first certificate received for each controller " +"address and use it to validate future connections to that address." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:49 +msgid "Disables SSL certificate validation for controllers" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:51 +msgid "Re-use HTTP/HTTPS connections to the controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:54 +msgid "Directory containing ca_certs and host_certs certificate directories." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:57 +msgid "Sync data on connect" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:59 +msgid "" +"If neutron fails to create a resource because the backend controller " +"doesn't know of a dependency, the plugin automatically triggers a full " +"data synchronization to the controller." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:64 +msgid "" +"Time between verifications that the backend controller database is " +"consistent with Neutron. (0 to disable)" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:67 +msgid "" +"Maximum number of seconds to wait for proxy request to connect and " +"complete." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:70 +msgid "" +"Maximum number of threads to spawn to handle large volumes of port " +"creations." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:74 +msgid "User defined identifier for this Neutron deployment" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:76 +msgid "" +"Flag to decide if a route to the metadata server should be injected into " +"the VM" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:81 +msgid "" +"The default router rules installed in new tenant routers. 
Repeat the " +"config option for each rule. Format is " +"<tenant>:<source>:<destination>:<action> Use an * to specify default for " +"all tenants." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:86 +msgid "Maximum number of router rules" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:90 +msgid "Virtual interface type to configure on Nova compute nodes" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:97 +#, python-format +msgid "Nova compute nodes to manually set VIF type to %s" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:104 +msgid "List of allowed vif_type values." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:108 +msgid "" +"Name of integration bridge on compute nodes used for security group " +"insertion." +msgstr "" + +#: neutron/plugins/bigswitch/config.py:111 +msgid "Seconds between agent checks for port changes" +msgstr "" + +#: neutron/plugins/bigswitch/config.py:113 +msgid "Virtual switch type." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:94 +msgid "Syntax error in server config file, aborting plugin" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:132 neutron/plugins/ml2/db.py:100 +#, python-format +msgid "get_port_and_sgs() called for port_id %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:238 +#, python-format +msgid "Unable to update remote topology: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:322 +#, python-format +msgid "" +"Setting admin_state_up=False is not supported in this plugin version. " +"Ignoring setting for resource: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:328 +#, python-format +msgid "" +"Operational status is internally set by the plugin. Ignoring setting " +"status=%s." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:353 +#, python-format +msgid "Unrecognized vif_type in configuration [%s]. Defaulting to ovs." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:399 +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:96 +msgid "Iconsistency with backend controller triggering full synchronization." +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:416 +#, python-format +msgid "NeutronRestProxyV2: Unable to create port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:475 +#, python-format +msgid "NeutronRestProxy: Starting plugin. 
Version=%s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:502 +msgid "NeutronRestProxyV2: initialization done" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:545 +msgid "NeutronRestProxyV2: create_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:588 +msgid "NeutronRestProxyV2.update_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:616 +msgid "NeutronRestProxyV2: delete_network() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:654 +msgid "NeutronRestProxyV2: create_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:746 +msgid "NeutronRestProxyV2: update_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:806 +msgid "NeutronRestProxyV2: delete_port() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:823 +msgid "NeutronRestProxyV2: create_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:840 +msgid "NeutronRestProxyV2: update_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:859 +msgid "NeutronRestProxyV2: delete_subnet() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:898 +msgid "NeutronRestProxyV2: create_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:921 +msgid "NeutronRestProxyV2.update_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:944 +msgid "NeutronRestProxyV2: delete_router() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:973 +msgid "NeutronRestProxyV2: add_router_interface() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1001 +msgid "NeutronRestProxyV2: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1035 +msgid "NeutronRestProxyV2: create_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1052 +#, python-format +msgid "NeutronRestProxyV2: Unable to create remote floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1059 +msgid "NeutronRestProxyV2: update_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1076 +msgid "NeutronRestProxyV2: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1091 +msgid "NeutronRestProxyV2: diassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1122 +msgid "NeutronRestProxyV2: too many external networks" +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1137 +msgid "Adding host route: " +msgstr "" + +#: neutron/plugins/bigswitch/plugin.py:1138 +#, python-format +msgid "Destination:%(dst)s nexthop:%(next)s" +msgstr "" + +#: neutron/plugins/bigswitch/routerrule_db.py:75 +msgid "No rules in router" +msgstr "" + +#: neutron/plugins/bigswitch/routerrule_db.py:89 +#, python-format +msgid "Updating router rules to %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:77 +#, python-format +msgid "Error in REST call to remote network controller: %(reason)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:116 +msgid "Couldn't retrieve capabilities. Newer API calls won't be supported." 
+msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:118 +#, python-format +msgid "The following capabilities were received for %(server)s: %(cap)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:147 +#, python-format +msgid "ServerProxy: server=%(server)s, port=%(port)d, ssl=%(ssl)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:150 +#, python-format +msgid "" +"ServerProxy: resource=%(resource)s, data=%(data)r, headers=%(headers)r, " +"action=%(action)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:171 +msgid "ServerProxy: Could not establish HTTPS connection" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:179 +msgid "ServerProxy: Could not establish HTTP connection" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:212 +#, python-format +msgid "ServerProxy: %(action)s failure, %(e)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:215 +#, python-format +msgid "" +"ServerProxy: status=%(status)d, reason=%(reason)r, ret=%(ret)s, " +"data=%(data)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:227 +msgid "ServerPool: initializing" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:250 +msgid "Servers not defined. Aborting server manager." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:257 +#, python-format +msgid "Servers must be defined as :. Configuration was %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:265 +msgid "ServerPool: initialization done" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:311 +#, python-format +msgid "ssl_cert_directory [%s] does not exist. Create it or disable ssl." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:328 +#, python-format +msgid "No certificates were found to verify controller %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:370 +#, python-format +msgid "" +"Could not retrieve initial certificate from controller %(server)s. Error " +"details: %(error)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:375 +#, python-format +msgid "Storing to certificate for host %(server)s at %(path)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:417 +msgid "Server requires synchronization, but no topology function was defined." 
+msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:432 +#, python-format +msgid "" +"ServerProxy: %(action)s failure for servers: %(server)r Response: " +"%(response)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:438 +#, python-format +msgid "" +"ServerProxy: Error details: status=%(status)d, reason=%(reason)r, " +"ret=%(ret)s, data=%(data)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:445 +#, python-format +msgid "ServerProxy: %(action)s failure for all servers: %(server)r" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:468 +#, python-format +msgid "" +"NeutronRestProxyV2: Received and ignored error code %(code)s on " +"%(action)s action to resource %(resource)s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:478 +#, python-format +msgid "Unable to create remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:484 +#, python-format +msgid "Unable to update remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:489 +#, python-format +msgid "Unable to delete remote router: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:495 +#, python-format +msgid "Unable to add router interface: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:500 +#, python-format +msgid "Unable to delete remote intf: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:506 +#, python-format +msgid "Unable to create remote network: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:512 +#: neutron/plugins/bigswitch/servermanager.py:517 +#, python-format +msgid "Unable to update remote network: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:526 +#, python-format +msgid "No device MAC attached to port %s. Skipping notification to controller." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:531 +#, python-format +msgid "Unable to create remote port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:536 +#, python-format +msgid "Unable to delete remote port: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:546 +#, python-format +msgid "Unable to create floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:551 +#, python-format +msgid "Unable to update floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:556 +#, python-format +msgid "Unable to delete floating IP: %s" +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:561 +msgid "Backend server(s) do not support automated consitency checks." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:565 +#, python-format +msgid "Consistency watchdog disabled by polling interval setting of %s." +msgstr "" + +#: neutron/plugins/bigswitch/servermanager.py:577 +msgid "Encountered an error checking controller health." +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:116 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:235 +msgid "Port update received" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:120 +#, python-format +msgid "Port %s is not present on this host." +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:123 +#, python-format +msgid "Port %s found. Refreshing firewall." 
+msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:151 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:268 +msgid "Agent loop has new device" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:155 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:398 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:225 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:159 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:272 +msgid "Error in agent event loop" +msgstr "" + +#: neutron/plugins/bigswitch/agent/restproxy_agent.py:161 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:226 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:993 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1404 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:278 +#, python-format +msgid "Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" + +#: neutron/plugins/bigswitch/db/consistency_db.py:26 +msgid "Only one read_for_update call may be made at a time." +msgstr "" + +#: neutron/plugins/bigswitch/db/consistency_db.py:81 +#, python-format +msgid "Consistency hash for group %(hash_id)s updated to %(hash)s" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:39 +msgid "No host_id in port request to track port location." +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:42 +#, python-format +msgid "Received an empty port ID for host_id '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:45 +#, python-format +msgid "Received an empty host_id for port '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/db/porttracker_db.py:47 +#, python-format +msgid "Logging port %(port)s on host_id %(host)s" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:28 +#, python-format +msgid "Invalid format for router rules: %(rule)s, %(reason)s" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:32 +#, python-format +msgid "" +"Unable to complete rules update for %(router_id)s. The number of rules " +"exceeds the maximum %(quota)s." +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:49 +#, python-format +msgid "Invalid data format for router rule: '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:81 +#, python-format +msgid "Duplicate nexthop in rule '%s'" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:89 +#, python-format +msgid "Action must be either permit or deny. 
'%s' was provided" +msgstr "" + +#: neutron/plugins/bigswitch/extensions/routerrule.py:101 +#, python-format +msgid "Duplicate router rules (src,dst) found '%s'" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:62 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:34 +msgid "The address of the host to SSH to" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:64 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:36 +msgid "The SSH username to use" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:66 +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:38 +msgid "The SSH password to use" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:68 +msgid "Currently unused" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:72 +msgid "The network interface to use when creatinga port" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:123 +#: neutron/plugins/hyperv/rpc_callbacks.py:47 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:80 +#: neutron/plugins/mlnx/rpc_callbacks.py:62 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:88 +#, python-format +msgid "Device %(device)s details requested from %(agent_id)s" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:137 +#: neutron/plugins/brocade/NeutronPlugin.py:164 +#: neutron/plugins/hyperv/rpc_callbacks.py:63 +#: neutron/plugins/hyperv/rpc_callbacks.py:92 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:103 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:140 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:163 +#: neutron/plugins/mlnx/rpc_callbacks.py:84 +#: neutron/plugins/mlnx/rpc_callbacks.py:113 +#: neutron/plugins/mlnx/rpc_callbacks.py:128 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:106 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:143 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:166 +#, python-format +msgid "%s can not be found in database" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:299 +#: neutron/plugins/brocade/NeutronPlugin.py:343 +#: neutron/plugins/brocade/NeutronPlugin.py:396 +#: neutron/plugins/brocade/NeutronPlugin.py:426 +msgid "Brocade NOS driver error" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:300 +#, python-format +msgid "Returning the allocated vlan (%d) to the pool" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:303 +#: neutron/plugins/brocade/NeutronPlugin.py:344 +#: neutron/plugins/brocade/NeutronPlugin.py:397 +#: neutron/plugins/brocade/NeutronPlugin.py:428 +msgid "Brocade plugin raised exception, check logs" +msgstr "" + +#: neutron/plugins/brocade/NeutronPlugin.py:309 +#, python-format +msgid "Allocated vlan (%d) from the pool" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:69 +#, python-format +msgid "Connect failed to switch: %s" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:71 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:76 +#, python-format +msgid "Connect success to host %(host)s:%(ssh_port)d" +msgstr "" + +#: neutron/plugins/brocade/nos/nosdriver.py:96 +#: neutron/plugins/brocade/nos/nosdriver.py:110 +#: neutron/plugins/brocade/nos/nosdriver.py:123 +#: neutron/plugins/brocade/nos/nosdriver.py:136 +#, python-format +msgid "NETCONF error: %s" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:89 +msgid "Plugin initialization complete" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:117 +#, python-format +msgid "'%(model)s' object has no attribute '%(name)s'" +msgstr "" + +#: 
neutron/plugins/cisco/network_plugin.py:134 +#: neutron/plugins/cisco/db/network_db_v2.py:36 +msgid "get_all_qoss() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:140 +msgid "get_qos_details() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:145 +msgid "create_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:151 +msgid "delete_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:156 +msgid "rename_qos() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:161 +msgid "get_all_credentials() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:167 +msgid "get_credential_details() called" +msgstr "" + +#: neutron/plugins/cisco/network_plugin.py:172 +msgid "rename_credential() called" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:25 +#, python-format +msgid "Segmentation ID for network %(net_id)s is not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:30 +msgid "" +"Unable to complete operation. No more dynamic NICs are available in the " +"system." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:36 +#, python-format +msgid "" +"NetworkVlanBinding for %(vlan_id)s and network %(network_id)s already " +"exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:42 +#, python-format +msgid "Vlan ID %(vlan_id)s not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:47 +msgid "" +"Unable to complete operation. VLAN ID exists outside of the configured " +"network segment range." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:53 +msgid "No Vlan ID available." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:58 +#, python-format +msgid "QoS level %(qos_id)s could not be found for tenant %(tenant_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:64 +#, python-format +msgid "QoS level with name %(qos_name)s already exists for tenant %(tenant_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:70 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:23 +#, python-format +msgid "Credential %(credential_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:75 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:28 +#, python-format +msgid "Credential %(credential_name)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:80 +#, python-format +msgid "Credential %(credential_name)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:85 +#, python-format +msgid "Provider network %s already exists" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:90 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:39 +#, python-format +msgid "Connection to %(host)s is not configured." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:95 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:44 +#, python-format +msgid "Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:100 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:49 +#, python-format +msgid "Failed to configure Nexus: %(config)s. Reason: %(exc)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:105 +#, python-format +msgid "Nexus Port Binding (%(filters)s) is not present." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:114 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:69 +msgid "No usable Nexus switch found to create SVI interface." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:119 +#, python-format +msgid "PortVnic Binding %(port_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:124 +#, python-format +msgid "PortVnic Binding %(port_id)s is not present." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:129 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:74 +msgid "No subnet_id specified for router gateway." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:134 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:79 +#, python-format +msgid "Subnet %(subnet_id)s has an interface on %(router_id)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:139 +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:84 +msgid "Nexus hardware router gateway only uses Subnet Ids." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:143 +#, python-format +msgid "" +"Unable to unplug the attachment %(att_id)s from port %(port_id)s for " +"network %(net_id)s. The attachment %(att_id)s does not exist." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:150 +#, python-format +msgid "Policy Profile %(profile_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:156 +#, python-format +msgid "Policy Profile %(profile_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:161 +#, python-format +msgid "Network Profile %(profile_id)s already exists." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:167 +#, python-format +msgid "Network Profile %(profile)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:172 +#, python-format +msgid "" +"One or more network segments belonging to network profile %(profile)s is " +"in use." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:178 +#, python-format +msgid "" +"No more segments available in network segment pool " +"%(network_profile_name)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:184 +#, python-format +msgid "VM Network %(name)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:189 +#, python-format +msgid "Unable to create the network. The VXLAN ID %(vxlan_id)s is in use." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:195 +#, python-format +msgid "Vxlan ID %(vxlan_id)s not found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:200 +msgid "" +"Unable to complete operation. VXLAN ID exists outside of the configured " +"network segment range." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:206 +#, python-format +msgid "Connection to VSM failed: %(reason)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:211 +#, python-format +msgid "Internal VSM Error: %(reason)s." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:216 +#, python-format +msgid "Network Binding for network %(network_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:222 +#, python-format +msgid "Port Binding for port %(port_id)s could not be found." 
+msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:228 +#, python-format +msgid "Profile-Tenant binding for profile %(profile_id)s could not be found." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_exceptions.py:234 +msgid "No service cluster found to perform multi-segment bridging." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:72 +msgid "Port not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:73 +msgid "Unable to find a port with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:87 +msgid "Credential Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:88 +msgid "Unable to find a Credential with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:103 +msgid "QoS Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:104 +msgid "Unable to find a QoS with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:119 +msgid "Nova tenant Not Found" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:120 +msgid "Unable to find a Novatenant with the specified identifier." +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:135 +msgid "Requested State Invalid" +msgstr "" + +#: neutron/plugins/cisco/common/cisco_faults.py:136 +msgid "Unable to update port state with specified value." +msgstr "" + +#: neutron/plugins/cisco/common/config.py:24 +msgid "Virtual Switch to use" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:28 +msgid "Nexus Switch to use" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:33 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:21 +msgid "VLAN Name prefix" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:35 +msgid "VLAN Name prefix for provider vlans" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:37 +msgid "Provider VLANs are automatically created as needed on the Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:40 +msgid "" +"Provider VLANs are automatically trunked as needed on the ports of the " +"Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:43 +msgid "Enable L3 support on the Nexus switches" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:45 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:23 +msgid "Distribute SVI interfaces over all switches" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:49 +msgid "Model Class" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:53 +msgid "Nexus Driver Name" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:58 +msgid "N1K Integration Bridge" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:60 +msgid "N1K Enable Tunneling" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:62 +msgid "N1K Tunnel Bridge" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:64 +msgid "N1K Local IP" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:66 +msgid "N1K Tenant Network Type" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:68 +msgid "N1K Bridge Mappings" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:70 +msgid "N1K VXLAN ID Ranges" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:72 +msgid "N1K Network VLAN Ranges" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:74 +msgid "N1K default network profile" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:76 +msgid "N1K default policy profile" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:78 +msgid 
"N1K policy profile for network node" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:80 +msgid "N1K Policy profile polling duration in seconds" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:82 +msgid "Number of threads to use to make HTTP requests" +msgstr "" + +#: neutron/plugins/cisco/common/config.py:135 +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:68 +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:58 +msgid "Some config files were not parsed properly" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:329 +#, python-format +msgid "seg_min %(seg_min)s, seg_max %(seg_max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:560 +#, python-format +msgid "Reserving specific vlan %(vlan)s on physical network %(network)s from pool" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:585 +#, python-format +msgid "vlan_id %(vlan)s on physical network %(network)s not found" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:599 +#, python-format +msgid "Unreasonable vxlan ID range %(vxlan_min)s - %(vxlan_max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:641 +#, python-format +msgid "Reserving specific vxlan %s from pool" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:662 +#, python-format +msgid "vxlan_id %s not found" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:770 +msgid "create_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:792 +msgid "delete_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:806 +msgid "update_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:815 +msgid "get_network_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:840 +msgid "create_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:851 +msgid "delete_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:860 +msgid "update_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:869 +msgid "get_policy_profile()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:888 +msgid "Invalid profile type" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:905 +msgid "_profile_binding_exists()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:913 +msgid "get_profile_binding()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:923 +msgid "delete_profile_binding()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:930 +#, python-format +msgid "" +"Profile-Tenant binding missing for profile ID %(profile_id)s and tenant " +"ID %(tenant_id)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:944 +msgid "_get_profile_bindings()" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1094 +msgid "segment_range not required for TRUNK" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1100 +msgid "multicast_ip_range not required" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1227 +msgid "Invalid segment range. example range: 500-550" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1240 +msgid "Invalid multicast ip address range. 
example range: 224.1.1.1-224.1.1.10" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1247 +#, python-format +msgid "%s is not a valid multicast ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1251 +#, python-format +msgid "%s is reserved multicast ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1255 +#, python-format +msgid "%s is not a valid ip address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1259 +#, python-format +msgid "" +"Invalid multicast IP range '%(min_ip)s-%(max_ip)s': Range should be from " +"low address to high address" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1272 +msgid "Arguments segment_type missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1281 +msgid "segment_type should either be vlan, overlay, multi-segment or trunk" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1287 +msgid "Argument physical_network missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1293 +msgid "segment_range not required for trunk" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1299 +msgid "Argument sub_type missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1306 +msgid "Argument segment_range missing for network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1317 +msgid "Argument multicast_ip_range missing for VXLAN multicast network profile" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1345 +#, python-format +msgid "Segment range is invalid, select from %(min)s-%(nmin)s, %(nmax)s-%(max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1363 +#, python-format +msgid "segment range is invalid. Valid range is : %(min)s-%(max)s" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1377 +#, python-format +msgid "NetworkProfile name %s already exists" +msgstr "" + +#: neutron/plugins/cisco/db/n1kv_db_v2.py:1394 +msgid "Segment range overlaps with another profile" +msgstr "" + +#: neutron/plugins/cisco/db/network_db_v2.py:44 +msgid "get_qos() called" +msgstr "" + +#: neutron/plugins/cisco/db/network_db_v2.py:57 +msgid "add_qos() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:32 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:30 +msgid "get_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:41 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:39 +msgid "get_nexusvlan_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:47 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:45 +msgid "add_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:60 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:58 +msgid "remove_nexusport_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:76 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:74 +msgid "update_nexusport_binding called with no vlan" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:78 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:76 +msgid "update_nexusport_binding called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:89 +msgid "get_nexusvm_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:97 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:94 +msgid "get_port_vlan_switch_binding() called" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:105 +#: 
neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:102 +#, python-format +msgid "" +"get_port_switch_bindings() called, port:'%(port_id)s', " +"switch:'%(switch_ip)s'" +msgstr "" + +#: neutron/plugins/cisco/db/nexus_db_v2.py:117 +msgid "get_nexussvi_bindings() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:72 +#, python-format +msgid "Loaded device plugin %s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:89 +#, python-format +msgid "%(module)s.%(name)s init done" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:136 +#, python-format +msgid "No %s Plugin loaded" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:137 +#, python-format +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:167 +msgid "create_network() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:180 +#, python-format +msgid "Provider network added to DB: %(network_id)s, %(vlan_id)s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:199 +msgid "update_network() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:223 +#, python-format +msgid "Provider network removed from DB: %s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:285 +msgid "create_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:350 +#, python-format +msgid "" +"tenant_id: %(tid)s, net_id: %(nid)s, old_device_id: %(odi)s, " +"new_device_id: %(ndi)s, old_host_id: %(ohi)s, new_host_id: %(nhi)s, " +"old_device_owner: %(odo)s, new_device_owner: %(ndo)s" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:379 +msgid "update_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:406 +#, python-format +msgid "Unable to update port '%s' on Nexus switch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:425 +msgid "delete_port() called" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:466 +msgid "L3 enabled on Nexus plugin, create SVI on switch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:486 +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:511 +msgid "L3 disabled or not Nexus plugin, send to vswitch" +msgstr "" + +#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:500 +msgid "L3 enabled on Nexus plugin, delete SVI from switch" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:226 +msgid "Logical network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:251 +msgid "network_segment_pool" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:296 +msgid "Invalid input for CIDR" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:436 +#, python-format +msgid "req: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:446 +#, python-format +msgid "status_code %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_client.py:454 +#, python-format +msgid "VSM: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:144 +msgid "_setup_vsm" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:163 +msgid "_populate_policy_profiles" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:190 +msgid "No policy profile populated from VSM" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:227 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:347 +#: neutron/plugins/mlnx/mlnx_plugin.py:217 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:409 
+msgid "provider:network_type required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:231 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:245 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:357 +#: neutron/plugins/mlnx/mlnx_plugin.py:247 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:419 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:438 +msgid "provider:segmentation_id required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:234 +msgid "provider:segmentation_id out of range (1 through 4094)" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:239 +msgid "provider:physical_network specified for Overlay network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:248 +msgid "provider:segmentation_id out of range (5000+)" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:252 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:379 +#: neutron/plugins/mlnx/mlnx_plugin.py:233 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:454 +#, python-format +msgid "provider:network_type %s not supported" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:263 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:385 +#: neutron/plugins/mlnx/mlnx_plugin.py:273 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:460 +#, python-format +msgid "Unknown provider:physical_network %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:267 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:391 +#: neutron/plugins/mlnx/mlnx_plugin.py:279 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:466 +msgid "provider:physical_network required" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:445 +#, python-format +msgid "_populate_member_segments %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:484 +msgid "Invalid pairing supplied" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:489 +#, python-format +msgid "Invalid UUID supplied in %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:490 +msgid "Invalid UUID supplied" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:519 +#, python-format +msgid "Cannot add a trunk segment '%s' as a member of another trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:524 +#, python-format +msgid "Cannot add vlan segment '%s' as a member of a vxlan trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:530 +#, python-format +msgid "Network UUID '%s' belongs to a different physical network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:535 +#, python-format +msgid "Cannot add vxlan segment '%s' as a member of a vlan trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:540 +#, python-format +msgid "Vlan tag '%s' is out of range" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:543 +#, python-format +msgid "Vlan tag '%s' is not an integer value" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:548 +#, python-format +msgid "%s is not a valid uuid" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:595 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:598 +msgid "n1kv:profile_id does not exist" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:610 +msgid "_send_create_logical_network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:633 +#, python-format 
+msgid "_send_create_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:643 +#, python-format +msgid "_send_update_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:653 +#, python-format +msgid "_send_delete_network_profile_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:668 +#, python-format +msgid "_send_create_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:700 +#, python-format +msgid "_send_update_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:722 +#, python-format +msgid "add_segments=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:723 +#, python-format +msgid "del_segments=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:747 +#, python-format +msgid "_send_delete_network_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:787 +#, python-format +msgid "_send_create_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:797 +#, python-format +msgid "_send_update_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:808 +#, python-format +msgid "_send_delete_subnet_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:834 +#, python-format +msgid "_send_create_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:851 +#, python-format +msgid "_send_update_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:867 +#, python-format +msgid "_send_delete_port_request: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:898 +#, python-format +msgid "Create network: profile_id=%s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:906 +#, python-format +msgid "" +"Physical_network %(phy_net)s, seg_type %(net_type)s, seg_id %(seg_id)s, " +"multicast_ip %(multicast_ip)s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:918 +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:928 +#, python-format +msgid "Seg list %s " +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:968 +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:252 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:198 +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:230 +#: neutron/plugins/mlnx/mlnx_plugin.py:360 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:514 +#, python-format +msgid "Created network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1026 +#, python-format +msgid "Updated network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1041 +#, python-format +msgid "Cannot delete network '%s' that is member of a trunk segment" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1045 +#, python-format +msgid "Cannot delete network '%s' that is a member of a multi-segment network" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1058 +#, python-format +msgid "Deleted network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1068 +#, python-format +msgid "Get network: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1090 +msgid "Get networks" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1140 +#, python-format +msgid "Create port: profile_id=%s" +msgstr "" + +#: 
neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1186 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:305 +#, python-format +msgid "Created port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1197 +#, python-format +msgid "Update port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1244 +#, python-format +msgid "Get port: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1264 +msgid "Get ports" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1280 +msgid "Create subnet" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1288 +#, python-format +msgid "Created subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1299 +msgid "Update subnet" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1314 +#, python-format +msgid "Delete subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1329 +#, python-format +msgid "Get subnet: %s" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1349 +msgid "Get subnets" +msgstr "" + +#: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:1434 +#, python-format +msgid "Scheduling router %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:157 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:165 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:187 +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:193 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:113 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:152 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:161 +#, python-format +msgid "NexusDriver: %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py:172 +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py:168 +#, python-format +msgid "NexusDriver created VLAN: %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:47 +#, python-format +msgid "Loaded driver %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:62 +msgid "NexusPlugin:create_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:111 +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:108 +#, python-format +msgid "Nexus: create & trunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:118 +#, python-format +msgid "Nexus: create vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:123 +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:112 +#, python-format +msgid "Nexus: trunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:136 +#, python-format +msgid "Nexus: delete & untrunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:142 +#, python-format +msgid "Nexus: delete vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:145 +#, python-format +msgid "Nexus: untrunk vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:202 +msgid "Grabbing a switch to create SVI" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:205 +msgid "Using round robin to create SVI" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:224 +msgid "No round robin or zero weights, using first switch" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:234 +msgid 
"NexusPlugin:delete_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:242 +msgid "NexusPlugin:update_network() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:250 +msgid "NexusPlugin:create_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:258 +msgid "NexusPlugin:delete_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:270 +#, python-format +msgid "delete_network(): provider vlan %s" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:327 +msgid "NexusPlugin:update_port() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:336 +msgid "NexusPlugin:plug_interface() called" +msgstr "" + +#: neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py:344 +msgid "NexusPlugin:unplug_interface() called" +msgstr "" + +#: neutron/plugins/common/utils.py:30 +#, python-format +msgid "%s is not a valid VLAN tag" +msgstr "" + +#: neutron/plugins/common/utils.py:34 +msgid "End of VLAN range is less than start of VLAN range" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:107 +#: neutron/plugins/embrane/agent/dispatcher.py:132 +#: neutron/services/loadbalancer/drivers/embrane/poller.py:56 +#: neutron/services/loadbalancer/drivers/embrane/agent/dispatcher.py:108 +msgid "Unhandled exception occurred" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:172 +#: neutron/plugins/embrane/base_plugin.py:191 +#, python-format +msgid "The following routers have not physical match: %s" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:177 +#, python-format +msgid "Requested router: %s" +msgstr "" + +#: neutron/plugins/embrane/base_plugin.py:229 +#, python-format +msgid "Deleting router=%s" +msgstr "" + +#: neutron/plugins/embrane/agent/operations/router_operations.py:97 +#, python-format +msgid "The router %s had no physical representation,likely already deleted" +msgstr "" + +#: neutron/plugins/embrane/agent/operations/router_operations.py:126 +#, python-format +msgid "Interface %s not found in the heleos back-end,likely already deleted" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:23 +#: neutron/services/loadbalancer/drivers/embrane/config.py:25 +msgid "ESM management root address" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:25 +#: neutron/services/loadbalancer/drivers/embrane/config.py:27 +msgid "ESM admin username." +msgstr "" + +#: neutron/plugins/embrane/common/config.py:28 +#: neutron/services/loadbalancer/drivers/embrane/config.py:30 +msgid "ESM admin password." 
+msgstr "" + +#: neutron/plugins/embrane/common/config.py:30 +msgid "Router image id (Embrane FW/VPN)" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:32 +msgid "In band Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:34 +msgid "Out of band Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:36 +msgid "Management Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:38 +msgid "Dummy user traffic Security Zone id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:40 +#: neutron/services/loadbalancer/drivers/embrane/config.py:42 +msgid "Shared resource pool id" +msgstr "" + +#: neutron/plugins/embrane/common/config.py:42 +#: neutron/services/loadbalancer/drivers/embrane/config.py:49 +msgid "Define if the requests have run asynchronously or not" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:49 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:50 +#, python-format +msgid "Dva is pending for the following reason: %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:50 +msgid "" +"Dva can't be found to execute the operation, probably was cancelled " +"through the heleos UI" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:52 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:53 +#, python-format +msgid "Dva seems to be broken for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:53 +#, python-format +msgid "Dva interface seems to be broken for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:55 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:54 +#, python-format +msgid "Dva creation failed reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:56 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:55 +#, python-format +msgid "Dva creation is in pending state for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:58 +#: neutron/services/loadbalancer/drivers/embrane/constants.py:57 +#, python-format +msgid "Dva configuration failed for reason %s" +msgstr "" + +#: neutron/plugins/embrane/common/constants.py:59 +#, python-format +msgid "" +"Failed to delete the backend router for reason %s. 
Please remove it " +"manually through the heleos UI" +msgstr "" + +#: neutron/plugins/embrane/common/exceptions.py:22 +#, python-format +msgid "An unexpected error occurred:%(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/common/exceptions.py:26 +#, python-format +msgid "%(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/common/utils.py:45 +msgid "No ip allocation set" +msgstr "" + +#: neutron/plugins/embrane/l2base/support_exceptions.py:22 +#, python-format +msgid "Cannot retrieve utif info for the following reason: %(err_msg)s" +msgstr "" + +#: neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py:44 +msgid "" +"No segmentation_id found for the network, please be sure that " +"tenant_network_type is vlan" +msgstr "" + +#: neutron/plugins/hyperv/db.py:40 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:113 +#: neutron/plugins/openvswitch/ovs_db_v2.py:131 +#, python-format +msgid "" +"Reserving vlan %(vlan_id)s on physical network %(physical_network)s from " +"pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:55 +#, python-format +msgid "Reserving flat physical network %(physical_network)s from pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:78 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:136 +#: neutron/plugins/ml2/drivers/type_vlan.py:204 +#: neutron/plugins/openvswitch/ovs_db_v2.py:155 +#, python-format +msgid "" +"Reserving specific vlan %(vlan_id)s on physical network " +"%(physical_network)s from pool" +msgstr "" + +#: neutron/plugins/hyperv/db.py:135 +#, python-format +msgid "Releasing vlan %(vlan_id)s on physical network %(physical_network)s" +msgstr "" + +#: neutron/plugins/hyperv/db.py:140 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:177 +#: neutron/plugins/openvswitch/ovs_db_v2.py:196 +#, python-format +msgid "vlan_id %(vlan_id)s on physical network %(physical_network)s not found" +msgstr "" + +#: neutron/plugins/hyperv/db.py:165 neutron/plugins/hyperv/db.py:178 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:64 +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:83 +#: neutron/plugins/ml2/drivers/type_vlan.py:128 +#: neutron/plugins/ml2/drivers/type_vlan.py:149 +#: neutron/plugins/openvswitch/ovs_db_v2.py:87 +#: neutron/plugins/openvswitch/ovs_db_v2.py:105 +#, python-format +msgid "" +"Removing vlan %(vlan_id)s on physical network %(physical_network)s from " +"pool" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:44 +msgid "Network type for tenant networks (local, flat, vlan or none)" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:48 +#: neutron/plugins/linuxbridge/common/config.py:33 +#: neutron/plugins/mlnx/common/config.py:30 +#: neutron/plugins/openvswitch/common/config.py:49 +msgid "List of :: or " +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:76 +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:98 +#, python-format +msgid "segmentation_id specified for %s network" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:83 +#, python-format +msgid "physical_network specified for %s network" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:125 +msgid "physical_network not provided" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:178 +#, python-format +msgid "Invalid tenant_network_type: %s. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:201 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:308 +#: neutron/plugins/ml2/drivers/type_vlan.py:94 +#: neutron/plugins/mlnx/mlnx_plugin.py:178 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:361 +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "" + +#: neutron/plugins/hyperv/hyperv_neutron_plugin.py:226 +#, python-format +msgid "Network type %s not supported" +msgstr "" + +#: neutron/plugins/hyperv/rpc_callbacks.py:81 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:123 +#: neutron/plugins/mlnx/rpc_callbacks.py:101 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:125 +#, python-format +msgid "Device %(device)s no longer exists on %(agent_id)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:50 +msgid "" +"List of : where the physical networks can be " +"expressed with wildcards, e.g.: .\"*:external\"" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:56 +msgid "Private vswitch name used for local networks" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:58 +#: neutron/plugins/linuxbridge/common/config.py:64 +#: neutron/plugins/mlnx/common/config.py:67 +#: neutron/plugins/nec/common/config.py:29 +#: neutron/plugins/oneconvergence/lib/config.py:47 +#: neutron/plugins/openvswitch/common/config.py:64 +#: neutron/plugins/ryu/common/config.py:43 +msgid "" +"The number of seconds the agent will wait between polling for local " +"device changes." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:62 +msgid "" +"Enables metrics collections for switch ports by using Hyper-V's metric " +"APIs. Collected data can by retrieved by other apps and services, e.g.: " +"Ceilometer. Requires Hyper-V / Windows Server 2012 and above" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:69 +msgid "" +"Specifies the maximum number of retries to enable Hyper-V's port metrics " +"collection. The agent will try to enable the feature once every " +"polling_interval period for at most metrics_max_retries or until it " +"succeedes." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:151 +#, python-format +msgid "Failed reporting state! %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:186 +#, python-format +msgid "Invalid physical network mapping: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:207 +#, python-format +msgid "network_delete received. Deleting network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:213 +#, python-format +msgid "Network %s not defined on agent." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:216 +msgid "port_delete received" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:221 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:171 +msgid "port_update received" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:243 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:129 +#, python-format +msgid "Provisioning network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:256 +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for network " +"%(net_uuid)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:268 +#, python-format +msgid "Reclaiming local network %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:276 +#, python-format +msgid "Binding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:289 +#, python-format +msgid "Binding VLAN ID %(segmentation_id)s to switch port %(port_id)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:302 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:114 +#, python-format +msgid "Unsupported network type %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:311 +#, python-format +msgid "Network %s is not avalailable on this agent" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:315 +#, python-format +msgid "Unbinding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:328 +#, python-format +msgid "Port metrics enabled for port: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:332 +#, python-format +msgid "Port metrics raw enabling for port: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:357 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:209 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:312 +#, python-format +msgid "No port %s defined on agent." +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:374 +#, python-format +msgid "Adding port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:377 +#, python-format +msgid "Port %(device)s updated. Details: %(device_details)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:403 +#, python-format +msgid "Removing port %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:411 +#, python-format +msgid "Removing port failed for device %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:436 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:962 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:382 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1306 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1354 +msgid "Agent out of sync with plugin!" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:444 +msgid "Agent loop has new devices!" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:451 +#, python-format +msgid "Error in agent event loop: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:459 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:405 +#, python-format +msgid "Loop iteration exceeded interval (%(polling_interval)s vs. 
%(elapsed)s)" +msgstr "" + +#: neutron/plugins/hyperv/agent/hyperv_neutron_agent.py:472 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:269 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:159 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1540 +msgid "Agent initialized successfully, now running... " +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:62 +#, python-format +msgid "Hyper-V Exception: %(hyperv_exeption)s while adding rule: %(rule)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:72 +#, python-format +msgid "Hyper-V Exception: %(hyperv_exeption)s while removing rule: %(rule)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:92 +msgid "Aplying port filter." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:95 +msgid "Updating port rules." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:108 +#, python-format +msgid "Creating %(new)s new rules, removing %(old)s old rules." +msgstr "" + +#: neutron/plugins/hyperv/agent/security_groups_driver.py:119 +msgid "Removing port filter" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:36 +#, python-format +msgid "HyperVException: %(msg)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:81 +#, python-format +msgid "Vnic not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:116 +#, python-format +msgid "Job failed with error %d" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:135 +#, python-format +msgid "" +"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s" +" - %(err_desc)s - Error code: %(err_code)d" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:144 +#, python-format +msgid "WMI job failed with status %(job_state)d. Error details: %(error)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:148 +#, python-format +msgid "WMI job failed with status %d. No error description available" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:153 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:167 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:187 +#, python-format +msgid "" +"Failed to disconnect port %(switch_port_name)s from switch " +"%(vswitch_name)s with error %(ret_val)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:198 +#, python-format +msgid "" +"Failed to delete port %(switch_port_name)s from switch %(vswitch_name)s " +"with error %(ret_val)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:205 +#: neutron/plugins/hyperv/agent/utilsv2.py:135 +#, python-format +msgid "VSwitch not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utils.py:246 +#: neutron/plugins/hyperv/agent/utils.py:250 +msgid "Metrics collection is not supported on this version of Hyper-V" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:32 +msgid "Force V1 WMI utility classes" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:61 +msgid "" +"V1 virtualization namespace no longer supported on Windows Server / " +"Hyper-V Server 2012 R2 or above." 
+msgstr "" + +#: neutron/plugins/hyperv/agent/utilsfactory.py:68 +#, python-format +msgid "Loading class: %(module_name)s.%(class_name)s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsv2.py:158 +#: neutron/plugins/hyperv/agent/utilsv2.py:318 +#, python-format +msgid "Port Allocation not found: %s" +msgstr "" + +#: neutron/plugins/hyperv/agent/utilsv2.py:268 +#, python-format +msgid "Cannot get VM summary data for: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:77 +#, python-format +msgid "The IP addr of available SDN-VE controllers: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:80 +#, python-format +msgid "The SDN-VE controller IP address: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:97 +#, python-format +msgid "unable to serialize object type: '%s'" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:164 +#, python-format +msgid "" +"Sending request to SDN-VE. url: %(myurl)s method: %(method)s body: " +"%(body)s header: %(header)s " +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:177 +#, python-format +msgid "Error: Could not reach server: %(url)s Exception: %(excp)s." +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:184 +#, python-format +msgid "Error message: %(reply)s -- Status: %(status)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:187 +#, python-format +msgid "Received response status: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:194 +#, python-format +msgid "Deserialized body: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:236 +msgid "Bad resource for forming a list request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:246 +msgid "Bad resource for forming a show request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:256 +msgid "Bad resource for forming a create request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:268 +msgid "Bad resource for forming a update request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:279 +msgid "Bad resource for forming a delete request" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:307 +#, python-format +msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api.py:369 +#, python-format +msgid "Did not find tenant: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:32 +msgid "Fake SDNVE controller initialized" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:35 +msgid "Fake SDNVE controller: list" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:39 +msgid "Fake SDNVE controller: show" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:43 +msgid "Fake SDNVE controller: create" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:47 +msgid "Fake SDNVE controller: update" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:51 +msgid "Fake SDNVE controller: delete" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:55 +msgid "Fake SDNVE controller: get tenant by id" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:59 +msgid "Fake SDNVE controller: check and create tenant" +msgstr "" + +#: neutron/plugins/ibm/sdnve_api_fake.py:63 +msgid "Fake SDNVE controller: get controller" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:152 +msgid "Set a new controller if needed." 
+msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:158 +#, python-format +msgid "Set the controller to a new controller: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:166 +#, python-format +msgid "Original SDN-VE HTTP request: %(orig)s; New request: %(new)s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:176 +#, python-format +msgid "Create network in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:185 +msgid "Create net failed: no SDN-VE tenant." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:196 +#, python-format +msgid "Create net failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:203 +#, python-format +msgid "Update network in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:223 +#, python-format +msgid "Update net failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:229 +#, python-format +msgid "Delete network in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:239 +#, python-format +msgid "Delete net failed after deleting the network in DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:244 +#, python-format +msgid "Get network in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:250 +msgid "Get networks in progress" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:260 +#, python-format +msgid "Create port in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:276 +msgid "Create port does not have tenant id info" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:282 +#, python-format +msgid "Create port does not have tenant id info; obtained is: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:303 +#, python-format +msgid "Create port failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:310 +#, python-format +msgid "Update port in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:337 +#, python-format +msgid "Update port failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:343 +#, python-format +msgid "Delete port in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:356 +#, python-format +msgid "Delete port operation failed in SDN-VE after deleting the port from DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:365 +#, python-format +msgid "Create subnet in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:377 +#, python-format +msgid "Create subnet failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:379 +#, python-format +msgid "Subnet created: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:385 +#, python-format +msgid "Update subnet in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:410 +#, python-format +msgid "Update subnet failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:416 +#, python-format +msgid "Delete subnet in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:421 +#, python-format +msgid "" +"Delete subnet operation failed in SDN-VE after deleting the subnet from " +"DB: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:430 +#, python-format +msgid "Create router in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:433 +#, python-format +msgid 
"Ignoring admin_state_up=False for router=%r. Overriding with True" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:443 +msgid "Create router failed: no SDN-VE tenant." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:451 +#, python-format +msgid "Create router failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:453 +#, python-format +msgid "Router created: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:458 +#, python-format +msgid "Update router in progress: id=%(id)s router=%(router)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:465 +msgid "admin_state_up=False routers are not supported." +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:489 +#, python-format +msgid "Update router failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:495 +#, python-format +msgid "Delete router in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:502 +#, python-format +msgid "" +"Delete router operation failed in SDN-VE after deleting the router in DB:" +" %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:507 +#, python-format +msgid "" +"Add router interface in progress: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:515 +#, python-format +msgid "SdnvePluginV2.add_router_interface called. Port info: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:529 +#, python-format +msgid "Update router-add-interface failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:532 +#, python-format +msgid "Added router interface: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:536 +#, python-format +msgid "" +"Add router interface only called: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:546 +msgid "" +"SdnvePluginV2._add_router_interface_only: failed to add the interface in " +"the roll back. 
of a remove_router_interface operation" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:552 +#, python-format +msgid "" +"Remove router interface in progress: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:561 +msgid "No port ID" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:563 +#, python-format +msgid "SdnvePluginV2.remove_router_interface port: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:567 +msgid "No fixed IP" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:572 +#, python-format +msgid "SdnvePluginV2.remove_router_interface subnet_id: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:595 +#, python-format +msgid "Update router-remove-interface failed SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:616 +#, python-format +msgid "Create floatingip in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:627 +#, python-format +msgid "Creating floating ip operation failed in SDN-VE controller: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:630 +#, python-format +msgid "Created floatingip : %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:635 +#, python-format +msgid "Update floatingip in progress: %r" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:655 +#, python-format +msgid "Update floating ip failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:661 +#, python-format +msgid "Delete floatingip in progress: %s" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:666 +#, python-format +msgid "Delete floatingip failed in SDN-VE: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:139 +msgid "info_update received" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:144 +#, python-format +msgid "info_update received. New controlleris to be set to: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:150 +msgid "info_update received. New controlleris set to be out of band" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:195 +#, python-format +msgid "Mapping physical network %(physical_network)s to interface %(interface)s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:201 +#, python-format +msgid "" +"Interface %(interface)s for physical network %(physical_network)s does " +"not exist. Agent terminated!" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:219 +msgid "Agent in the rpc loop." +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:241 +#, python-format +msgid "Controller IPs: %s" +msgstr "" + +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:263 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1527 +#, python-format +msgid "%s Agent terminated!" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:28 +msgid "If set to True uses a fake controller." 
+msgstr "" + +#: neutron/plugins/ibm/common/config.py:30 +msgid "Base URL for SDN-VE controller REST API" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:32 +msgid "List of IP addresses of SDN-VE controller(s)" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:34 +msgid "SDN-VE RPC subject" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:36 +msgid "SDN-VE controller port number" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:38 +msgid "SDN-VE request/response format" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:40 +msgid "SDN-VE administrator user id" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:42 +msgid "SDN-VE administrator password" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:44 +#: neutron/plugins/nec/common/config.py:24 +#: neutron/plugins/openvswitch/common/config.py:28 +#: neutron/plugins/ryu/common/config.py:22 +msgid "Integration bridge to use" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:46 +msgid "Reset the integration bridge before use" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:48 +msgid "Indicating if controller is out of band or not" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:51 +msgid "List of :" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:53 +msgid "Tenant type: OVERLAY (default) or OF" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:55 +msgid "" +"The string in tenant description that indicates the tenant is a OVERLAY " +"tenant" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:58 +msgid "The string in tenant description that indicates the tenant is a OF tenant" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:64 +msgid "Agent polling interval if necessary" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:66 +msgid "Using root helper" +msgstr "" + +#: neutron/plugins/ibm/common/config.py:68 +msgid "Whether using rpc" +msgstr "" + +#: neutron/plugins/ibm/common/exceptions.py:23 +#, python-format +msgid "" +"An unexpected error occurred in the SDN-VE Plugin. Here is the error " +"message: %(msg)s" +msgstr "" + +#: neutron/plugins/ibm/common/exceptions.py:28 +#, python-format +msgid "The input does not contain nececessary info: %(msg)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:131 +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:155 +#: neutron/plugins/ml2/rpc.py:173 neutron/plugins/ml2/rpc.py:195 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:133 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:158 +#, python-format +msgid "Device %(device)s not bound to the agent host %(host)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:149 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:152 +#, python-format +msgid "Device %(device)s up on %(agent_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:269 +#: neutron/plugins/mlnx/mlnx_plugin.py:198 +#, python-format +msgid "Invalid tenant_network_type: %s. Service terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:280 +msgid "Linux Bridge Plugin initialization complete" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:306 +#, python-format +msgid "%s. Agent terminated!" 
+msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:351 +#: neutron/plugins/mlnx/mlnx_plugin.py:242 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:413 +msgid "provider:segmentation_id specified for flat network" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:360 +#: neutron/plugins/mlnx/mlnx_plugin.py:250 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:422 +#, python-format +msgid "provider:segmentation_id out of range (%(min_id)s through %(max_id)s)" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:367 +#: neutron/plugins/mlnx/mlnx_plugin.py:258 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:442 +msgid "provider:physical_network specified for local network" +msgstr "" + +#: neutron/plugins/linuxbridge/lb_neutron_plugin.py:373 +#: neutron/plugins/mlnx/mlnx_plugin.py:262 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:448 +msgid "provider:segmentation_id specified for local network" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:83 +msgid "VXLAN is enabled, a valid local_ip must be provided" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:97 +msgid "Invalid Network ID, will lead to incorrect bridgename" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:104 +msgid "Invalid VLAN ID, will lead to incorrect subinterface name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:111 +msgid "Invalid Interface ID, will lead to incorrect tap device name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:120 +#, python-format +msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:183 +#, python-format +msgid "Failed creating vxlan interface for %(segmentation_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:215 +#, python-format +msgid "" +"Creating subinterface %(interface)s for VLAN %(vlan_id)s on interface " +"%(physical_interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:228 +#, python-format +msgid "Done creating subinterface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:235 +#, python-format +msgid "Creating vxlan interface %(interface)s for VNI %(segmentation_id)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:250 +#, python-format +msgid "Done creating vxlan interface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:303 +#, python-format +msgid "Starting bridge %(bridge_name)s for subinterface %(interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:318 +#, python-format +msgid "Done starting bridge %(bridge_name)s for subinterface %(interface)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:340 +#, python-format +msgid "Unable to add %(interface)s to %(bridge_name)s! 
Exception: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:353 +#, python-format +msgid "Unable to add vxlan interface for network %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:360 +#, python-format +msgid "No mapping for physical network %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:369 +#, python-format +msgid "Unknown network_type %(network_type)s for network %(network_id)s." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:381 +#, python-format +msgid "Tap device: %s does not exist on this host, skipped" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:399 +#, python-format +msgid "Adding device %(tap_device_name)s to bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:408 +#, python-format +msgid "%(tap_device_name)s already exists on bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:446 +#, python-format +msgid "Deleting bridge %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:453 +#, python-format +msgid "Done deleting bridge %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:456 +#, python-format +msgid "Cannot delete bridge %s, does not exist" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:470 +#, python-format +msgid "Removing device %(interface_name)s from bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:477 +#, python-format +msgid "Done removing device %(interface_name)s from bridge %(bridge_name)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:483 +#, python-format +msgid "" +"Cannot remove device %(interface_name)s bridge %(bridge_name)s does not " +"exist" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:491 +#, python-format +msgid "Deleting subinterface %s for vlan" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:498 +#, python-format +msgid "Done deleting subinterface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:502 +#, python-format +msgid "Deleting vxlan interface %s for vlan" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:507 +#, python-format +msgid "Done deleting vxlan interface %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:521 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:555 +#, python-format +msgid "" +"Option \"%(option)s\" must be supported by command \"%(command)s\" to " +"enable %(mode)s mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:532 +msgid "No valid Segmentation ID to perform UCAST test." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:549 +msgid "" +"VXLAN muticast group must be provided in vxlan_group option to enable " +"VXLAN MCAST mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:574 +msgid "" +"Linux kernel vxlan module and iproute2 3.8 or above are required to " +"enable VXLAN." 
+msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:584 +#, python-format +msgid "Using %s VXLAN mode" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:661 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:162 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:279 +msgid "network_delete received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:675 +#, python-format +msgid "port_update RPC received for port: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:678 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:320 +msgid "fdb_add received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:700 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:348 +msgid "fdb_remove received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:722 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:427 +msgid "update chg_ip received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:747 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:450 +msgid "fdb_update received" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:804 +msgid "Unable to obtain MAC address for unique ID. Agent terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:808 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:252 +#: neutron/plugins/nec/agent/nec_neutron_agent.py:144 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:109 +#, python-format +msgid "RPC agent_id: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:879 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1114 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1131 +#, python-format +msgid "Port %(device)s updated. 
Details: %(details)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:912 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:933 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:368 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1134 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1196 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1151 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1215 +#, python-format +msgid "Device %s not defined on plugin" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:919 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1164 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1181 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1183 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1200 +#, python-format +msgid "Attachment %s removed" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:927 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1171 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1188 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1190 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1207 +#, python-format +msgid "port_removed failed for %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:931 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:366 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1193 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1212 +#, python-format +msgid "Port %s updated." +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:957 +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:975 +#, python-format +msgid "Agent loop found changes! %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:981 +#, python-format +msgid "Error in agent loop. Devices info: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1007 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:419 +#, python-format +msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1010 +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:422 +#, python-format +msgid "Interface mappings: %s" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:29 +#: neutron/plugins/mlnx/common/config.py:26 +msgid "Network type for tenant networks (local, vlan, or none)" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:39 +msgid "" +"Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " +"plugin using linuxbridge mechanism driver" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:43 +msgid "TTL for vxlan interface protocol packets." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:45 +msgid "TOS for vxlan interface protocol packets." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:47 +msgid "Multicast group for vxlan interface." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:49 +msgid "Local IP address of the VXLAN endpoints." +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:51 +msgid "" +"Extension to use alongside ml2 plugin's l2population mechanism driver. It" +" enables the plugin to populate VXLAN forwarding table." 
+msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:59 +#: neutron/plugins/mlnx/common/config.py:45 +msgid "List of :" +msgstr "" + +#: neutron/plugins/linuxbridge/common/config.py:67 +#: neutron/plugins/mlnx/common/config.py:70 +msgid "Enable server RPC compatibility with old agents" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:142 +#: neutron/plugins/ml2/drivers/type_vlan.py:210 +#: neutron/plugins/openvswitch/ovs_db_v2.py:161 +#, python-format +msgid "" +"Reserving specific vlan %(vlan_id)s on physical network " +"%(physical_network)s outside pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:166 +#: neutron/plugins/ml2/drivers/type_vlan.py:259 +#: neutron/plugins/openvswitch/ovs_db_v2.py:191 +#, python-format +msgid "" +"Releasing vlan %(vlan_id)s on physical network %(physical_network)s to " +"pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:171 +#: neutron/plugins/ml2/drivers/type_vlan.py:254 +#: neutron/plugins/openvswitch/ovs_db_v2.py:186 +#, python-format +msgid "" +"Releasing vlan %(vlan_id)s on physical network %(physical_network)s " +"outside pool" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:202 +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:212 +msgid "get_port_from_device() called" +msgstr "" + +#: neutron/plugins/linuxbridge/db/l2network_db_v2.py:230 +#, python-format +msgid "set_port_status as %s called" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:60 +#, python-format +msgid "Flavor %(flavor)s could not be found" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:64 +msgid "Failed to add flavor binding" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:73 +msgid "Start initializing metaplugin" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:125 +#, python-format +msgid "default_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:131 +#, python-format +msgid "default_l3_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:137 +#, python-format +msgid "rpc_flavor %s is not plugin list" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:169 +#, python-format +msgid "Plugin location: %s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:220 +#, python-format +msgid "Created network: %(net_id)s with flavor %(flavor)s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:226 +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:370 +msgid "Failed to add flavor bindings" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:363 +#, python-format +msgid "Created router: %(router_id)s with flavor %(flavor)s" +msgstr "" + +#: neutron/plugins/metaplugin/meta_neutron_plugin.py:374 +#, python-format +msgid "Created router: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:66 +#, python-format +msgid "Update subnet failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:73 +msgid "Subnet in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:94 +#, python-format +msgid "Update network failed: %s" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:101 +msgid "Network in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/proxy_neutron_plugin.py:122 +#, python-format +msgid "Update port failed: %s" +msgstr "" + +#: 
neutron/plugins/metaplugin/proxy_neutron_plugin.py:133 +msgid "Port in remote have already deleted" +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:23 +msgid "" +"Comma separated list of flavor:neutron_plugin for plugins to load. " +"Extension method is searched in the list order and the first one is used." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:29 +msgid "" +"Comma separated list of flavor:neutron_plugin for L3 service plugins to " +"load. This is intended for specifying L2 plugins which support L3 " +"functions. If you use a router service plugin, set this blank." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:36 +msgid "" +"Default flavor to use, when flavor:network is not specified at network " +"creation." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:41 +msgid "" +"Default L3 flavor to use, when flavor:router is not specified at router " +"creation. Ignored if 'l3_plugin_list' is blank." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:47 +msgid "Comma separated list of supported extension aliases." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:51 +msgid "" +"Comma separated list of method:flavor to select specific plugin for a " +"method. This has priority over method search order based on " +"'plugin_list'." +msgstr "" + +#: neutron/plugins/metaplugin/common/config.py:57 +msgid "Specifies flavor for plugin to handle 'q-plugin' RPC requests." +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:44 +#, python-format +msgid "MidoNet %(resource_type)s %(id)s could not be found" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:48 +#, python-format +msgid "MidoNet API error: %(msg)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:82 +#, python-format +msgid "MidoClient.create_bridge called: kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:92 +#, python-format +msgid "MidoClient.delete_bridge called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:102 +#, python-format +msgid "MidoClient.get_bridge called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:116 +#, python-format +msgid "MidoClient.update_bridge called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:136 +#, python-format +msgid "" +"MidoClient.create_dhcp called: bridge=%(bridge)s, cidr=%(cidr)s, " +"gateway_ip=%(gateway_ip)s, host_rts=%(host_rts)s, " +"dns_servers=%(dns_servers)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:154 +#, python-format +msgid "" +"MidoClient.add_dhcp_host called: bridge=%(bridge)s, cidr=%(cidr)s, " +"ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:159 +msgid "Tried to add tonon-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:173 +#, python-format +msgid "" +"MidoClient.remove_dhcp_host called: bridge=%(bridge)s, cidr=%(cidr)s, " +"ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:178 +msgid "Tried to delete mapping from non-existent subnet" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:183 +#, python-format +msgid "MidoClient.remove_dhcp_host: Deleting %(dh)r" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:196 +#, python-format +msgid "" +"MidoClient.delete_dhcp_host called: bridge_id=%(bridge_id)s, " +"cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:211 +#, python-format +msgid "MidoClient.delete_dhcp called: bridge=%(bridge)s, 
cidr=%(cidr)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:218 +msgid "Tried to delete non-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:230 +#, python-format +msgid "MidoClient.delete_port called: id=%(id)s, delete_chains=%(delete_chains)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:245 +#, python-format +msgid "MidoClient.get_port called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:259 +#, python-format +msgid "MidoClient.add_bridge_port called: bridge=%(bridge)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:271 +#, python-format +msgid "MidoClient.update_port called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:296 +#, python-format +msgid "MidoClient.create_router called: kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:306 +#, python-format +msgid "MidoClient.delete_router called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:316 +#, python-format +msgid "MidoClient.get_router called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:330 +#, python-format +msgid "MidoClient.update_router called: id=%(id)s, kwargs=%(kwargs)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:351 +#, python-format +msgid "" +"MidoClient.add_dhcp_route_option called: bridge=%(bridge)s, " +"cidr=%(cidr)s, gw_ip=%(gw_ip)sdst_ip=%(dst_ip)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:359 +msgid "Tried to access non-existent DHCP" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:391 +#, python-format +msgid "MidoClient.unlink called: port=%(port)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:396 +#, python-format +msgid "Attempted to unlink a port that was not linked. 
%s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:402 +#, python-format +msgid "" +"MidoClient.remove_rules_by_property called: tenant_id=%(tenant_id)s, " +"chain_name=%(chain_name)skey=%(key)s, value=%(value)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:429 +#, python-format +msgid "" +"MidoClient.create_router_chains called: router=%(router)s, " +"inbound_chain_name=%(in_chain)s, outbound_chain_name=%(out_chain)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:452 +#, python-format +msgid "MidoClient.delete_router_chains called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:467 +#, python-format +msgid "MidoClient.delete_port_chains called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:479 +#, python-format +msgid "" +"MidoClient.get_link_port called: router=%(router)s, " +"peer_router_id=%(peer_router_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:518 +#, python-format +msgid "" +"MidoClient.add_static_nat called: tenant_id=%(tenant_id)s, " +"chain_name=%(chain_name)s, from_ip=%(from_ip)s, to_ip=%(to_ip)s, " +"port_id=%(port_id)s, nat_type=%(nat_type)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:526 +#, python-format +msgid "Invalid NAT type passed in %s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:578 +#, python-format +msgid "MidoClient.remote_static_route called: router=%(router)s, ip=%(ip)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:589 +#, python-format +msgid "" +"MidoClient.update_port_chains called: " +"port=%(port)sinbound_chain_id=%(inbound_chain_id)s, " +"outbound_chain_id=%(outbound_chain_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:600 +#, python-format +msgid "MidoClient.create_chain called: tenant_id=%(tenant_id)s name=%(name)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:608 +#, python-format +msgid "MidoClient.delete_chain called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:614 +#, python-format +msgid "" +"MidoClient.delete_chains_by_names called: tenant_id=%(tenant_id)s " +"names=%(names)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:625 +#, python-format +msgid "" +"MidoClient.get_chain_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:636 +#, python-format +msgid "" +"MidoClient.get_port_group_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:650 +#, python-format +msgid "MidoClient.create_port_group called: tenant_id=%(tenant_id)s name=%(name)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:659 +#, python-format +msgid "" +"MidoClient.delete_port_group_by_name called: tenant_id=%(tenant_id)s " +"name=%(name)s " +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:665 +#, python-format +msgid "Deleting pg %(id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:671 +#, python-format +msgid "" +"MidoClient.add_port_to_port_group_by_name called: tenant_id=%(tenant_id)s" +" name=%(name)s port_id=%(port_id)s" +msgstr "" + +#: neutron/plugins/midonet/midonet_lib.py:685 +#, python-format +msgid "MidoClient.remove_port_from_port_groups called: port_id=%(port_id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:80 +#, python-format +msgid "Invalid nat_type %s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:137 +#, python-format +msgid "Unrecognized direction %s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:171 +#, 
python-format +msgid "There is no %(name)s with ID %(id)s in MidoNet." +msgstr "" + +#: neutron/plugins/midonet/plugin.py:183 +#: neutron/plugins/ml2/drivers/mech_arista/exceptions.py:23 +#: neutron/plugins/ml2/drivers/mech_arista/exceptions.py:27 +#, python-format +msgid "%(msg)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:215 +msgid "provider_router_id should be configured in the plugin config file" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:385 +#, python-format +msgid "MidonetPluginV2.create_subnet called: subnet=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:417 +#, python-format +msgid "MidonetPluginV2.create_subnet exiting: sn_entry=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:426 +#, python-format +msgid "MidonetPluginV2.delete_subnet called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:445 +msgid "MidonetPluginV2.delete_subnet exiting" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:452 +#, python-format +msgid "MidonetPluginV2.create_network called: network=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:467 +#, python-format +msgid "MidonetPluginV2.create_network exiting: net=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:476 +#, python-format +msgid "MidonetPluginV2.update_network called: id=%(id)r, network=%(network)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:485 +#, python-format +msgid "MidonetPluginV2.update_network exiting: net=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:493 +#, python-format +msgid "MidonetPluginV2.get_network called: id=%(id)r, fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:498 +#, python-format +msgid "MidonetPluginV2.get_network exiting: qnet=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:503 +#, python-format +msgid "MidonetPluginV2.delete_network called: id=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:511 +#, python-format +msgid "Failed to delete neutron db, while Midonet bridge=%r had been deleted" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:516 +#, python-format +msgid "MidonetPluginV2.create_port called: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:577 +#, python-format +msgid "Failed to create a port on network %(net_id)s: %(err)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:582 +#, python-format +msgid "MidonetPluginV2.create_port exiting: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:587 +#, python-format +msgid "MidonetPluginV2.get_port called: id=%(id)s fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:594 +#, python-format +msgid "There is no port with ID %(id)s in MidoNet." +msgstr "" + +#: neutron/plugins/midonet/plugin.py:598 +#, python-format +msgid "MidonetPluginV2.get_port exiting: port=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:603 +#, python-format +msgid "MidonetPluginV2.get_ports called: filters=%(filters)s fields=%(fields)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:612 +#, python-format +msgid "" +"MidonetPluginV2.delete_port called: id=%(id)s " +"l3_port_check=%(l3_port_check)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:646 +#, python-format +msgid "Failed to delete DHCP mapping for port %(id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:721 +#, python-format +msgid "MidonetPluginV2.create_router called: router=%(router)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:768 +#, python-format +msgid "MidonetPluginV2.create_router exiting: router_data=%(router_data)s." 
+msgstr "" + +#: neutron/plugins/midonet/plugin.py:780 +#, python-format +msgid "" +"MidonetPluginV2.set_router_gateway called: id=%(id)s, " +"gw_router=%(gw_router)s, gw_ip=%(gw_ip)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:824 +#, python-format +msgid "MidonetPluginV2.remove_router_gateway called: id=%(id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:844 +#, python-format +msgid "MidonetPluginV2.update_router called: id=%(id)s router=%(router)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:884 +#, python-format +msgid "MidonetPluginV2.update_router exiting: router=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:895 +#, python-format +msgid "MidonetPluginV2.delete_router called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:995 +#, python-format +msgid "" +"MidonetPluginV2.add_router_interface called: router_id=%(router_id)s " +"interface_info=%(interface_info)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1019 +msgid "" +"DHCP agent is not working correctly. No port to reach the Metadata server" +" on this network" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1028 +#, python-format +msgid "" +"Failed to create MidoNet resources to add router interface. " +"info=%(info)s, router_id=%(router_id)s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1035 +#, python-format +msgid "MidonetPluginV2.add_router_interface exiting: info=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1075 +#, python-format +msgid "" +"MidonetPluginV2.update_floatingip called: id=%(id)s " +"floatingip=%(floatingip)s " +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1094 +#, python-format +msgid "MidonetPluginV2.update_floating_ip exiting: fip=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1116 +#, python-format +msgid "" +"MidonetPluginV2.create_security_group called: " +"security_group=%(security_group)s default_sg=%(default_sg)s " +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1145 +#, python-format +msgid "Failed to create MidoNet resources for sg %(sg)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1152 +#, python-format +msgid "MidonetPluginV2.create_security_group exiting: sg=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1158 +#, python-format +msgid "MidonetPluginV2.delete_security_group called: id=%s" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1190 +#, python-format +msgid "" +"MidonetPluginV2.create_security_group_rule called: " +"security_group_rule=%(security_group_rule)r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1200 +#, python-format +msgid "MidonetPluginV2.create_security_group_rule exiting: rule=%r" +msgstr "" + +#: neutron/plugins/midonet/plugin.py:1210 +#, python-format +msgid "MidonetPluginV2.delete_security_group_rule called: sg_rule_id=%s" +msgstr "" + +#: neutron/plugins/midonet/common/config.py:23 +msgid "MidoNet API server URI." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:25 +msgid "MidoNet admin username." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:28 +msgid "MidoNet admin password." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:31 +msgid "ID of the project that MidoNet admin userbelongs to." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:34 +msgid "Virtual provider router ID." +msgstr "" + +#: neutron/plugins/midonet/common/config.py:37 +msgid "Operational mode. Internal dev use only." 
+msgstr "" + +#: neutron/plugins/midonet/common/config.py:40 +msgid "Path to midonet host uuid file" +msgstr "" + +#: neutron/plugins/ml2/config.py:22 +msgid "" +"List of network type driver entrypoints to be loaded from the " +"neutron.ml2.type_drivers namespace." +msgstr "" + +#: neutron/plugins/ml2/config.py:26 +msgid "Ordered list of network_types to allocate as tenant networks." +msgstr "" + +#: neutron/plugins/ml2/config.py:30 +msgid "" +"An ordered list of networking mechanism driver entrypoints to be loaded " +"from the neutron.ml2.mechanism_drivers namespace." +msgstr "" + +#: neutron/plugins/ml2/db.py:41 +#, python-format +msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s" +msgstr "" + +#: neutron/plugins/ml2/db.py:85 +#, python-format +msgid "Multiple ports have port_id starting with %s" +msgstr "" + +#: neutron/plugins/ml2/db.py:91 +#, python-format +msgid "get_port_from_device_mac() called for mac %s" +msgstr "" + +#: neutron/plugins/ml2/db.py:133 +#, python-format +msgid "No binding found for port %(port_id)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:36 +#, python-format +msgid "Configured type driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:41 +#, python-format +msgid "Loaded type driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:49 +#, python-format +msgid "" +"Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s'" +" is already registered for type '%(type)s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:57 +#, python-format +msgid "Registered types: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:65 +#, python-format +msgid "No type driver for tenant network_type: %s. Service terminated!" +msgstr "" + +#: neutron/plugins/ml2/managers.py:69 +#, python-format +msgid "Tenant network_types: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:73 +#, python-format +msgid "Initializing driver for type '%s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:82 +#: neutron/plugins/ml2/drivers/type_tunnel.py:116 +#, python-format +msgid "network_type value '%s' not supported" +msgstr "" + +#: neutron/plugins/ml2/managers.py:108 +#, python-format +msgid "Failed to release segment '%s' because network type is not supported." 
+msgstr "" + +#: neutron/plugins/ml2/managers.py:124 +#, python-format +msgid "Configured mechanism driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:130 +#, python-format +msgid "Loaded mechanism driver names: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:142 +#, python-format +msgid "Registered mechanism drivers: %s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:149 +#, python-format +msgid "Initializing mechanism driver '%s'" +msgstr "" + +#: neutron/plugins/ml2/managers.py:171 +#, python-format +msgid "Mechanism driver '%(name)s' failed in %(method)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:447 +#, python-format +msgid "" +"Attempting to bind port %(port)s on host %(host)s for vnic_type " +"%(vnic_type)s with profile %(profile)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:458 +#, python-format +msgid "" +"Bound port: %(port)s, host: %(host)s, vnic_type: %(vnic_type)s, profile: " +"%(profile)sdriver: %(driver)s, vif_type: %(vif_type)s, vif_details: " +"%(vif_details)s, segment: %(segment)s" +msgstr "" + +#: neutron/plugins/ml2/managers.py:474 +#, python-format +msgid "Mechanism driver %s failed in bind_port" +msgstr "" + +#: neutron/plugins/ml2/managers.py:478 +#, python-format +msgid "Failed to bind port %(port)s on host %(host)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:121 +msgid "Modular L2 Plugin initialization complete" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:152 +msgid "network_type required" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:191 +#, python-format +msgid "Network %s has no segments" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:254 +msgid "binding:profile value too large" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:290 +#, python-format +msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:301 +#, python-format +msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:354 +#, python-format +msgid "" +"In _notify_port_updated(), no bound segment for port %(port_id)s on " +"network %(network_id)s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:396 +#, python-format +msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:456 +#, python-format +msgid "Deleting network %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:468 +#, python-format +msgid "Ports to auto-delete: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:474 neutron/plugins/ml2/plugin.py:594 +msgid "Tenant-owned ports exist" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:482 +#, python-format +msgid "Subnets to auto-delete: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:493 +#, python-format +msgid "Deleting network record %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:501 neutron/plugins/ml2/plugin.py:607 +msgid "Committing transaction" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:507 +msgid "A concurrent port creation has occurred" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:516 +#, python-format +msgid "Exception auto-deleting port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:524 +#, python-format +msgid "Exception auto-deleting subnet %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:533 +msgid "mechanism_manager.delete_network_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:547 +#, python-format +msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" +msgstr "" + +#: 
neutron/plugins/ml2/plugin.py:577 +#, python-format +msgid "Deleting subnet %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:588 +#, python-format +msgid "Ports to auto-deallocate: %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:603 +msgid "Deleting subnet record" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:623 +#, python-format +msgid "Exception deleting fixed_ip from port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:633 +msgid "mechanism_manager.delete_subnet_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:662 +#, python-format +msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:718 +#: neutron/tests/unit/ml2/test_ml2_plugin.py:132 +#, python-format +msgid "Deleting port %s" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:736 +#: neutron/tests/unit/ml2/test_ml2_plugin.py:133 +#, python-format +msgid "The port '%s' was deleted" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:745 +msgid "Calling base delete_port" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:757 +msgid "mechanism_manager.delete_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/plugin.py:769 +#, python-format +msgid "Port %(port)s updated up by agent not found" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:81 +#, python-format +msgid "Device %(device)s details requested by agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:90 +#, python-format +msgid "Device %(device)s requested by agent %(agent_id)s not found in database" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:97 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s has network " +"%(network_id)s with no segments" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:107 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network " +"%(network_id)s not bound, vif_type: %(vif_type)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:118 +#, python-format +msgid "" +"Device %(device)s requested by agent %(agent_id)s on network " +"%(network_id)s invalid segment, vif_type: %(vif_type)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:142 +#, python-format +msgid "Returning: %s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:166 +#, python-format +msgid "Device %(device)s no longer exists at agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/rpc.py:190 +#, python-format +msgid "Device %(device)s up at agent %(agent_id)s" +msgstr "" + +#: neutron/plugins/ml2/common/exceptions.py:23 +#, python-format +msgid "%(method)s failed." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:54 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:345 +#, python-format +msgid "Attempting to bind port %(port)s on network %(network)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:61 +#, python-format +msgid "Refusing to bind due to unsupported vnic_type: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:65 +#, python-format +msgid "Checking agent: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:70 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:355 +#, python-format +msgid "Bound using segment: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_agent.py:73 +#, python-format +msgid "Attempting to bind with dead agent: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_hyperv.py:44 +#, python-format +msgid "Checking segment: %(segment)s for mappings: %(mappings)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_linuxbridge.py:44 +#: neutron/plugins/ml2/drivers/mech_ofagent.py:50 +#: neutron/plugins/ml2/drivers/mech_openvswitch.py:45 +#, python-format +msgid "" +"Checking segment: %(segment)s for mappings: %(mappings)s with " +"tunnel_types: %(tunnel_types)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:35 +msgid "CRD service Username" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:38 +msgid "CRD Service Password" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:40 +msgid "CRD Tenant Name" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:43 +msgid "CRD Auth URL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:46 +msgid "URL for connecting to CRD service" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:49 +msgid "Timeout value for connecting to CRD service in seconds" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:53 +msgid "Region name for connecting to CRD Service in admin context" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:57 +msgid "If set, ignore any SSL validation issues" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:60 +msgid "Auth strategy for connecting to neutron in admin context" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:63 +msgid "Location of ca certificates file to use for CRD client requests." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_fslsdn.py:87 +msgid "Initializing CRD client... " +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:29 +msgid "HTTP URL of Tail-f NCS REST interface." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:31 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:50 +msgid "HTTP username for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:33 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:52 +msgid "HTTP password for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_ncs.py:35 +#: neutron/plugins/ml2/drivers/mechanism_odl.py:54 +msgid "HTTP timeout in seconds." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:48 +msgid "HTTP URL of OpenDaylight REST interface." +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:56 +msgid "Tomcat session timeout in minutes." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:106 +#, python-format +msgid "Failed to authenticate with OpenDaylight: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:109 +#, python-format +msgid "Authentication Timed Out: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:297 +#, python-format +msgid "%(object_type)s not found (%(obj_id)s)" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:333 +#, python-format +msgid "ODL-----> sending URL (%s) <-----ODL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:334 +#, python-format +msgid "ODL-----> sending JSON (%s) <-----ODL" +msgstr "" + +#: neutron/plugins/ml2/drivers/mechanism_odl.py:358 +#, python-format +msgid "" +"Refusing to bind port for segment ID %(id)s, segment %(seg)s, phys net " +"%(physnet)s, and network type %(nettype)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:30 +msgid "" +"List of physical_network names with which flat networks can be created. " +"Use * to allow flat networks with arbitrary physical_network names." +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:68 +msgid "Arbitrary flat physical_network names allowed" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:73 +#, python-format +msgid "Allowable flat physical_network names: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:80 +msgid "ML2 FlatTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:85 +msgid "physical_network required for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:88 +#, python-format +msgid "physical_network '%s' unknown for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:95 +#, python-format +msgid "%s prohibited for flat provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:109 +#, python-format +msgid "Reserving flat network on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:127 +#, python-format +msgid "Releasing flat network on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_flat.py:130 +#, python-format +msgid "No flat network found on physical network %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:34 +msgid "" +"Comma-separated list of : tuples enumerating ranges of " +"GRE tunnel IDs that are available for tenant network allocation" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:85 +#, python-format +msgid "Reserving specific gre tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:89 +#, python-format +msgid "Reserving specific gre tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:102 +#, python-format +msgid "Allocating gre tunnel id %(gre_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:120 +#, python-format +msgid "Releasing gre tunnel %s to pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:125 +#, python-format +msgid "Releasing gre tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:128 +#, python-format +msgid "gre_id %s not found" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:138 +#, python-format +msgid "Skipping unreasonable gre ID range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:156 +#: neutron/plugins/openvswitch/ovs_db_v2.py:229 +#, python-format +msgid "Removing tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:171 
+msgid "get_gre_endpoints() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:180 +#, python-format +msgid "add_gre_endpoint() called for ip %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_gre.py:186 +#, python-format +msgid "Gre endpoint with ip %s already exists" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_local.py:35 +msgid "ML2 LocalTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_local.py:46 +#, python-format +msgid "%s prohibited for local provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:61 +#, python-format +msgid "Invalid tunnel ID range: '%(range)s' - %(e)s. Agent terminated!" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:64 +#, python-format +msgid "%(type)s ID ranges: %(range)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:70 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:432 +#, python-format +msgid "provider:physical_network specified for %s network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:76 +#, python-format +msgid "segmentation_id required for %s provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:83 +#, python-format +msgid "%(key)s prohibited for %(tunnel)s provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_tunnel.py:103 +msgid "Network_type value needed by the ML2 plugin" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:37 +msgid "" +"List of :: or " +"specifying physical_network names usable for VLAN provider and tenant " +"networks, as well as ranges of VLAN tags on each available for allocation" +" to tenant networks." +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:91 +msgid "Failed to parse network_vlan_ranges. Service terminated!" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:161 +msgid "VlanTypeDriver initialization complete" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:166 +msgid "physical_network required for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:169 +#, python-format +msgid "physical_network '%s' unknown for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:175 +msgid "segmentation_id required for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:178 +#, python-format +msgid "segmentation_id out of range (%(min)s through %(max)s)" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:188 +#, python-format +msgid "%s prohibited for VLAN provider network" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:226 +#, python-format +msgid "" +"Allocating vlan %(vlan_id)s on physical network %(physical_network)s from" +" pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vlan.py:264 +#, python-format +msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:37 +msgid "" +"Comma-separated list of : tuples enumerating ranges of " +"VXLAN VNI IDs that are available for tenant network allocation" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:41 +msgid "Multicast group for VXLAN. If unset, disables VXLAN multicast mode." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:93 +#, python-format +msgid "Reserving specific vxlan tunnel %s from pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:97 +#, python-format +msgid "Reserving specific vxlan tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:110 +#, python-format +msgid "Allocating vxlan tunnel vni %(vxlan_vni)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:128 +#, python-format +msgid "Releasing vxlan tunnel %s to pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:133 +#, python-format +msgid "Releasing vxlan tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:136 +#, python-format +msgid "vxlan_vni %s not found" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:147 +#, python-format +msgid "Skipping unreasonable VXLAN VNI range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:192 +msgid "get_vxlan_endpoints() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/type_vxlan.py:202 +#, python-format +msgid "add_vxlan_endpoint() called for ip %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:40 +msgid "Allowed physical networks" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:42 +msgid "Unused" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:93 +msgid "" +"Brocade Mechanism: failed to create network, network cannot be created in" +" the configured physical network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:99 +msgid "" +"Brocade Mechanism: failed to create network, only network type vlan is " +"supported" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:107 +msgid "Brocade Mechanism: failed to create network in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:109 +msgid "Brocade Mechanism: create_network_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:111 +#, python-format +msgid "" +"create network (precommit): %(network_id)s of network type = " +"%(network_type)s with vlan = %(vlan_id)s for tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:123 +msgid "create_network_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:143 +msgid "Brocade NOS driver: failed in create network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:146 +msgid "Brocade Mechanism: create_network_postcommmit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:148 +#, python-format +msgid "" +"created network (postcommit): %(network_id)s of network type = " +"%(network_type)s with vlan = %(vlan_id)s for tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:160 +msgid "delete_network_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:173 +msgid "Brocade Mechanism: failed to delete network in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:175 +msgid "Brocade Mechanism: delete_network_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:177 +#, python-format +msgid "" +"delete network (precommit): %(network_id)s with vlan = %(vlan_id)s for " +"tenant %(tenant_id)s" +msgstr "" + +#: 
neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:189 +msgid "delete_network_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:201 +msgid "Brocade NOS driver: failed to delete network" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:203 +msgid "Brocade switch exception, delete_network_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:206 +#, python-format +msgid "" +"delete network (postcommit): %(network_id)s with vlan = %(vlan_id)s for " +"tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:224 +msgid "create_port_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:242 +msgid "Brocade Mechanism: failed to create port in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:244 +msgid "Brocade Mechanism: create_port_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:249 +msgid "create_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:273 +#, python-format +msgid "Brocade NOS driver: failed to associate mac %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:276 +msgid "Brocade switch exception: create_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:279 +#, python-format +msgid "" +"created port (postcommit): port_id=%(port_id)s network_id=%(network_id)s " +"tenant_id=%(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:287 +msgid "delete_port_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:296 +msgid "Brocade Mechanism: failed to delete port in db" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:298 +msgid "Brocade Mechanism: delete_port_precommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:303 +msgid "delete_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:327 +#, python-format +msgid "Brocade NOS driver: failed to dissociate MAC %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:330 +msgid "Brocade switch exception, delete_port_postcommit failed" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:333 +#, python-format +msgid "" +"delete port (postcommit): port_id=%(port_id)s network_id=%(network_id)s " +"tenant_id=%(tenant_id)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:340 +msgid "update_port_precommit(self: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:344 +msgid "update_port_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:348 +msgid "create_subnetwork_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:352 +msgid "create_subnetwork_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:356 +msgid "delete_subnetwork_precommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:360 +msgid "delete_subnetwork_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:364 +msgid "update_subnet_precommit(self: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:368 +msgid 
"update_subnet_postcommit: called" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:64 +msgid "" +"Brocade Switch IP address is not set, check config ml2_conf_brocade.ini " +"file" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:74 +msgid "Connect failed to switch" +msgstr "" + +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:101 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:115 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:128 +#: neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py:141 +msgid "NETCONF error" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:223 +#, python-format +msgid "data = %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:226 +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:168 +#, python-format +msgid "Response: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:290 +#, python-format +msgid "APIC session will expire in %d seconds" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/apic_client.py:335 +msgid "APIC session timed-out, logging in again." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:23 +msgid "Host name or IP Address of the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:25 +msgid "Username for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:27 +msgid "Password for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:29 +msgid "Communication port for the APIC controller" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:31 +msgid "Name for the VMM domain provider" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:33 +msgid "Name for the VMM domain to be created for Openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:35 +msgid "Name for the vlan namespace to be used for openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:37 +msgid "Range of VLAN's to be used for Openstack" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:39 +msgid "Name of the node profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:41 +msgid "Name of the entity profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:43 +msgid "Name of the function profile to be created" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/config.py:45 +msgid "Clear the node profiles on the APIC at startup (mainly used for testing)" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:25 +#, python-format +msgid "No response from APIC at %(url)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:30 +#, python-format +msgid "" +"APIC responded with HTTP status %(status)s: %(reason)s, Request: " +"'%(request)s', APIC error code %(err_code)s: %(err_text)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:37 +#, python-format +msgid "APIC failed to provide cookie for %(request)s request" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:42 +msgid "Authorized APIC session not established" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:47 +#, python-format +msgid "The switch and port for host '%(host)s' are not configured" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:52 +#, python-format +msgid "Managed Object '%(mo_class)s' is not 
supported" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/exceptions.py:57 +#, python-format +msgid "" +"Multiple VLAN ranges are not supported in the APIC plugin. Please specify" +" a single VLAN range. Current config: '%(vlan_ranges)s'" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py:84 +#, python-format +msgid "Port %s is not bound to a segment" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/config.py:25 +msgid "The physical network managed by the switches." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:33 +#, python-format +msgid "Credential %(credential_name)s already exists for tenant %(tenant_id)s." +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:54 +#, python-format +msgid "Nexus Port Binding (%(filters)s) is not present" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py:63 +#, python-format +msgid "Missing required field(s) to configure nexus switch: %(fields)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py:45 +#, python-format +msgid "nexus_switches found = %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py:87 +msgid "get_nexusvm_bindings() called" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/config.py:25 +msgid "" +"Delay within which agent is expected to update existing ports whent it " +"restarts" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:42 +msgid "Experimental L2 population driver" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:143 +msgid "Unable to retrieve the agent ip, check the agent configuration." +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:149 +#, python-format +msgid "Port %(port)s updated by agent %(agent)s isn't bound to any segment" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/mech_driver.py:199 +#, python-format +msgid "" +"Unable to retrieve the agent ip, check the agent %(agent_host)s " +"configuration." +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/rpc.py:40 +#, python-format +msgid "" +"Fanout notify l2population agents at %(topic)s the message %(method)s " +"with %(fdb_entries)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/l2pop/rpc.py:51 +#, python-format +msgid "" +"Notify l2population agent %(host)s at %(topic)s the message %(method)s " +"with %(fdb_entries)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:31 +msgid "" +"Username for Arista EOS. This is required field. If not set, all " +"communications to Arista EOSwill fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:37 +msgid "" +"Password for Arista EOS. This is required field. If not set, all " +"communications to Arista EOS will fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:42 +msgid "" +"Arista EOS IP address. This is required field. If not set, all " +"communications to Arista EOSwill fail." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:47 +msgid "" +"Defines if hostnames are sent to Arista EOS as FQDNs " +"(\"node1.domain.com\") or as short names (\"node1\"). This is optional. " +"If not set, a value of \"True\" is assumed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:53 +msgid "" +"Sync interval in seconds between Neutron plugin and EOS. This interval " +"defines how often the synchronization is performed. This is an optional " +"field. If not set, a value of 180 seconds is assumed." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/config.py:60 +msgid "" +"Defines Region Name that is assigned to this OpenStack Controller. This " +"is useful when multiple OpenStack/Neutron controllers are managing the " +"same Arista HW clusters. Note that this name must match with the region " +"name registered (or known) to keystone service. Authentication with " +"Keysotne is performed by EOS. This is optional. If not set, a value of " +"\"RegionOne\" is assumed." +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:32 +msgid "Unable to reach EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:74 +#, python-format +msgid "'timestamp' command '%s' is not available on EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:314 +#, python-format +msgid "VM id %(vmid)s not found for port %(portid)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:333 +#, python-format +msgid "Unknown device owner: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:429 +#, python-format +msgid "Executing command on Arista EOS: %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:437 +#, python-format +msgid "Error %(err)s while trying to execute commands %(cmd)s on EOS %(host)s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:505 +msgid "Required option eapi_host is not set" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:509 +msgid "Required option eapi_username is not set" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:529 +msgid "Syncing Neutron <-> EOS" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:535 +msgid "OpenStack and EOS are in sync!" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:557 +#, python-format +msgid "" +"No Tenants configured in Neutron DB. But %d tenants disovered in EOS " +"during synchronization.Enitre EOS region is cleared" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:722 +#, python-format +msgid "Network %s is not created as it is not found inArista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:736 +#, python-format +msgid "Network name changed to %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:764 +#, python-format +msgid "Network %s is not updated as it is not found inArista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:856 +#, python-format +msgid "VM %s is not created as it is not found in Arista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:871 +#, python-format +msgid "Port name changed to %s" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py:921 +#, python-format +msgid "VM %s is not updated as it is not found in Arista DB" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:47 +msgid "Initializing driver" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:62 +msgid "Initialization done" +msgstr "" + +#: neutron/plugins/ml2/drivers/mech_bigswitch/driver.py:123 +msgid "Ignoring port notification to controller because of missing host ID." 
+msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/config.py:24 +#: neutron/plugins/mlnx/common/config.py:48 +msgid "Type of VM network interface: mlnx_direct or hostdev" +msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/config.py:28 +msgid "Enable server compatibility with old nova" +msgstr "" + +#: neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py:56 +#, python-format +msgid "Checking segment: %(segment)s for mappings: %(mappings)s " +msgstr "" + +#: neutron/plugins/mlnx/agent_notify_api.py:48 +msgid "Sending delete network message" +msgstr "" + +#: neutron/plugins/mlnx/agent_notify_api.py:56 +msgid "Sending update port message" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:115 +msgid "Mellanox Embedded Switch Plugin initialisation complete" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:152 +#, python-format +msgid "Invalid physical network type %(type)s.Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:159 +#, python-format +msgid "Parsing physical_network_type failed: %s. Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:164 +#, python-format +msgid "" +"Invalid physical network type %(type)s for network %(net)s. Server " +"terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:168 +#, python-format +msgid "Physical Network type mappings: %s" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:176 +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:359 +#, python-format +msgid "%s. Server terminated!" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:314 +#, python-format +msgid "Unsupported vnic type %(vnic_type)s for physical network type %(net_type)s" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:318 +msgid "Invalid vnic_type on port_create" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:320 +msgid "vnic_type is not defined in port profile" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:364 +msgid "Update network" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:377 +msgid "Delete network" +msgstr "" + +#: neutron/plugins/mlnx/mlnx_plugin.py:425 +#, python-format +msgid "create_port with %s" +msgstr "" + +#: neutron/plugins/mlnx/rpc_callbacks.py:120 +#, python-format +msgid "Device %(device)s up %(agent_id)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:56 +#, python-format +msgid "Agent cache inconsistency - port id is not stored for %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:71 +#, python-format +msgid "Network %s not defined on Agent." 
+msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:84 +#, python-format +msgid "Network %s is not available on this agent" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:95 +#, python-format +msgid "Connecting port %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:105 +#, python-format +msgid "Binding Segmentation ID %(seg_id)sto eSwitch for vNIC mac_address %(mac)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:124 +#, python-format +msgid "Port_mac %s is not available on this agent" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:131 +msgid "Creating VLAN Network" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:133 +#, python-format +msgid "Unknown network type %(network_type)s for network %(network_id)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:165 +msgid "Invalid Network ID, cannot remove Network" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:167 +#, python-format +msgid "Delete network %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:207 +#, python-format +msgid "RPC timeout while updating port %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:289 +msgid "Ports added!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:292 +msgid "Ports removed!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:329 +#, python-format +msgid "Adding port with mac %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:332 +#, python-format +msgid "Port %s updated" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:333 +#, python-format +msgid "Device details %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:346 +#, python-format +msgid "Device with mac_address %s not defined on Neutron Plugin" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:353 +#, python-format +msgid "Removing device with mac_address %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:361 +#, python-format +msgid "Removing port failed for device %(device)s due to %(exc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:376 +msgid "eSwitch Agent Started!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:389 +msgid "Agent loop process devices!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:394 +msgid "" +"Request timeout in agent event loop eSwitchD is not responding - " +"exiting..." +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:427 +#, python-format +msgid "Failed on Agent initialisation : %s. Agent terminated!" +msgstr "" + +#: neutron/plugins/mlnx/agent/eswitch_neutron_agent.py:432 +msgid "Agent initialised successfully, now running... " +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:30 +msgid "" +"Failed to import eventlet.green.zmq. Won't connect to eSwitchD - " +"exiting..." 
+msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:74 +#, python-format +msgid "Action %(action)s failed: %(reason)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:76 +#, python-format +msgid "Unknown operation status %s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:81 +msgid "get_attached_vnics" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:88 +#, python-format +msgid "" +"Set Vlan %(segmentation_id)s on Port %(port_mac)s on Fabric " +"%(physical_network)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:101 +#, python-format +msgid "Define Fabric %(fabric)s on interface %(ifc)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:110 +#, python-format +msgid "Port Up for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:119 +#, python-format +msgid "Port Down for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/agent/utils.py:128 +#, python-format +msgid "Port Release for %(port_mac)s on fabric %(fabric)s" +msgstr "" + +#: neutron/plugins/mlnx/common/comm_utils.py:57 +#, python-format +msgid "Request timeout - call again after %s seconds" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:34 +msgid "" +"List of : with " +"physical_network_type is either eth or ib" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:37 +msgid "Physical network type for provider network (eth or ib)" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:52 +msgid "eswitch daemon end point" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:54 +msgid "" +"The number of milliseconds the agent will wait for response on request to" +" daemon." +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:57 +msgid "" +"The number of retries the agent will send request to daemon before giving" +" up" +msgstr "" + +#: neutron/plugins/mlnx/common/config.py:60 +msgid "" +"backoff rate multiplier for waiting period between retries for request to" +" daemon, i.e. 
value of 2 will double the request timeout each retry" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:20 +#, python-format +msgid "Mlnx Exception: %(err_msg)s" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:24 +msgid "Request Timeout: no response from eSwitchD" +msgstr "" + +#: neutron/plugins/mlnx/common/exceptions.py:28 +#, python-format +msgid "Operation Failed: %(err_msg)s" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:42 +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:63 +#, python-format +msgid "Removing vlan %(seg_id)s on physical network %(net)s from pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:119 +#, python-format +msgid "Reserving vlan %(seg_id)s on physical network %(net)s from pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:138 +#, python-format +msgid "" +"Reserving specific vlan %(seg_id)s on physical network %(phy_net)s from " +"pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:143 +#, python-format +msgid "" +"Reserving specific vlan %(seg_id)s on physical network %(phy_net)s " +"outside pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:170 +#, python-format +msgid "Releasing vlan %(seg_id)s on physical network %(phy_net)s to pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:175 +#, python-format +msgid "Releasing vlan %(seg_id)s on physical network %(phy_net)s outside pool" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:181 +#, python-format +msgid "vlan_id %(seg_id)s on physical network %(phy_net)s not found" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:239 +msgid "Get_port_from_device_mac() called" +msgstr "" + +#: neutron/plugins/mlnx/db/mlnx_db_v2.py:247 +#, python-format +msgid "Set_port_status as %s called" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:192 +#, python-format +msgid "_cleanup_ofc_tenant: No OFC tenant for %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:195 +#, python-format +msgid "delete_ofc_tenant() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:211 +msgid "activate_port_if_ready(): skip, port.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:215 +msgid "activate_port_if_ready(): skip, network.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:219 +msgid "activate_port_if_ready(): skip, no portinfo for this port." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:223 +msgid "activate_port_if_ready(): skip, ofc_port already exists." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:231 +#, python-format +msgid "create_ofc_port() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:244 +#, python-format +msgid "deactivate_port(): skip, ofc_port for port=%s does not exist." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:263 +#, python-format +msgid "deactivate_port(): OFC port for port=%s is already removed." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:271 +#, python-format +msgid "Failed to delete port=%(port)s from OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:290 +#, python-format +msgid "NECPluginV2.create_network() called, network=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:307 +#, python-format +msgid "Failed to create network id=%(id)s on OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:323 +#, python-format +msgid "NECPluginV2.update_network() called, id=%(id)s network=%(network)s ." 
+msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:367 +#, python-format +msgid "NECPluginV2.delete_network() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:401 +#, python-format +msgid "delete_network() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:542 +#, python-format +msgid "NECPluginV2.create_port() called, port=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:606 +#, python-format +msgid "NECPluginV2.update_port() called, id=%(id)s port=%(port)s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:634 +#, python-format +msgid "NECPluginV2.delete_port() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:702 +#, python-format +msgid "" +"NECPluginV2RPCCallbacks.get_port_from_device() called, device=%(device)s " +"=> %(ret)s." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:726 +#, python-format +msgid "NECPluginV2RPCCallbacks.update_ports() called, kwargs=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:736 +#, python-format +msgid "" +"update_ports(): ignore unchanged portinfo in port_added message " +"(port_id=%s)." +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:756 +#, python-format +msgid "" +"update_ports(): ignore port_removed message due to portinfo for " +"port_id=%s was not registered" +msgstr "" + +#: neutron/plugins/nec/nec_plugin.py:761 +#, python-format +msgid "" +"update_ports(): ignore port_removed message received from different host " +"(registered_datapath_id=%(registered)s, " +"received_datapath_id=%(received)s)." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:58 +#, python-format +msgid "RouterMixin.create_router() called, router=%s ." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:84 +#, python-format +msgid "RouterMixin.update_router() called, id=%(id)s, router=%(router)s ." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:102 +#, python-format +msgid "RouterMixin.delete_router() called, id=%s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:121 +#, python-format +msgid "" +"RouterMixin.add_router_interface() called, id=%(id)s, " +"interface=%(interface)s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:128 +#, python-format +msgid "" +"RouterMixin.remove_router_interface() called, id=%(id)s, " +"interface=%(interface)s." +msgstr "" + +#: neutron/plugins/nec/nec_router.py:311 +#, python-format +msgid "" +"OFC does not support router with provider=%(provider)s, so removed it " +"from supported provider (new router driver map=%(driver_map)s)" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:319 +#, python-format +msgid "" +"default_router_provider %(default)s is supported! Please specify one of " +"%(supported)s" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:333 +#, python-format +msgid "Enabled router drivers: %s" +msgstr "" + +#: neutron/plugins/nec/nec_router.py:336 +#, python-format +msgid "" +"No router provider is enabled. neutron-server terminated! " +"(supported=%(supported)s, configured=%(config)s)" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:40 +msgid "Disabled packet-filter extension." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:45 +#, python-format +msgid "create_packet_filter() called, packet_filter=%s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:61 +#, python-format +msgid "update_packet_filter() called, id=%(id)s packet_filter=%(packet_filter)s ." 
+msgstr "" + +#: neutron/plugins/nec/packet_filter.py:134 +#: neutron/plugins/nec/packet_filter.py:187 +#, python-format +msgid "Failed to create packet_filter id=%(id)s on OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:144 +#, python-format +msgid "delete_packet_filter() called, id=%s ." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:162 +#, python-format +msgid "activate_packet_filter_if_ready() called, packet_filter=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:171 +#, python-format +msgid "" +"activate_packet_filter_if_ready(): skip pf_id=%s, " +"packet_filter.admin_state_up is False." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:174 +#, python-format +msgid "" +"activate_packet_filter_if_ready(): skip pf_id=%s, no portinfo for the " +"in_port." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:177 +msgid "" +"_activate_packet_filter_if_ready(): skip, ofc_packet_filter already " +"exists." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:180 +#, python-format +msgid "activate_packet_filter_if_ready(): create packet_filter id=%s on OFC." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:200 +#, python-format +msgid "deactivate_packet_filter_if_ready() called, packet_filter=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:205 +#, python-format +msgid "" +"deactivate_packet_filter(): skip, Not found OFC Mapping for packet_filter" +" id=%s." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:210 +#, python-format +msgid "deactivate_packet_filter(): deleting packet_filter id=%s from OFC." +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:219 +#, python-format +msgid "Failed to delete packet_filter id=%(id)s from OFC: %(exc)s" +msgstr "" + +#: neutron/plugins/nec/packet_filter.py:250 +#, python-format +msgid "Error occurred while disabling packet filter(s) for port %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:125 +#, python-format +msgid "create_router() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:154 +#, python-format +msgid "_update_ofc_routes() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:169 +#, python-format +msgid "delete_router() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:180 +#, python-format +msgid "" +"RouterOpenFlowDriver.add_interface(): the requested port has no subnet. " +"add_interface() is skipped. router_id=%(id)s, port=%(port)s)" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:200 +#, python-format +msgid "add_router_interface() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/router_drivers.py:218 +#, python-format +msgid "delete_router_interface() failed due to %s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:53 +#, python-format +msgid "Update ports: added=%(added)s, removed=%(removed)s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:76 +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:51 +#, python-format +msgid "port_update received: %s" +msgstr "" + +#: neutron/plugins/nec/agent/nec_neutron_agent.py:220 +msgid "No port changed." +msgstr "" + +#: neutron/plugins/nec/common/config.py:35 +msgid "Host to connect to" +msgstr "" + +#: neutron/plugins/nec/common/config.py:37 +msgid "Base URL of OFC REST API. It is prepended to each API request." 
+msgstr "" + +#: neutron/plugins/nec/common/config.py:40 +msgid "Port to connect to" +msgstr "" + +#: neutron/plugins/nec/common/config.py:42 +msgid "Driver to use" +msgstr "" + +#: neutron/plugins/nec/common/config.py:44 +msgid "Enable packet filter" +msgstr "" + +#: neutron/plugins/nec/common/config.py:46 +msgid "Use SSL to connect" +msgstr "" + +#: neutron/plugins/nec/common/config.py:48 +msgid "Key file" +msgstr "" + +#: neutron/plugins/nec/common/config.py:50 +msgid "Certificate file" +msgstr "" + +#: neutron/plugins/nec/common/config.py:52 +msgid "Disable SSL certificate verification" +msgstr "" + +#: neutron/plugins/nec/common/config.py:54 +msgid "" +"Maximum attempts per OFC API request.NEC plugin retries API request to " +"OFC when OFC returns ServiceUnavailable (503).The value must be greater " +"than 0." +msgstr "" + +#: neutron/plugins/nec/common/config.py:63 +msgid "Default router provider to use." +msgstr "" + +#: neutron/plugins/nec/common/config.py:66 +msgid "List of enabled router providers." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:20 +#, python-format +msgid "An OFC exception has occurred: %(reason)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:30 +#, python-format +msgid "The specified OFC resource (%(resource)s) is not found." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:34 +#, python-format +msgid "An exception occurred in NECPluginV2 DB: %(reason)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:38 +#, python-format +msgid "" +"Neutron-OFC resource mapping for %(resource)s %(neutron_id)s is not " +"found. It may be deleted during processing." +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:44 +#, python-format +msgid "OFC returns Server Unavailable (503) (Retry-After=%(retry_after)s)" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:53 +#, python-format +msgid "PortInfo %(id)s could not be found" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:57 +msgid "" +"Invalid input for operation: datapath_id should be a hex string with at " +"most 8 bytes" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:63 +msgid "Invalid input for operation: port_no should be [0:65535]" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:68 +#, python-format +msgid "Router (provider=%(provider)s) does not support an external network" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:73 +#, python-format +msgid "Provider %(provider)s could not be found" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:77 +#, python-format +msgid "Cannot create more routers with provider=%(provider)s" +msgstr "" + +#: neutron/plugins/nec/common/exceptions.py:81 +#, python-format +msgid "" +"Provider of Router %(router_id)s is %(provider)s. This operation is " +"supported only for router provider %(expected_provider)s." 
+msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:54 +#, python-format +msgid "Operation on OFC failed: %(status)s%(msg)s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:78 +#, python-format +msgid "Client request: %(host)s:%(port)s %(method)s %(action)s [%(body)s]" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:87 +#, python-format +msgid "OFC returns [%(status)s:%(data)s]" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:104 +#, python-format +msgid "OFC returns ServiceUnavailable (retry-after=%s)" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:108 +#, python-format +msgid "Specified resource %s does not exist on OFC " +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:112 +#, python-format +msgid "Operation on OFC failed: status=%(status)s, detail=%(detail)s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:115 +msgid "Operation on OFC failed" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:124 +#, python-format +msgid "Failed to connect OFC : %s" +msgstr "" + +#: neutron/plugins/nec/common/ofc_client.py:140 +#, python-format +msgid "Waiting for %s seconds due to OFC Service_Unavailable." +msgstr "" + +#: neutron/plugins/nec/db/api.py:108 +#, python-format +msgid "del_ofc_item(): NotFound item (resource=%(resource)s, id=%(id)s) " +msgstr "" + +#: neutron/plugins/nec/db/api.py:142 +#, python-format +msgid "del_portinfo(): NotFound portinfo for port_id: %s" +msgstr "" + +#: neutron/plugins/nec/db/api.py:163 +#: neutron/plugins/openvswitch/ovs_db_v2.py:317 +#, python-format +msgid "get_port_with_securitygroups() called:port_id=%s" +msgstr "" + +#: neutron/plugins/nec/db/router.py:85 +#, python-format +msgid "Add provider binding (router=%(router_id)s, provider=%(provider)s)" +msgstr "" + +#: neutron/plugins/nec/drivers/__init__.py:36 +#, python-format +msgid "Loading OFC driver: %s" +msgstr "" + +#: neutron/plugins/nec/drivers/pfc.py:33 +#, python-format +msgid "OFC %(resource)s ID has an invalid format: %(ofc_id)s" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:33 +msgid "Number of packet_filters allowed per tenant, -1 for unlimited" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:40 +#, python-format +msgid "PacketFilter %(id)s could not be found" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:44 +#, python-format +msgid "" +"IP version %(version)s is not supported for %(field)s (%(value)s is " +"specified)" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:49 +#, python-format +msgid "Packet Filter priority should be %(min)s-%(max)s (included)" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:53 +#, python-format +msgid "%(field)s field cannot be updated" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:57 +#, python-format +msgid "" +"The backend does not support duplicated priority. Priority %(priority)s " +"is in use" +msgstr "" + +#: neutron/plugins/nec/extensions/packetfilter.py:62 +#, python-format +msgid "" +"Ether Type '%(eth_type)s' conflicts with protocol '%(protocol)s'. Update " +"or clear protocol before changing ether type." 
+msgstr "" + +#: neutron/plugins/nuage/plugin.py:89 +#, python-format +msgid "%(resource)s with id %(resource_id)s does not exist" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:98 +#, python-format +msgid "" +"Either %(resource)s %(req_resource)s not found or you dont have " +"credential to access it" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:104 +#, python-format +msgid "" +"More than one entry found for %(resource)s %(req_resource)s. Use id " +"instead" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:214 +#, python-format +msgid "Subnet %s not found on VSD" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:219 +#, python-format +msgid "Port-Mapping for port %s not found on VSD" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:314 +msgid "External network with subnets can not be changed to non-external network" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:368 +msgid "" +"Either net_partition is not provided with subnet OR default net_partition" +" is not created at the start" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:397 +#, python-format +msgid "Only one subnet is allowed per external network %s" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:479 +#, python-format +msgid "" +"Unable to complete operation on subnet %s.One or more ports have an IP " +"allocation from this subnet." +msgstr "" + +#: neutron/plugins/nuage/plugin.py:509 +#, python-format +msgid "" +"Router %s does not hold default zone OR net_partition mapping. Router-IF " +"add failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:519 +#, python-format +msgid "Subnet %s does not hold Nuage VSD reference. Router-IF add failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:529 +#, python-format +msgid "" +"Subnet %(subnet)s and Router %(router)s belong to different net_partition" +" Router-IF add not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:541 +#, python-format +msgid "Subnet %s has one or more active VMs Router-IF add not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:587 neutron/plugins/nuage/plugin.py:592 +#: neutron/plugins/nuage/plugin.py:598 +#, python-format +msgid "No router interface found for Router %s. Router-IF delete failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:614 +#, python-format +msgid "Subnet %s has one or more active VMs Router-IF delete not permitted" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:623 +#, python-format +msgid "" +"Router %s does not hold net_partition assoc on Nuage VSD. Router-IF " +"delete failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:662 +msgid "" +"Either net_partition is not provided with router OR default net_partition" +" is not created at the start" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:708 +msgid "for same subnet, multiple static routes not allowed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:724 +#, python-format +msgid "Router %s does not hold net-partition assoc on VSD. extra-route failed" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:837 +#, python-format +msgid "One or more router still attached to net_partition %s." 
+msgstr "" + +#: neutron/plugins/nuage/plugin.py:842 +#, python-format +msgid "NetPartition with %s does not exist" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:888 +#, python-format +msgid "router %s is not associated with any net-partition" +msgstr "" + +#: neutron/plugins/nuage/plugin.py:903 +msgid "Floating IP can not be associated to VM in different router context" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:22 +msgid "IP Address and Port of Nuage's VSD server" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:25 +msgid "Username and password for authentication" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:27 +msgid "Boolean for SSL connection with VSD server" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:29 +msgid "Nuage provided base uri to reach out to VSD" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:31 +msgid "" +"Organization name in which VSD will orchestrate network resources using " +"openstack" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:34 +msgid "Nuage provided uri for initial authorization to access VSD" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:38 +msgid "" +"Default Network partition in which VSD will orchestrate network resources" +" using openstack" +msgstr "" + +#: neutron/plugins/nuage/common/config.py:42 +msgid "Per Net Partition quota of floating ips" +msgstr "" + +#: neutron/plugins/nuage/common/exceptions.py:24 +#, python-format +msgid "Nuage Plugin does not support this operation: %(msg)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:93 +msgid "Agent terminated!: Failed to get a datapath." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:112 +msgid "Agent terminated" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:146 +msgid "Agent failed to create agent config map" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:273 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1285 +#, python-format +msgid "Unable to create tunnel port. Invalid remote IP: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:278 +#, python-format +msgid "ryu send_msg() result: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:342 +#, python-format +msgid "network_delete received network %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:348 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:575 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:287 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:610 +#, python-format +msgid "Network %s not used on agent." 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:357 +#, python-format +msgid "port_update received port %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:360 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:299 +msgid "tunnel_update received" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:366 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:308 +msgid "No tunnel_type specified, cannot create tunnels" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:369 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:311 +#, python-format +msgid "tunnel_type %s not supported by agent" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:490 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 +#, python-format +msgid "No local VLAN available for net-id=%s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:493 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:526 +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:505 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:548 +#, python-format +msgid "" +"Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " +"tunneling disabled" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:513 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:567 +#, python-format +msgid "" +"Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:523 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:587 +#, python-format +msgid "" +"Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " +"physical_network %(physical_network)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:532 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:596 +#, python-format +msgid "" +"Cannot provision unknown network type %(network_type)s for net-" +"id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:578 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:613 +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:612 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:653 +#, python-format +msgid "" +"Cannot reclaim unknown network type %(network_type)s for net-" +"id=%(net_uuid)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:663 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:699 +#, python-format +msgid "port_unbound() net_uuid %s not in local_vlan_map" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:737 +#, python-format +msgid "ancillary bridge list: %s." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:827 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:788 +msgid "" +"Failed to create OVS patch port. Cannot have tunneling enabled on this " +"agent, since this version of OVS does not support tunnels or patch ports." +" Agent terminated!" 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:911 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:902 +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:917 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:908 +#, python-format +msgid "" +"Bridge %(bridge)s for physical network %(physical_network)s does not " +"exist. Agent terminated!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:988 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1012 +#, python-format +msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1021 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1041 +#, python-format +msgid "VIF port: %s has no ofport configured, and might not be able to transmit" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1029 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1051 +#, python-format +msgid "No VIF port for port %s defined on agent." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1042 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1064 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:686 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:820 +msgid "ofport should have a value that can be interpreted as an integer" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1045 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1067 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:669 +#: neutron/tests/unit/ofagent/test_ofa_neutron_agent.py:689 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:803 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:823 +#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:840 +#, python-format +msgid "Failed to set-up %(type)s tunnel port to %(ip)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1094 +#, python-format +msgid "Processing port %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1099 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1126 +#, python-format +msgid "" +"Port %s was not found on the integration bridge and will therefore not be" +" processed" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1108 +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1147 +#, python-format +msgid "Unable to get port details for %(device)s: %(e)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1125 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1142 +#, python-format +msgid "Setting status for %s to UP" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1129 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1146 +#, python-format +msgid "Setting status for %s to DOWN" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1132 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 +#, python-format +msgid "Configuration for device %s completed." 
+msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1142 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1170 +#, python-format +msgid "Ancillary Port %s added" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1217 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d - " +"treat_devices_added_or_updated completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1225 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d - treat_devices_removed " +"completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1238 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1263 +#, python-format +msgid "" +"process_ancillary_network_ports - iteration: %(iter_num)d - " +"treat_ancillary_devices_added completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1247 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1272 +#, python-format +msgid "" +"process_ancillary_network_ports - iteration: %(iter_num)d - " +"treat_ancillary_devices_removed completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1274 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1314 +#, python-format +msgid "Unable to sync tunnel IP %(local_ip)s: %(e)s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1302 +#, python-format +msgid "Agent ovsdb_monitor_loop - iteration:%d started" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1313 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1368 +msgid "Agent tunnel out of sync with plugin!" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1317 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1372 +msgid "Error while synchronizing tunnels" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1321 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - starting polling. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1334 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - port information " +"retrieved. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1344 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1399 +#, python-format +msgid "Starting to process devices in:%s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1348 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1363 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ancillary port info " +"retrieved. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1373 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d - ancillary ports " +"processed. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1388 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1441 +msgid "Error while processing VIF ports" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1395 +#, python-format +msgid "" +"Agent ovsdb_monitor_loop - iteration:%(iter_num)d completed. Processed " +"ports statistics:%(port_stats)s. 
Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1431 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1485 +#, python-format +msgid "Parsing bridge_mappings failed: %s." +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1454 +#, python-format +msgid "Invalid tunnel type specificed: %s" +msgstr "" + +#: neutron/plugins/ofagent/agent/ofa_neutron_agent.py:1457 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1512 +msgid "Tunneling cannot be enabled without a valid local_ip." +msgstr "" + +#: neutron/plugins/ofagent/common/config.py:24 +msgid "Number of seconds to retry acquiring an Open vSwitch datapath" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:240 +msgid "Failed to create subnet, deleting it from neutron" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:304 +#, python-format +msgid "Deleting newly created neutron port %s" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:374 +msgid "Failed to create floatingip" +msgstr "" + +#: neutron/plugins/oneconvergence/plugin.py:413 +msgid "Failed to create router" +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:154 +msgid "Port list is updated" +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:161 +msgid "AGENT looping....." +msgstr "" + +#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:173 +msgid "NVSD Agent initialized successfully, now running... " +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:26 +msgid "NVSD Controller IP address" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:29 +msgid "NVSD Controller Port number" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:32 +msgid "NVSD Controller username" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:35 +msgid "NVSD Controller password" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:38 +msgid "NVSD controller REST API request timeout in seconds" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:40 +msgid "Number of login retries to NVSD controller" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/config.py:45 +msgid "integration bridge" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:23 +#, python-format +msgid "An unknown nvsd plugin exception occurred: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:27 +#: neutron/plugins/vmware/api_client/exception.py:68 +msgid "The request has timed out." +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:31 +msgid "Invalid access credentials to the Server." +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:35 +#, python-format +msgid "A resource is not found: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:39 +#, python-format +msgid "Request sent to server is invalid: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:43 +#, python-format +msgid "Internal Server Error: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:47 +msgid "Connection is closed by the server." 
+msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:51 +#, python-format +msgid "The request is forbidden access to the resource: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/exception.py:55 +#, python-format +msgid "Internal Server Error from NVSD controller: %(reason)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:55 +#, python-format +msgid "Could not create a %(resource)s under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:59 +#, python-format +msgid "Failed to %(method)s %(resource)s id=%(resource_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:65 +#, python-format +msgid "Failed to %(method)s %(resource)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:109 +#, python-format +msgid "Network %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:126 +#, python-format +msgid "Network %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:147 +#, python-format +msgid "Network %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:160 +#, python-format +msgid "Subnet %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:174 +#, python-format +msgid "Subnet %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:190 +#, python-format +msgid "Subnet %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:222 +#, python-format +msgid "Port %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:246 +#, python-format +msgid "Port %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:259 +#, python-format +msgid "Port %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:281 +#, python-format +msgid "Flatingip %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:298 +#, python-format +msgid "Flatingip %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:312 +#, python-format +msgid "Flatingip %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:325 +#, python-format +msgid "Router %(id)s created under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:341 +#, python-format +msgid "Router %(id)s updated under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/nvsdlib.py:351 +#, python-format +msgid "Router %(id)s deleted under tenant %(tenant_id)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:81 +#, python-format +msgid "Unable to connect to NVSD controller. Exiting after %(retries)s attempts" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:91 +#, python-format +msgid "Login Failed: %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:92 +#, python-format +msgid "Unable to establish connection with Controller %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:94 +msgid "Retrying after 1 second..." 
+msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:98 +#, python-format +msgid "Login Successful %(uri)s %(status)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:102 +#, python-format +msgid "AuthToken = %s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:104 +msgid "login failed" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:112 +msgid "No Token, Re-login" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:129 +#, python-format +msgid "request: %(method)s %(uri)s successful" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:136 +#, python-format +msgid "request: Request failed from Controller side :%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:141 +#, python-format +msgid "Response is Null, Request timed out: %(method)s to %(uri)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:153 +#, python-format +msgid "Request %(method)s %(uri)s body = %(body)s failed with status %(status)s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:157 +#, python-format +msgid "%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:161 +#, python-format +msgid "%(method)s to %(url)s, unexpected response code: %(status)d" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:167 +#, python-format +msgid "Request failed from Controller side with Status=%s" +msgstr "" + +#: neutron/plugins/oneconvergence/lib/plugin_helper.py:171 +#, python-format +msgid "Success: %(method)s %(url)s status=%(status)s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:210 +#, python-format +msgid "Skipping unreasonable tunnel ID range %(tun_min)s:%(tun_max)s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:258 +#, python-format +msgid "Reserving tunnel %s from pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:273 +#, python-format +msgid "Reserving specific tunnel %s from pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:276 +#, python-format +msgid "Reserving specific tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:299 +#, python-format +msgid "Releasing tunnel %s outside pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:301 +#, python-format +msgid "Releasing tunnel %s to pool" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:303 +#, python-format +msgid "tunnel_id %s not found" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:375 +#, python-format +msgid "Adding a tunnel endpoint for %s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:391 +#, python-format +msgid "" +"Adding a tunnel endpoint failed due to a concurrenttransaction had been " +"committed (%s attempts left)" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_db_v2.py:396 +msgid "Unable to generate a new tunnel id" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:307 +#, python-format +msgid "Invalid tenant_network_type: %s. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:324 +#, python-format +msgid "Tunneling disabled but tenant_network_type is '%s'. Server terminated!" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:370 +#, python-format +msgid "Invalid tunnel ID range: '%(range)s' - %(e)s. Server terminated!" 
+msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:374 +#, python-format +msgid "Tunnel ID ranges: %s" +msgstr "" + +#: neutron/plugins/openvswitch/ovs_neutron_plugin.py:429 +#, python-format +msgid "%s networks are not enabled" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:232 +msgid "OVS version can not support ARP responder." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:296 +#, python-format +msgid "port_update message processed for port %s" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:497 +#, python-format +msgid "Action %s not supported" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:767 +#, python-format +msgid "Adding %s to list of bridges." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:882 +#, python-format +msgid "" +"Creating an interface named %(name)s exceeds the %(limit)d character " +"limitation. It was shortened to %(new_name)s to fit." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1242 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d " +"-treat_devices_added_or_updated completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1250 +#, python-format +msgid "" +"process_network_ports - iteration:%(iter_num)d -treat_devices_removed " +"completed in %(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1351 +#, python-format +msgid "Agent rpc_loop - iteration:%d started" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1376 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - starting polling. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1389 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - port information retrieved. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1404 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d -ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1418 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d -ancillary port info retrieved. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1427 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d - ancillary ports processed. " +"Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1448 +#, python-format +msgid "" +"Agent rpc_loop - iteration:%(iter_num)d completed. Processed ports " +"statistics: %(port_stats)s. Elapsed:%(elapsed).3f" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1509 +#, python-format +msgid "Invalid tunnel type specified: %s" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:30 +msgid "Enable tunneling support" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:32 +msgid "Tunnel bridge to use" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:34 +msgid "Peer patch port in integration bridge for tunnel bridge" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:37 +msgid "Peer patch port in tunnel bridge for integration bridge" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:40 +msgid "Local IP address of GRE tunnel endpoints." 
+msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:43 +msgid "List of :" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:45 +msgid "Network type for tenant networks (local, vlan, gre, vxlan, or none)" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:53 +msgid "List of :" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:55 +msgid "The type of tunnels to use when utilizing tunnels, either 'gre' or 'vxlan'" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:58 +msgid "" +"Use veths instead of patch ports to interconnect the integration bridge " +"to physical bridges" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:68 +msgid "Minimize polling by monitoring ovsdb for interface changes." +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:72 +msgid "" +"The number of seconds to wait before respawning the ovsdb monitor after " +"losing communication with it" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:75 +msgid "Network types supported by the agent (gre and/or vxlan)" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:80 +msgid "MTU size of veth interfaces" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:82 +msgid "" +"Use ml2 l2population mechanism driver to learn remote mac and IPs and " +"improve tunnel scalability" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:85 +msgid "Enable local ARP responder if it is supported" +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:87 +msgid "" +"Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying " +"GRE/VXLAN tunnel" +msgstr "" + +#: neutron/plugins/plumgrid/common/exceptions.py:24 +#, python-format +msgid "PLUMgrid Plugin Error: %(err_msg)s" +msgstr "" + +#: neutron/plugins/plumgrid/common/exceptions.py:28 +#, python-format +msgid "Connection failed with PLUMgrid Director: %(err_msg)s" +msgstr "" + +#: neutron/plugins/plumgrid/drivers/fake_plumlib.py:31 +msgid "Python PLUMgrid Fake Library Started " +msgstr "" + +#: neutron/plugins/plumgrid/drivers/fake_plumlib.py:36 +#, python-format +msgid "Fake Director: %s" +msgstr "" + +#: neutron/plugins/plumgrid/drivers/plumlib.py:37 +msgid "Python PLUMgrid Library Started " +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:44 +msgid "PLUMgrid Director server to connect to" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:46 +msgid "PLUMgrid Director server port to connect to" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:48 +msgid "PLUMgrid Director admin username" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:50 +msgid "PLUMgrid Director admin password" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:52 +msgid "PLUMgrid Director server timeout" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:55 +msgid "PLUMgrid Driver" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:72 +msgid "Neutron PLUMgrid Director: Starting Plugin" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:77 +msgid "Neutron PLUMgrid Director: Neutron server with PLUMgrid Plugin has started" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:90 +#, python-format +msgid "Neutron PLUMgrid Director: %s" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:101 +msgid "Neutron PLUMgrid Director: 
create_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:115 +msgid "PLUMgrid Library: create_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:130 +msgid "Neutron PLUMgrid Director: update_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:142 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:168 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:352 +msgid "PLUMgrid Library: update_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:157 +msgid "Neutron PLUMgrid Director: delete_network() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:180 +msgid "Neutron PLUMgrid Director: create_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:199 +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:226 +msgid "PLUMgrid Library: create_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:214 +msgid "Neutron PLUMgrid Director: update_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:242 +msgid "Neutron PLUMgrid Director: delete_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:257 +msgid "PLUMgrid Library: delete_port() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:286 +msgid "Neutron PLUMgrid Director: create_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:310 +msgid "PLUMgrid Library: create_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:320 +msgid "Neutron PLUMgrid Director: delete_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:332 +msgid "PLUMgrid Library: delete_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:340 +msgid "update_subnet() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:364 +msgid "Neutron PLUMgrid Director: create_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:376 +msgid "PLUMgrid Library: create_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:386 +msgid "Neutron PLUMgrid Director: update_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:392 +msgid "PLUMgrid Library: update_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:401 +msgid "Neutron PLUMgrid Director: delete_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:411 +msgid "PLUMgrid Library: delete_router() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:419 +msgid "Neutron PLUMgrid Director: add_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:439 +msgid "PLUMgrid Library: add_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:450 +msgid "Neutron PLUMgrid Director: remove_router_interface() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:472 +msgid "PLUMgrid Library: remove_router_interface() called" +msgstr "" + +#: 
neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:483 +msgid "Neutron PLUMgrid Director: create_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:490 +msgid "PLUMgrid Library: create_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:499 +msgid "Neutron PLUMgrid Director: update_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:508 +msgid "PLUMgrid Library: update_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:518 +msgid "Neutron PLUMgrid Director: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:528 +msgid "PLUMgrid Library: delete_floatingip() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:535 +msgid "Neutron PLUMgrid Director: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:542 +msgid "PLUMgrid Library: disassociate_floatingips() called" +msgstr "" + +#: neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py:572 +msgid "" +"Networks with admin_state_up=False are not supported by PLUMgrid plugin " +"yet." +msgstr "" + +#: neutron/plugins/ryu/ryu_neutron_plugin.py:60 +#, python-format +msgid "get_ofp_rest_api: %s" +msgstr "" + +#: neutron/plugins/ryu/ryu_neutron_plugin.py:124 +msgid "Invalid configuration. check ryu.ini" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:95 +#, python-format +msgid "Could not get IPv4 address from %(nic)s: %(cfg)s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:161 +#, python-format +msgid "External port %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:169 +msgid "Get Ryu rest API address" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:217 +msgid "Ryu rest API port isn't specified" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:218 +#, python-format +msgid "Going to ofp controller mode %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:294 +#, python-format +msgid "tunnel_ip %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:296 +#, python-format +msgid "ovsdb_port %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:298 +#, python-format +msgid "ovsdb_ip %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:304 +#, python-format +msgid "Initialization failed: %s" +msgstr "" + +#: neutron/plugins/ryu/agent/ryu_neutron_agent.py:307 +msgid "" +"Ryu initialization on the node is done. Agent initialized successfully, " +"now running..." 
+msgstr "" + +#: neutron/plugins/ryu/common/config.py:24 +msgid "OpenFlow REST API location" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:26 +msgid "Minimum tunnel ID to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:28 +msgid "Maximum tunnel ID to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:30 +msgid "Tunnel IP to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:32 +msgid "Tunnel interface to use" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:34 +msgid "OVSDB port to connect to" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:36 +msgid "OVSDB IP to connect to" +msgstr "" + +#: neutron/plugins/ryu/common/config.py:38 +msgid "OVSDB interface to connect to" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:39 +#, python-format +msgid "get_port_from_device() called:port_id=%s" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:75 +#, python-format +msgid "" +"Invalid tunnel key options tunnel_key_min: %(key_min)d tunnel_key_max: " +"%(key_max)d. Using default value" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:155 +#, python-format +msgid "last_key %(last_key)s new_key %(new_key)s" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:158 +msgid "No key found" +msgstr "" + +#: neutron/plugins/ryu/db/api_v2.py:191 +#, python-format +msgid "Transaction retry exhausted (%d). Abandoned tunnel key allocation." +msgstr "" + +#: neutron/plugins/vmware/check_nsx_config.py:45 +#: neutron/plugins/vmware/check_nsx_config.py:80 +#, python-format +msgid "Error '%(err)s' when connecting to controller(s): %(ctl)s." +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:55 +#, python-format +msgid "Invalid agent_mode: %s" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:106 +msgid "network_auto_schedule has been disabled" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:127 +#, python-format +msgid "Unable to run Neutron with config option '%s', as NSX does not support it" +msgstr "" + +#: neutron/plugins/vmware/dhcpmeta_modes.py:130 +#, python-format +msgid "Unmet dependency for config option '%s'" +msgstr "" + +#: neutron/plugins/vmware/nsx_cluster.py:49 +#, python-format +msgid "" +"Attribute '%s' has been deprecated or moved to a new section. See new " +"configuration file for details." +msgstr "" + +#: neutron/plugins/vmware/nsx_cluster.py:61 +#, python-format +msgid "The following cluster attributes were not specified: %s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/__init__.py:28 +#, python-format +msgid "Invalid connection type: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:103 +#, python-format +msgid "[%d] no API providers currently available." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:106 +#, python-format +msgid "[%d] Waiting to acquire API client connection." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:110 +#, python-format +msgid "[%(rid)d] Connection %(conn)s idle for %(sec)0.2f seconds; reconnecting." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:119 +#, python-format +msgid "[%(rid)d] Acquired connection %(conn)s. %(qsize)d connection(s) available." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:140 +#, python-format +msgid "" +"[%(rid)d] Released connection %(conn)s is not an API provider for the " +"cluster" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:150 +#, python-format +msgid "[%(rid)d] Connection returned in bad state, reconnecting to %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:176 +#, python-format +msgid "[%(rid)d] Released connection %(conn)s. %(qsize)d connection(s) available." +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:186 +#, python-format +msgid "Login request for an invalid connection: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:197 +msgid "Waiting for auth to complete" +msgstr "" + +#: neutron/plugins/vmware/api_client/base.py:239 +#, python-format +msgid "Invalid conn_params value: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:93 +#, python-format +msgid "Request returns \"%s\"" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:106 +#, python-format +msgid "Request timed out: %(method)s to %(url)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:117 +#, python-format +msgid "Received error code: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:118 +#, python-format +msgid "Server Error Message: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:124 +#, python-format +msgid "" +"%(method)s to %(url)s, unexpected response code: %(status)d (content = " +"'%(body)s')" +msgstr "" + +#: neutron/plugins/vmware/api_client/client.py:141 +msgid "Unable to determine NSX version. Plugin might not work as expected." +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_client.py:145 +#, python-format +msgid "Login error \"%s\"" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_client.py:150 +#, python-format +msgid "Saving new authentication cookie '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:102 +msgid "Joining an invalid green thread" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:122 +#, python-format +msgid "[%d] Request timeout." +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:123 +msgid "Request timeout" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:149 +#, python-format +msgid "[%(rid)d] Completed request '%(method)s %(url)s': %(status)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:156 +#, python-format +msgid "[%(rid)d] Error while handling request: %(req)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/eventlet_request.py:212 +#, python-format +msgid "[%(rid)d] Failed to parse API provider: %(e)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:41 +msgid "Server denied session's authentication credentials." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:45 +msgid "An entity referenced in the request was not found." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:49 +msgid "Request conflicts with configuration on a different entity." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:54 +msgid "" +"Request could not completed because the associated resource could not be " +"reached." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:59 +msgid "The request is forbidden from accessing the referenced resource." 
+msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:64 +msgid "Create/Update actions are forbidden when in read-only mode." +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:72 +msgid "The server is unable to fulfill the request due to a bad syntax" +msgstr "" + +#: neutron/plugins/vmware/api_client/exception.py:77 +msgid "The backend received an invalid security certificate." +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:85 +msgid "No API connections available" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:90 +#, python-format +msgid "[%(rid)d] Issuing - request %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:116 +#, python-format +msgid "Setting X-Nvp-Wait-For-Config-Generation request header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:122 +#, python-format +msgid "[%(rid)d] Exception issuing request: %(e)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:130 +#, python-format +msgid "[%(rid)d] Completed request '%(conn)s': %(status)s (%(elapsed)s seconds)" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:139 +#, python-format +msgid "Reading X-Nvp-config-Generation response header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:168 +#, python-format +msgid "[%d] Maximum redirects exceeded, aborting request" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:178 +#, python-format +msgid "[%(rid)d] Redirecting request to: %(conn)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:191 +#, python-format +msgid "[%(rid)d] Request '%(method)s %(url)s' received: %(status)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:195 +#, python-format +msgid "Server error return: %s" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:199 +msgid "Invalid server response" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:204 +#, python-format +msgid "[%(rid)d] Failed request '%(conn)s': '%(msg)s' (%(elapsed)s seconds)" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:238 +#, python-format +msgid "[%d] Received redirect status without location header field" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:255 +#, python-format +msgid "[%(rid)d] Received invalid redirect location: '%(url)s'" +msgstr "" + +#: neutron/plugins/vmware/api_client/request.py:259 +#, python-format +msgid "[%(rid)d] Received malformed redirect location: %(url)s" +msgstr "" + +#: neutron/plugins/vmware/api_client/version.py:30 +#, python-format +msgid "Unable to fetch NSX version from response headers :%s" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:39 +msgid "" +"Maximum number of ports of a logical switch on a bridged transport zone " +"(default 5000)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:43 +msgid "" +"Maximum number of ports of a logical switch on an overlay transport zone " +"(default 256)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:47 +msgid "Maximum concurrent connections to each NSX controller." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:52 +msgid "" +"Number of seconds a generation id should be valid for (default -1 meaning" +" do not time out)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:56 +msgid "" +"If set to access_network this enables a dedicated connection to the " +"metadata proxy for metadata server access via Neutron router. 
If set to " +"dhcp_host_route this enables host route injection via the dhcp agent. " +"This option is only useful if running on a host that does not support " +"namespaces otherwise access_network should be used." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:65 +msgid "" +"The default network tranport type to use (stt, gre, bridge, ipsec_gre, or" +" ipsec_stt)" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:69 +msgid "The mode used to implement DHCP/metadata services." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:71 +msgid "" +"The default option leverages service nodes to perform packet replication " +"though one could set to this to 'source' to perform replication locally. " +"This is useful if one does not want to deploy a service node(s)." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:80 +msgid "" +"Interval in seconds between runs of the state synchronization task. Set " +"it to 0 to disable it" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:84 +msgid "" +"Maximum value for the additional random delay in seconds between runs of " +"the state synchronization task" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:89 +msgid "" +"Minimum delay, in seconds, between two state synchronization queries to " +"NSX. It must not exceed state_sync_interval" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:94 +msgid "" +"Minimum number of resources to be retrieved from NSX during state " +"synchronization" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:98 +msgid "" +"Always read operational status from backend on show operations. Enabling " +"this option might slow down the system." +msgstr "" + +#: neutron/plugins/vmware/common/config.py:107 +msgid "User name for NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:112 +msgid "Password for NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:115 +msgid "Total time limit for a cluster request" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:118 +msgid "Time before aborting a request" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:121 +msgid "Number of time a request should be retried" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:124 +msgid "Number of times a redirect should be followed" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:127 +msgid "Lists the NSX controllers in this cluster" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:132 +msgid "" +"This is uuid of the default NSX Transport zone that will be used for " +"creating tunneled isolated \"Neutron\" networks. It needs to be created " +"in NSX before starting Neutron with the nsx plugin." 
+msgstr "" + +#: neutron/plugins/vmware/common/config.py:137 +msgid "" +"Unique identifier of the NSX L3 Gateway service which will be used for " +"implementing routers and floating IPs" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:141 +msgid "" +"Unique identifier of the NSX L2 Gateway service which will be used by " +"default for network gateways" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:144 +msgid "" +"Unique identifier of the Service Cluster which will be used by logical " +"services like dhcp and metadata" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:147 +msgid "" +"Name of the interface on a L2 Gateway transport nodewhich should be used " +"by default when setting up a network connection" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:157 +msgid "User name for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:161 +msgid "Password for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:163 +msgid "uri for vsm" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:165 +msgid "Optional parameter identifying the ID of datacenter to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:168 +#: neutron/plugins/vmware/common/config.py:174 +msgid "Optional parameter identifying the ID of datastore to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:171 +msgid "Optional parameter identifying the ID of resource to deploy NSX Edges" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:177 +msgid "Network ID for physical network connectivity" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:180 +msgid "Task status check interval" +msgstr "" + +#: neutron/plugins/vmware/common/config.py:194 +#, python-format +msgid "Invalid replication_mode: %s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:21 +#, python-format +msgid "An unexpected error occurred in the NSX Plugin: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:25 +#, python-format +msgid "Unable to fulfill request with version %(version)s." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:29 +#, python-format +msgid "Invalid NSX connection parameters: %(conn_params)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:33 +#, python-format +msgid "" +"Invalid cluster values: %(invalid_attrs)s. Please ensure that these " +"values are specified in the [DEFAULT] section of the NSX plugin ini file." +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:39 +#, python-format +msgid "Unable to find cluster config entry for nova zone: %(nova_zone)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:44 +#, python-format +msgid "" +"Unable to create port on network %(network)s. Maximum number of ports " +"reached" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:49 +#, python-format +msgid "" +"While retrieving NAT rules, %(actual_rules)s were found whereas rules in " +"the (%(min_rules)s,%(max_rules)s) interval were expected" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:55 +#, python-format +msgid "Invalid NSX attachment type '%(attachment_type)s'" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:59 +msgid "" +"The networking backend is currently in maintenance mode and therefore " +"unable to accept requests which modify its state. Please try later." 
+msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:65 +#, python-format +msgid "Gateway Service %(gateway)s is already in use" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:69 +msgid "" +"An invalid security certificate was specified for the gateway device. " +"Certificates must be enclosed between '-----BEGIN CERTIFICATE-----' and '" +"-----END CERTIFICATE-----'" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:76 +#, python-format +msgid "Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:80 +#, python-format +msgid "Router %(router_id)s is in use by Loadbalancer Service %(vip_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:85 +#, python-format +msgid "Router %(router_id)s is in use by firewall Service %(firewall_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:90 +#, python-format +msgid "Error happened in NSX VCNS Driver: %(err_msg)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:94 +#, python-format +msgid "" +"Service cluster: '%(cluster_id)s' is unavailable. Please, check NSX setup" +" and/or configuration" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:99 +#, python-format +msgid "" +"An error occurred while connecting LSN %(lsn_id)s and network %(net_id)s " +"via port %(port_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:108 +#, python-format +msgid "Unable to find LSN for %(entity)s %(entity_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:112 +#, python-format +msgid "Unable to find port for LSN %(lsn_id)s and %(entity)s %(entity_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:117 +#, python-format +msgid "Unable to migrate network '%(net_id)s' to LSN: %(reason)s" +msgstr "" + +#: neutron/plugins/vmware/common/exceptions.py:121 +#, python-format +msgid "Configuration conflict on Logical Service Node %(lsn_id)s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:63 +#, python-format +msgid "Unable to find NSX switches for Neutron network %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:110 +#, python-format +msgid "Unable to find NSX port for Neutron port %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:150 +#, python-format +msgid "Unable to find NSX security profile for Neutron security group %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:154 +#, python-format +msgid "Multiple NSX security profiles found for Neutron security group %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:185 +#, python-format +msgid "Unable to find NSX router for Neutron router %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:243 +#, python-format +msgid "" +"Unable to retrieve operational status for gateway devices belonging to " +"tenant: %s" +msgstr "" + +#: neutron/plugins/vmware/common/nsx_utils.py:246 +msgid "Unable to retrieve operational status for gateway devices" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:231 +#, python-format +msgid "" +"Minimum request delay:%(req_delay)s must not exceed synchronization " +"interval:%(sync_interval)s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:264 +#, python-format +msgid "Logical switch for neutron network %s not found on NSX." 
+msgstr "" + +#: neutron/plugins/vmware/common/sync.py:299 +#: neutron/plugins/vmware/common/sync.py:381 +#: neutron/plugins/vmware/common/sync.py:476 +#, python-format +msgid "Updating status for neutron resource %(q_id)s to: %(status)s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:351 +#, python-format +msgid "Logical router for neutron router %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:401 +#, python-format +msgid "Unable to find Neutron router id for NSX logical router: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:443 +#, python-format +msgid "Logical switch port for neutron port %s not found on NSX." +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:536 +#, python-format +msgid "" +"Requested page size is %(cur_chunk_size)d.It might be necessary to do " +"%(num_requests)d round-trips to NSX for fetching data. Please tune sync " +"parameters to ensure chunk size is less than %(max_page_size)d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:569 +#, python-format +msgid "Fetching up to %s resources from NSX backend" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:589 +#, python-format +msgid "Total data size: %d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:593 +#, python-format +msgid "" +"Fetched %(num_lswitches)d logical switches, %(num_lswitchports)d logical " +"switch ports,%(num_lrouters)d logical routers" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:609 +#, python-format +msgid "Running state synchronization task. Chunk: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:619 +#, python-format +msgid "" +"An error occurred while communicating with NSX backend. Will retry " +"synchronization in %d seconds" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:623 +#, python-format +msgid "Time elapsed querying NSX: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:630 +#, python-format +msgid "Number of chunks: %d" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:648 +#, python-format +msgid "Time elapsed hashing data: %s" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:660 +#, python-format +msgid "Synchronization for chunk %(chunk_num)d of %(total_chunks)d performed" +msgstr "" + +#: neutron/plugins/vmware/common/sync.py:672 +#, python-format +msgid "Time elapsed at end of sync: %s" +msgstr "" + +#: neutron/plugins/vmware/common/utils.py:64 +#, python-format +msgid "Specified name:'%s' exceeds maximum length. It will be truncated on NSX" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:76 +#, python-format +msgid "Port mapping for %s already available" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:123 +#, python-format +msgid "NSX identifiers for neutron port %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:134 +#, python-format +msgid "NSX identifiers for neutron router %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/db.py:149 +#, python-format +msgid "NSX identifiers for neutron security group %s not yet stored in Neutron DB" +msgstr "" + +#: neutron/plugins/vmware/dbexts/lsn_db.py:87 +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:71 +#, python-format +msgid "Unable to find Logical Service Node for network %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:45 +#, python-format +msgid "" +"Network Gateway '%(gateway_id)s' still has active mappings with one or " +"more neutron networks." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:50 +#, python-format +msgid "Network Gateway %(gateway_id)s could not be found" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:54 +#, python-format +msgid "" +"Network Gateway Device '%(device_id)s' is still used by one or more " +"network gateways." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:59 +#, python-format +msgid "Network Gateway Device %(device_id)s could not be found." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:63 +#, python-format +msgid "" +"Port '%(port_id)s' is owned by '%(device_owner)s' and therefore cannot be" +" deleted directly via the port API." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:68 +#, python-format +msgid "" +"The specified mapping '%(mapping)s' is already in use on network gateway " +"'%(gateway_id)s'." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:73 +#, python-format +msgid "" +"Multiple network connections found on '%(gateway_id)s' with provided " +"criteria." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:78 +#, python-format +msgid "" +"The connection %(network_mapping_info)s was not found on the network " +"gateway '%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:83 +#, python-format +msgid "The network gateway %(gateway_id)s cannot be updated or deleted" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:191 +msgid "" +"A network identifier must be specified when connecting a network to a " +"network gateway. Unable to complete operation" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:197 +#, python-format +msgid "" +"Invalid keys found among the ones provided in request body: " +"%(connection_attrs)s." +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:203 +msgid "" +"In order to specify a segmentation id the segmentation type must be " +"specified as well" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:207 +msgid "Cannot specify a segmentation id when the segmentation type is flat" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:262 +#, python-format +msgid "Created network gateway with id:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:274 +#, python-format +msgid "Updated network gateway with id:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:289 +#, python-format +msgid "Network gateway '%s' was destroyed." 
+msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:306 +#, python-format +msgid "Connecting network '%(network_id)s' to gateway '%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:347 +#, python-format +msgid "" +"Requested network '%(network_id)s' not found.Unable to create network " +"connection on gateway '%(network_gateway_id)s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:355 +#, python-format +msgid "" +"Gateway port for '%(network_gateway_id)s' created on network " +"'%(network_id)s':%(port_id)s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:371 +#, python-format +msgid "Ensured no Ip addresses are configured on port %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:381 +#, python-format +msgid "" +"Disconnecting network '%(network_id)s' from gateway " +"'%(network_gateway_id)s'" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:475 +#, python-format +msgid "Created network gateway device: %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:486 +#, python-format +msgid "Updated network gateway device: %s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/networkgw_db.py:499 +#, python-format +msgid "Deleted network gateway device: %s." +msgstr "" + +#: neutron/plugins/vmware/dbexts/nsxrouter.py:61 +#, python-format +msgid "Nsx router extension successfully processed for router:%s" +msgstr "" + +#: neutron/plugins/vmware/dbexts/qos_db.py:294 +#, python-format +msgid "DSCP value (%s) will be ignored with 'trusted' marking" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:77 +#, python-format +msgid "Rule Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:94 +msgid "Rule Resource binding not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:123 +#: neutron/plugins/vmware/dbexts/vcns_db.py:133 +#, python-format +msgid "VIP Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:162 +#, python-format +msgid "" +"Pool Resource binding with edge_id:%(edge_id)s pool_vseid:%(pool_vseid)s " +"not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:174 +#, python-format +msgid "Pool Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dbexts/vcns_db.py:200 +#, python-format +msgid "Monitor Resource binding with id:%s not found!" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:40 +msgid "" +"Pull LSN information from NSX in case it is missing from the local data " +"store. This is useful to rebuild the local store in case of server " +"recovery." 
+msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:82 +#, python-format +msgid "Unable to create LSN for network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:90 +#, python-format +msgid "Unable to delete Logical Service Node %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:107 +#, python-format +msgid "" +"Unable to find Logical Service Node Port for LSN %(lsn_id)s and subnet " +"%(subnet_id)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:129 +#, python-format +msgid "" +"Unable to find Logical Service Node Port for LSN %(lsn_id)s and mac " +"address %(mac)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:149 +#, python-format +msgid "Unable to create port for LSN %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:157 +#, python-format +msgid "Unable to delete LSN Port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:174 +#, python-format +msgid "Metadata port not found while attempting to delete it from network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:177 +#, python-format +msgid "Unable to find Logical Services Node Port with MAC %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:262 +#, python-format +msgid "" +"Unable to configure dhcp for Logical Service Node %(lsn_id)s and port " +"%(lsn_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:283 +#, python-format +msgid "Unable to configure metadata for subnet %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:305 +#, python-format +msgid "Error while configuring LSN port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:377 +#, python-format +msgid "Unable to save LSN for network %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/lsnmanager.py:443 +#, python-format +msgid "Unable to save LSN port for subnet %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:81 +#, python-format +msgid "Port %s is already gone" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:112 +msgid "LSN already exist" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:116 +msgid "Cannot migrate an external network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:125 +msgid "Cannot migrate a 'metadata' network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/migration.py:128 +msgid "Unable to support multiple subnets per network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:39 +msgid "Comma separated list of additional domain name servers" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:47 +msgid "Default DHCP lease time" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:55 +msgid "IP address used by Metadata server." +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:59 +msgid "TCP Port used by Metadata server." +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:136 +#, python-format +msgid "" +"Error while creating subnet %(cidr)s for network %(network)s. 
Please, " +"contact administrator" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:206 +#: neutron/plugins/vmware/dhcp_meta/nsx.py:224 +#, python-format +msgid "Performing DHCP %(action)s for resource: %(resource)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:211 +#, python-format +msgid "Network %s is external: no LSN to create" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:219 +#, python-format +msgid "Logical Services Node for network %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:241 +#, python-format +msgid "Error while configuring DHCP for port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:253 +#, python-format +msgid "DHCP is disabled for subnet %s: nothing to do" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:272 +#, python-format +msgid "DHCP for port %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:280 +#, python-format +msgid "Network %s is external: nothing to do" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:288 +#, python-format +msgid "Configuring metadata entry for port %s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:300 +#, python-format +msgid "Metadata for port %s configured successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:304 +#, python-format +msgid "Handle metadata access via router: %(r)s and interface %(i)s" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/nsx.py:321 +#, python-format +msgid "Metadata for router %s handled successfully" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:75 +#, python-format +msgid "Subnet %s does not have a gateway, the metadata route will not be created" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:99 +msgid "Metadata access network is disabled" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:102 +msgid "" +"Overlapping IPs must be enabled in order to setup the metadata access " +"network" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:122 +#, python-format +msgid "" +"No router interface found for router '%s'. No metadata access network " +"should be created or destroyed" +msgstr "" + +#: neutron/plugins/vmware/dhcp_meta/rpc.py:130 +#, python-format +msgid "" +"An error occurred while operating on the metadata access network for " +"router:'%s'" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:84 +msgid "Cannot create a gateway with an empty device list" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:100 +#, python-format +msgid "Unexpected keys found in device description:%s" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:104 +#, python-format +msgid "%s: provided data are not iterable" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:111 +msgid "A connector type is required to create a gateway device" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:120 +#, python-format +msgid "Unknown connector type: %s" +msgstr "" + +#: neutron/plugins/vmware/extensions/networkgw.py:127 +msgid "Number of network gateways allowed per tenant, -1 for unlimited" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:34 +msgid "Need to be admin in order to create queue called default" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:38 +msgid "Default queue already exists." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:42 +#, python-format +msgid "Invalid value for dscp %(data)s must be integer value between 0 and 63." 
+msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:47 +msgid "The qos marking cannot be set to 'trusted' when the DSCP field is set" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:52 +msgid "Invalid bandwidth rate, min greater than max." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:56 +#, python-format +msgid "Invalid bandwidth rate, %(data)s must be a non negative integer." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:61 +#, python-format +msgid "Queue %(id)s does not exist" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:65 +msgid "Unable to delete queue attached to port." +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:69 +msgid "Port is not associated with lqueue" +msgstr "" + +#: neutron/plugins/vmware/extensions/qos.py:80 +#, python-format +msgid "'%s' must be a non negative integer." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/__init__.py:77 +#, python-format +msgid "Error. %(type)s exception: %(exc)s." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/__init__.py:81 +#, python-format +msgid "locals=[%s]" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/lsn.py:171 +#, python-format +msgid "" +"Attempt to plug Logical Services Node %(lsn)s into network with port " +"%(port)s failed. PatchAttachment already exists with another port" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:252 +#, python-format +msgid "Cannot update NSX routes %(routes)s for router %(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:346 +#, python-format +msgid "Created logical port %(lport_uuid)s on logical router %(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:374 +#, python-format +msgid "Updated logical port %(lport_uuid)s on logical router %(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:385 +#, python-format +msgid "" +"Delete logical router port %(lport_uuid)s on logical router " +"%(lrouter_uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:449 +#, python-format +msgid "Invalid keys for NAT match: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:455 +#, python-format +msgid "Creating NAT rule: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:470 +msgid "" +"No SNAT rules cannot be applied as they are not available in this version" +" of the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:475 +msgid "" +"No DNAT rules cannot be applied as they are not available in this version" +" of the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:609 +#, python-format +msgid "Router Port %(lport_id)s not found on router %(lrouter_id)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/router.py:614 +#, python-format +msgid "" +"An exception occurred while updating IP addresses on a router logical " +"port:%s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:94 +#, python-format +msgid "Created Security Profile: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:120 +#, python-format +msgid "Updated Security Profile: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/secgroup.py:140 +#, python-format +msgid "Unable to find security profile %s on NSX backend" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:130 +#, python-format +msgid "Created logical switch: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:150 +#: neutron/plugins/vmware/nsxlib/switch.py:165 +#, python-format +msgid "Network not found, Error: %s" +msgstr "" + +#: 
neutron/plugins/vmware/nsxlib/switch.py:188 +msgid "Port or Network not found" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:247 +#, python-format +msgid "Lswitch %s not found in NSX" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:256 +msgid "Unable to get ports" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:273 +#, python-format +msgid "" +"Looking for port with q_port_id tag '%(neutron_port_id)s' on: " +"'%(lswitch_uuid)s'" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:281 +#, python-format +msgid "" +"Found '%(num_ports)d' ports with q_port_id tag: '%(neutron_port_id)s'. " +"Only 1 was expected." +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:290 +#, python-format +msgid "get_port() %(network)s %(port)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:298 +#: neutron/plugins/vmware/nsxlib/switch.py:329 +#, python-format +msgid "Port or Network not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:324 +#, python-format +msgid "Updated logical port %(result)s on logical switch %(uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:359 +#, python-format +msgid "Created logical port %(result)s on logical switch %(uuid)s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/switch.py:371 +#, python-format +msgid "Port not found, Error: %s" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/versioning.py:56 +msgid "Operation may not be supported" +msgstr "" + +#: neutron/plugins/vmware/nsxlib/versioning.py:64 +msgid "" +"NSX version is not set. Unable to complete request correctly. Check log " +"for NSX communication errors." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:211 +#, python-format +msgid "Unable to process default l2 gw service:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:246 +#, python-format +msgid "Created NSX router port:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:248 +#: neutron/plugins/vmware/plugins/service.py:438 +#, python-format +msgid "Unable to create port on NSX logical router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:251 +#, python-format +msgid "" +"Unable to create logical router port for neutron port id %(port_id)s on " +"router %(nsx_router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:329 +#, python-format +msgid "Attached %(att)s to NSX router port %(port)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:335 +#, python-format +msgid "" +"Unable to plug attachment in NSX logical router port %(r_port_id)s, " +"associated with Neutron %(q_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:341 +#, python-format +msgid "" +"Unable to plug attachment in router port %(r_port_id)s for neutron port " +"id %(q_port_id)s on router %(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:387 +msgid "An exception occurred while selecting logical switch for the port" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:427 +#, python-format +msgid "" +"An exception occurred while creating the neutron port %s on the NSX " +"plaform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:439 +#: neutron/plugins/vmware/plugins/base.py:491 +#: neutron/plugins/vmware/plugins/base.py:689 +#, python-format +msgid "" +"NSX plugin does not support regular VIF ports on external networks. Port " +"%s will be down." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:460 +#, python-format +msgid "" +"_nsx_create_port completed for port %(name)s on network %(network_id)s. 
" +"The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:471 +#, python-format +msgid "" +"Concurrent network deletion detected; Back-end Port %(nsx_id)s creation " +"to be rolled back for Neutron port: %(neutron_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:483 +#, python-format +msgid "NSX Port %s already gone" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:498 +#, python-format +msgid "Port '%s' was already deleted on NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:505 +#, python-format +msgid "_nsx_delete_port completed for port %(port_id)s on network %(net_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:510 +#, python-format +msgid "Port %s not found in NSX" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:519 +#, python-format +msgid "" +"Neutron port %(port_id)s not found on NSX backend. Terminating delete " +"operation. A dangling router port might have been left on router " +"%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:534 +#: neutron/plugins/vmware/plugins/base.py:1069 +#, python-format +msgid "" +"Ignoring exception as this means the peer for port '%s' has already been " +"deleted." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:546 +#, python-format +msgid "" +"It is not allowed to create router interface ports on external networks " +"as '%s'" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:571 +#, python-format +msgid "" +"_nsx_create_router_port completed for port %(name)s on network " +"%(network_id)s. The new port id is %(id)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:584 +#, python-format +msgid "" +"device_id field must be populated in order to create an external gateway " +"port for network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:594 +#, python-format +msgid "The gateway port for the NSX router %s was not found on the backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:635 +#, python-format +msgid "" +"_nsx_create_ext_gw_port completed on external network %(ext_net_id)s, " +"attached to router:%(router_id)s. NSX port id is %(nsx_port_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:670 +#: neutron/plugins/vmware/plugins/base.py:1806 +#, python-format +msgid "Logical router resource %s not found on NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:674 +#: neutron/plugins/vmware/plugins/base.py:1810 +msgid "Unable to update logical routeron NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:676 +#, python-format +msgid "" +"_nsx_delete_ext_gw_port completed on external network %(ext_net_id)s, " +"attached to NSX router:%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:718 +#, python-format +msgid "" +"_nsx_create_l2_gw_port completed for port %(name)s on network " +"%(network_id)s. The new port id is %(id)s." 
+msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:760 +#, python-format +msgid "%s required" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:765 +msgid "Segmentation ID cannot be specified with flat network type" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:769 +msgid "Segmentation ID must be specified with vlan network type" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:773 +#: neutron/plugins/vmware/plugins/base.py:789 +#, python-format +msgid "%(segmentation_id)s out of range (%(min_id)s through %(max_id)s)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:795 +#, python-format +msgid "%(net_type_param)s %(net_type_value)s not supported" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:840 +#, python-format +msgid "No switch has available ports (%d checked)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:874 +#, python-format +msgid "Maximum number of logical ports reached for logical network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:966 +#, python-format +msgid "" +"Network with admin_state_up=False are not yet supported by this plugin. " +"Ignoring setting for network %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1058 +#, python-format +msgid "" +"A nsx lport identifier was not found for neutron port '%s'. Unable to " +"remove the peer router port for this switch port" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1077 +#, python-format +msgid "delete_network completed for tenant: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1080 +#: neutron/plugins/vmware/plugins/service.py:540 +#, python-format +msgid "Did not found lswitch %s in NSX" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1115 +msgid "admin_state_up=False networks are not supported." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1135 +#, python-format +msgid "Unable to find NSX mappings for neutron network:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1142 +#, python-format +msgid "" +"Logical switch update on NSX backend failed. Neutron network " +"id:%(net_id)s; NSX lswitch id:%(lswitch_id)s;Error:%(error)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1213 +#, python-format +msgid "port created on NSX backend for tenant %(tenant_id)s: (%(id)s)" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1216 +#, python-format +msgid "Logical switch for network %s was not found in NSX." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1227 +msgid "Unable to create port or set port attachment in NSX." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1315 +#, python-format +msgid "Updating port: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1350 +#, python-format +msgid "Unable to update port id: %s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1433 +msgid "" +"Cannot create a distributed router with the NSX platform currently in " +"execution. Please, try without specifying the 'distributed' attribute." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1439 +msgid "Unable to create logical router on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1450 +#, python-format +msgid "" +"Unable to create L3GW port on logical router %(router_uuid)s. 
Verify " +"Default Layer-3 Gateway service %(def_l3_gw_svc)s id is correct" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1460 +#, python-format +msgid "Unable to create router %s on NSX backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1489 +#: neutron/plugins/vmware/plugins/base.py:1574 +#: neutron/plugins/vmware/plugins/service.py:200 +#: neutron/plugins/vmware/plugins/service.py:1235 +#, python-format +msgid "Network '%s' is not a valid external network" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1538 +#, python-format +msgid "Failed to set gateway info for router being created:%s - removing router" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1541 +#, python-format +msgid "" +"Create router failed while setting external gateway. Router:%s has been " +"removed from DB and backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1583 +msgid "" +"'routes' cannot contain route '0.0.0.0/0', this must be updated through " +"the default gateway attribute" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1599 +#, python-format +msgid "Logical router %s not found on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1603 +msgid "Unable to update logical router on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1605 +msgid "" +"Request cannot contain 'routes' with the NSX platform currently in " +"execution. Please, try without specifying the static routes." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1662 +#, python-format +msgid "Logical router '%s' not found on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1666 +#, python-format +msgid "Unable to delete logical router '%s' on NSX Platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1676 +#, python-format +msgid "" +"Unable to remove NSX mapping for Neutron router %(router_id)s because of " +"the following exception:%(d_exc)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1744 +#, python-format +msgid "" +"Add_router_interface completed for subnet:%(subnet_id)s and " +"router:%(router_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1842 +#, python-format +msgid "" +"An error occurred while removing NAT rules on the NSX platform for " +"floating ip:%s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1847 +msgid "An incorrect number of matching NAT rules was found on the NSX platform" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1979 +#, python-format +msgid "" +"An error occurred while creating NAT rules on the NSX platform for " +"floating ip:%(floating_ip)s mapped to internal ip:%(internal_ip)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:1985 +msgid "Failed to update NAT rules for floatingip update" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2022 +#, python-format +msgid "The port '%s' is not associated with floating IPs" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2025 +#, python-format +msgid "Nat rules not found in nsx for port: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2061 +#, python-format +msgid "Unable to create l2_gw_service for: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2084 +msgid "" +"Unable to remove gateway service from NSX plaform - the resource was not " +"found" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2113 +#, python-format +msgid "Unable to update name on NSX backend for network gateway: %s" +msgstr "" + +#: 
neutron/plugins/vmware/plugins/base.py:2142 +#, python-format +msgid "" +"Rolling back database changes for gateway device %s because of an error " +"in the NSX backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2187 +#: neutron/plugins/vmware/plugins/base.py:2225 +#, python-format +msgid "" +"Neutron gateway device: %(neutron_id)s; NSX transport node identifier: " +"%(nsx_id)s; Operational status: %(status)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2322 +#, python-format +msgid "" +"Removal of gateway device: %(neutron_id)s failed on NSX backend (NSX " +"id:%(nsx_id)s) because the NSX resource was not found" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2329 +#, python-format +msgid "" +"Removal of gateway device: %(neutron_id)s failed on NSX backend (NSX " +"id:%(nsx_id)s). Neutron and NSX states have diverged." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2377 +#, python-format +msgid "" +"Error while updating security profile %(uuid)s with name %(name)s: " +"%(error)s." +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2409 +#, python-format +msgid "" +"The NSX security profile %(sec_profile_id)s, associated with the Neutron " +"security group %(sec_group_id)s was not found on the backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2417 +#, python-format +msgid "" +"An exception occurred while removing the NSX security profile " +"%(sec_profile_id)s, associated with Netron security group " +"%(sec_group_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2424 +#, python-format +msgid "Unable to remove security group %s from backend" +msgstr "" + +#: neutron/plugins/vmware/plugins/base.py:2437 +#, python-format +msgid "Port values not valid for protocol: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:143 +#, python-format +msgid "EDGE: router = %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:174 +msgid "EDGE: _vcns_create_ext_gw_port" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:183 +msgid "EDGE: _vcns_delete_ext_gw_port" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:338 +#, python-format +msgid "VCNS: delete default gateway %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:423 +#, python-format +msgid "An exception occurred while creating a port on lswitch %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:471 +#, python-format +msgid "Unable to create integration logic switch for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:481 +#, python-format +msgid "Unable to add router interface to integration lswitch for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:491 +#, python-format +msgid "Unable to create advance service router for %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:882 +#, python-format +msgid "Failed to create firewall on vShield Edge bound on router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:890 +msgid "Bad Firewall request Input" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:905 +#: neutron/plugins/vmware/plugins/service.py:1221 +msgid "router_id is not provided!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:909 +#: neutron/plugins/vmware/plugins/service.py:1609 +#, python-format +msgid "router_id:%s is not an advanced router!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:914 +msgid "A firewall is already associated with the router" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1111 +#, python-format +msgid "Failed to find the edge with vip_id: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1148 +#, python-format +msgid "" +"Operation can't be performed, Since resource %(model)s : %(id)s is in " +"DELETEing status!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1157 +#, python-format +msgid "Resource %(model)s : %(id)s not found!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1182 +#, python-format +msgid "Failed to create healthmonitor associated with pool id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1199 +msgid "Failed to create pool on vshield edge" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1218 +msgid "create_vip() called" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1226 +#, python-format +msgid "router_id: %s is not an advanced router!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1259 +msgid "Failed to create vip!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1299 +#, python-format +msgid "Failed to update vip with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1316 +#, python-format +msgid "Failed to delete vip with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1372 +#, python-format +msgid "Failed to update pool with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1394 +#: neutron/plugins/vmware/plugins/service.py:1441 +#: neutron/plugins/vmware/plugins/service.py:1464 +msgid "Failed to update pool with the member" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1420 +msgid "Failed to update old pool with the member" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1484 +#, python-format +msgid "Failed to update monitor with id: %s!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1494 +msgid "Vcns right now can only support one monitor per pool" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1523 +msgid "Failed to associate monitor with pool!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1554 +msgid "Failed to update pool with pool_monitor!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1571 +#, python-format +msgid "" +"Failed to update ipsec vpn configuration on edge, since the router: %s " +"does not have a gateway yet!" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1596 +msgid "Bad or unsupported Input request!" 
+msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1599 +#, python-format +msgid "" +"Failed to update ipsec VPN configuration with vpnservice: " +"%(vpnservice_id)s on vShield Edge: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1606 +msgid "create_vpnservice() called" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1614 +#, python-format +msgid "a vpnservice is already associated with the router: %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1727 +#, python-format +msgid "Start deploying %(edge_id)s for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1733 +#: neutron/plugins/vmware/plugins/service.py:1770 +#, python-format +msgid "Failed to deploy Edge for router %s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1751 +#, python-format +msgid "Router %s not found" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1754 +#, python-format +msgid "Successfully deployed %(edge_id)s for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1786 +#, python-format +msgid "interface_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1789 +#, python-format +msgid "snat_create_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1792 +#, python-format +msgid "snat_delete_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1795 +#, python-format +msgid "dnat_create_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1798 +#, python-format +msgid "dnat_delete_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1801 +#, python-format +msgid "routes_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/plugins/service.py:1804 +#, python-format +msgid "nat_update_result %d" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:24 +#, python-format +msgid "" +"\n" +"Service type = %s\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:27 +#, python-format +msgid "Service uuids = %s\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:28 +#, python-format +msgid "" +"Port uuids = %s\n" +"\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:37 +msgid "ID or name of network to run report on" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:46 +msgid "Migration report is:\n" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:56 +msgid "ID or name of network to migrate" +msgstr "" + +#: neutron/plugins/vmware/shell/commands.py:66 +msgid "Migration has been successful:\n" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:135 +#, python-format +msgid "" +"VCNS: Failed to get edge status:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:162 +#, python-format +msgid "VCNS: start updating vnic %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:167 +#, python-format +msgid "" +"VCNS: Failed to update vnic %(config)s:\n" +"%(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:173 +#, python-format +msgid "VCNS: Failed to update vnic %d" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:181 +#, python-format +msgid "VCNS: update vnic %(index)d: %(addr)s %(netmask)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:190 +#, python-format +msgid "Vnic %d currently not supported" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:211 +#, python-format +msgid "VCNS: 
start deploying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:219 +#, python-format +msgid "VCNS: deploying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:224 +#, python-format +msgid "VCNS: deploy edge failed for router %s." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:243 +#, python-format +msgid "VCNS: Edge %s status query failed." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:248 +#, python-format +msgid "VCNS: Unable to retrieve edge %(edge_id)s status. Retry %(retries)d." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:255 +#, python-format +msgid "VCNS: Unable to retrieve edge %s status. Abort." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:259 +#, python-format +msgid "VCNS: Edge %s status" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:266 +#, python-format +msgid "VCNS: Failed to deploy edge %(edge_id)s for %(name)s, status %(status)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:273 +#, python-format +msgid "VCNS: Edge %(edge_id)s deployed for router %(name)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:280 +#, python-format +msgid "VCNS: start destroying edge %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:288 +#, python-format +msgid "" +"VCNS: Failed to delete %(edge_id)s:\n" +"%(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:294 +#, python-format +msgid "VCNS: Failed to delete %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:304 +#, python-format +msgid "" +"VCNS: Failed to get edges:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:382 +#, python-format +msgid "" +"VCNS: Failed to get nat config:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:389 +#, python-format +msgid "VCNS: start creating nat rules: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:405 +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:530 +#, python-format +msgid "" +"VCNS: Failed to create snat rule:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:413 +#, python-format +msgid "VCNS: create snat rule %(src)s/%(translated)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:436 +#, python-format +msgid "VCNS: start deleting %(type)s rules: %(addr)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:447 +#, python-format +msgid "" +"VCNS: Failed to delete snat rule:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:454 +#, python-format +msgid "VCNS: delete snat rule %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:472 +#, python-format +msgid "VCNS: create dnat rule %(dst)s/%(translated)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:493 +#, python-format +msgid "VCNS: delete dnat rule %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:517 +#, python-format +msgid "VCNS: start updating nat rules: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:538 +#, python-format +msgid "" +"VCNS: update nat rule\n" +"SNAT:%(snat)s\n" +"DNAT:%(dnat)s\n" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:576 +#, python-format 
+msgid "VCNS: start updating routes for %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:600 +#, python-format +msgid "" +"VCNS: Failed to update routes:\n" +"%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:652 +msgid "Failed to get service config" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_appliance_driver.py:664 +msgid "Failed to enable loadbalancer service config" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:41 +#, python-format +msgid "Invalid action value %s in a firewall rule" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:50 +#, python-format +msgid "Invalid action value %s in a vshield firewall rule" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:190 +#, python-format +msgid "Failed to get firewall with edge id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:208 +#, python-format +msgid "No rule id:%s found in the edge_firewall_binding" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:218 +#, python-format +msgid "Failed to get firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:234 +#, python-format +msgid "Failed to update firewall with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:246 +#, python-format +msgid "Failed to delete firewall with edge_id:%s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:260 +#, python-format +msgid "Failed to update firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:273 +#, python-format +msgid "Failed to delete firewall rule: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:290 +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:318 +#, python-format +msgid "Failed to add firewall rule above: %(rule_id)s with edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:329 +#, python-format +msgid "Failed to append a firewall rulewith edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_firewall_driver.py:350 +msgid "Can't execute insert rule operation without reference rule_id" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:50 +#, python-format +msgid "" +"Unsupported ike_version: %s! Only 'v1' ike version is supported on " +"vshield Edge!" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:64 +msgid "" +"IKEPolicy and IPsecPolicy should have consistent auth_algorithm, " +"encryption_algorithm and pfs for VSE!" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:72 +#, python-format +msgid "" +"Unsupported encryption_algorithm: %s! '3des', 'aes-128' and 'aes-256' are" +" supported on VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:81 +#, python-format +msgid "Unsupported pfs: %s! 'group2' and 'group5' are supported on VSE right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:89 +#, python-format +msgid "" +"Unsupported transform protocol: %s! 'esp' is supported by default on VSE " +"right now." +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:98 +#, python-format +msgid "" +"Unsupported encapsulation mode: %s! 'tunnel' is supported by default on " +"VSE right now." 
+msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:136 +#, python-format +msgid "Failed to update ipsec vpn configuration with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:143 +#, python-format +msgid "IPsec config not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py:146 +#, python-format +msgid "Failed to delete ipsec vpn configuration with edge_id: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:154 +#, python-format +msgid "Invalid %(protocol)s persistence method: %(type)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:180 +#, python-format +msgid "Failed to create app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:191 +#, python-format +msgid "Failed to create vip on vshield edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:209 +#, python-format +msgid "vip_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:226 +msgid "Failed to get vip on edge" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:243 +#, python-format +msgid "Failed to update app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:251 +#, python-format +msgid "Failed to update vip on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:262 +#, python-format +msgid "vip not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:265 +#, python-format +msgid "Failed to delete vip on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:270 +#, python-format +msgid "app profile not found on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:273 +#, python-format +msgid "Failed to delete app profile on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:284 +msgid "Failed to create pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:301 +#, python-format +msgid "pool_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:311 +msgid "Failed to get pool on edge" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:323 +msgid "Failed to update pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:333 +msgid "Failed to delete pool" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:343 +#, python-format +msgid "Failed to create monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:361 +#, python-format +msgid "monitor_binding not found with id: %(id)s edge_id: %(edge_id)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:371 +#, python-format +msgid "Failed to get monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:388 +#, python-format +msgid "Failed to update monitor on edge: %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py:399 +msgid "Failed to delete monitor" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:54 +#, python-format +msgid "VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')" +msgstr "" + +#: 
neutron/plugins/vmware/vshield/vcns.py:62 +#, python-format +msgid "Header: '%s'" +msgstr "" + +#: neutron/plugins/vmware/vshield/vcns.py:63 +#, python-format +msgid "Content: '%s'" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:35 +#, python-format +msgid "%(resource)s not found: %(msg)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:39 +#, python-format +msgid "An unknown exception %(status)s occurred: %(response)s." +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:50 +#, python-format +msgid "Resource %(uri)s has been redirected" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:54 +#, python-format +msgid "Request %(uri)s is Bad, response %(response)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:58 +#, python-format +msgid "Forbidden: %(uri)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:62 +#, python-format +msgid "Resource %(uri)s not found" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:66 +#, python-format +msgid "Media Type %(uri)s is not supported" +msgstr "" + +#: neutron/plugins/vmware/vshield/common/exceptions.py:70 +#, python-format +msgid "Service Unavailable: %(uri)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:46 +#, python-format +msgid "Invalid state %(state)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:50 +#, python-format +msgid "State %(state)d skipped. Current state %(current)d" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:96 +#, python-format +msgid "Task %(task)s encountered exception in %(func)s at state %(state)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:182 +#, python-format +msgid "Start task %s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:188 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:208 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:231 +#, python-format +msgid "Task %(task)s encountered exception in %(cb)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:194 +#: neutron/plugins/vmware/vshield/tasks/tasks.py:213 +#, python-format +msgid "Task %(task)s return %(status)s" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:296 +msgid "Stopping TaskManager" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:321 +msgid "TaskManager terminating because of an exception" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:343 +msgid "TaskManager terminated" +msgstr "" + +#: neutron/plugins/vmware/vshield/tasks/tasks.py:375 +msgid "Exception in _check_pending_tasks" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:51 +#, python-format +msgid "Agent %s already present" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:52 +#, python-format +msgid "" +"Network %(network_id)s is scheduled to be hosted by DHCP agent " +"%(agent_id)s" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:70 +#, python-format +msgid "Network %s is hosted already" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:79 +#: neutron/scheduler/dhcp_agent_scheduler.py:88 +msgid "No more DHCP agents" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:108 +msgid "No non-hosted networks" +msgstr "" + +#: neutron/scheduler/dhcp_agent_scheduler.py:119 +#, python-format +msgid "DHCP agent %s is not active" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:63 +#, python-format +msgid "No enabled L3 agent on host %s" +msgstr "" + +#: 
neutron/scheduler/l3_agent_scheduler.py:68 +#, python-format +msgid "L3 agent %s is not active" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:76 +#: neutron/scheduler/l3_agent_scheduler.py:127 +#, python-format +msgid "Router %(router_id)s has already been hosted by L3 agent %(agent_id)s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:95 +msgid "No non-hosted routers" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:109 +#, python-format +msgid "No routers compatible with L3 agent configuration on host %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:135 +msgid "No active L3 agents" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:140 +#, python-format +msgid "No L3 agents can host the router %s" +msgstr "" + +#: neutron/scheduler/l3_agent_scheduler.py:153 +#, python-format +msgid "Router %(router_id)s is scheduled to L3 agent %(agent_id)s" +msgstr "" + +#: neutron/server/__init__.py:42 +msgid "" +"ERROR: Unable to find configuration file via the default search paths " +"(~/.neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" +msgstr "" + +#: neutron/server/__init__.py:54 +msgid "RPC was already started in parent process by plugin." +msgstr "" + +#: neutron/server/__init__.py:66 +#, python-format +msgid "ERROR: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:27 +msgid "" +"Defines providers for advanced services using the format: " +"::[:default]" +msgstr "" + +#: neutron/services/provider_configuration.py:45 +#, python-format +msgid "Provider name is limited by 255 characters: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:54 +msgid "Invalid service provider format" +msgstr "" + +#: neutron/services/provider_configuration.py:62 +#, python-format +msgid "Invalid provider format. 
Last part should be 'default' or empty: %s" +msgstr "" + +#: neutron/services/provider_configuration.py:68 +#, python-format +msgid "Service type '%(svc_type)s' is not allowed, allowed types: %(allowed)s" +msgstr "" + +#: neutron/services/provider_configuration.py:82 +#, python-format +msgid "" +"Service provider '%(provider)s' could not be found for service type " +"%(service_type)s" +msgstr "" + +#: neutron/services/provider_configuration.py:87 +#, python-format +msgid "Service type %(service_type)s does not have a default service provider" +msgstr "" + +#: neutron/services/provider_configuration.py:92 +#, python-format +msgid "" +"Resource '%(resource_id)s' is already associated with provider " +"'%(provider)s' for service type '%(service_type)s'" +msgstr "" + +#: neutron/services/provider_configuration.py:105 +#, python-format +msgid "Driver %s is not unique across providers" +msgstr "" + +#: neutron/services/provider_configuration.py:115 +#, python-format +msgid "Multiple default providers for service %s" +msgstr "" + +#: neutron/services/provider_configuration.py:126 +#, python-format +msgid "Multiple providers specified for service %s" +msgstr "" + +#: neutron/services/service_base.py:70 +#, python-format +msgid "No providers specified for '%s' service, exiting" +msgstr "" + +#: neutron/services/service_base.py:81 +#, python-format +msgid "Loaded '%(provider)s' provider for service %(service_type)s" +msgstr "" + +#: neutron/services/service_base.py:87 +#, python-format +msgid "Error loading provider '%(provider)s' for service %(service_type)s" +msgstr "" + +#: neutron/services/service_base.py:98 +#, python-format +msgid "Default provider is not specified for service type %s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:43 +msgid "set_firewall_status() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:51 +#, python-format +msgid "Firewall %(fw_id)s in PENDING_DELETE state, not changing to %(status)s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:66 +msgid "firewall_deleted() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:74 +#, python-format +msgid "Firewall %(fw)s unexpectedly deleted by agent, status was %(status)s" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:82 +msgid "get_firewalls_for_tenant() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:91 +msgid "get_firewalls_for_tenant_without_rules() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:97 +msgid "get_tenants_with_firewalls() called" +msgstr "" + +#: neutron/services/firewall/fwaas_plugin.py:145 +#, python-format +msgid "" +"Exceeded allowed count of firewalls for tenant %(tenant_id)s. Only one " +"firewall is supported per tenant." 
+msgstr "" + +#: neutron/services/firewall/agents/firewall_agent_api.py:31 +msgid "Name of the FWaaS Driver" +msgstr "" + +#: neutron/services/firewall/agents/firewall_agent_api.py:35 +msgid "Enable FWaaS" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:43 +msgid "Retrieve Firewall with rules from Plugin" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:52 +msgid "Retrieve Tenants with Firewalls configured from Plugin" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:64 +msgid "Initializing firewall agent" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:72 +#, python-format +msgid "FWaaS Driver Loaded: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:75 +#, python-format +msgid "Error importing FWaaS device driver: %s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:112 +#, python-format +msgid "%(func_name)s from agent for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:120 +#, python-format +msgid "No Routers on tenant: %s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:127 +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:260 +#, python-format +msgid "Apply fw on Router List: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:139 +#, python-format +msgid "Firewall Driver Error for %(func_name)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:154 +#, python-format +msgid "FWaaS RPC failure in %(func_name)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:171 +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:187 +#, python-format +msgid "Firewall Driver Error on fw state %(fwmsg)s for fw: %(fwid)s" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:199 +#, python-format +msgid "Process router add, router_id: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:210 +#, python-format +msgid "Process router add, fw_list: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:227 +#, python-format +msgid "FWaaS RPC info call failed for '%s'." +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:242 +#, python-format +msgid "Tenants with Firewalls: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:252 +#, python-format +msgid "Router List: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:254 +#, python-format +msgid "fw_list: '%s'" +msgstr "" + +#: neutron/services/firewall/agents/l3reference/firewall_l3_agent.py:271 +msgid "Failed fwaas process services sync" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:29 +msgid "vArmour director ip" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:31 +msgid "vArmour director port" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:33 +msgid "vArmour director username" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:35 +msgid "vArmour director password" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:45 +msgid "An unknown exception." 
+msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:59 +msgid "Invalid login credential." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:65 +msgid "vArmourRestAPI: started" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:98 +#, python-format +msgid "vArmourRestAPI: %(server)s %(port)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:104 +#, python-format +msgid "vArmourRestAPI Sending: %(method)s %(action)s %(headers)s %(body_data)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:115 +#, python-format +msgid "vArmourRestAPI Response: %(status)s %(resp_str)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_api.py:123 +msgid "vArmourRestAPI: Could not establish HTTP connection" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:49 +msgid "vArmourL3NATAgent: __init__" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:63 +#, python-format +msgid "_router_added: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:70 +#, python-format +msgid "_router_removed: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:111 +#, python-format +msgid "_va_unset_zone_interfaces: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:143 +#, python-format +msgid "_va_set_interface_ip: %(pif)s %(cidr)s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:163 +#, python-format +msgid "_va_config_trusted_zone: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:194 +#, python-format +msgid "_va_config_untrusted_zone: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:209 +#, python-format +msgid "_va_config_untrusted_zone: gw=%r" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:222 +#, python-format +msgid "_va_config_router_snat_rules: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:254 +#, python-format +msgid "_va_config_floating_ips: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:276 +#, python-format +msgid "process_router: %s" +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:287 +msgid "Unable to parse interface mapping." +msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:290 +msgid "Unable to read interface mapping." 
+msgstr "" + +#: neutron/services/firewall/agents/varmour/varmour_router.py:309 +#, python-format +msgid "external_gateway_added: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:47 +msgid "Initializing fwaas iptables driver" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:50 +#, python-format +msgid "Creating firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:59 +#, python-format +msgid "Failed to create firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:63 +#, python-format +msgid "Deleting firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:75 +#, python-format +msgid "Failed to delete firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:79 +#, python-format +msgid "Updating firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:88 +#, python-format +msgid "Failed to update firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:92 +#, python-format +msgid "Applying firewall %(fw_id)s for tenant %(tid)s)" +msgstr "" + +#: neutron/services/firewall/drivers/linux/iptables_fwaas.py:111 +#, python-format +msgid "Failed to apply default policy on firewall: %s" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:28 +msgid "Initializing fwaas vArmour driver" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:33 +#, python-format +msgid "create_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:38 +#, python-format +msgid "update_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:46 +#, python-format +msgid "delete_firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:51 +#, python-format +msgid "apply_default_policy (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:61 +#, python-format +msgid "Updating firewall (%s)" +msgstr "" + +#: neutron/services/firewall/drivers/varmour/varmour_fwaas.py:110 +msgid "Unsupported IP version rule." 
+msgstr "" + +#: neutron/services/l3_router/l3_apic.py:55 +msgid "L3 Router Service Plugin for basic L3 using the APIC" +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:96 +#, python-format +msgid "Error attaching subnet %(subnet_id)s to router %(router_id)s" +msgstr "" + +#: neutron/services/l3_router/l3_apic.py:131 +#, python-format +msgid "Error detaching subnet %(subnet_id)s from router %(router_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:101 +#, python-format +msgid "Pool %(pool_id)s has already been hosted by lbaas agent %(agent_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:109 +#, python-format +msgid "No active lbaas agents for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:115 +#, python-format +msgid "No lbaas agent supporting device driver %s" +msgstr "" + +#: neutron/services/loadbalancer/agent_scheduler.py:124 +#, python-format +msgid "Pool %(pool_id)s is scheduled to lbaas agent %(agent_id)s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:82 +#, python-format +msgid "Delete associated loadbalancer pools before removing providers %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:91 +#, python-format +msgid "Error retrieving driver for provider %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:99 +#, python-format +msgid "Error retrieving provider for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/plugin.py:190 +#, python-format +msgid "Failed to delete pool %s, putting it in ERROR state" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent.py:36 +msgid "Seconds between periodic task runs" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:39 +msgid "Drivers used to manage loadbalancing devices" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:45 +#, python-format +msgid "Unknown device with pool_id %(pool_id)s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:94 +#, python-format +msgid "Error importing loadbalancer device driver: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:101 +#, python-format +msgid "Multiple device drivers with the same name found: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:141 +#, python-format +msgid "Error updating statistics on pool %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:157 +msgid "Unable to retrieve ready devices" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:174 +#: neutron/services/loadbalancer/agent/agent_manager.py:239 +#, python-format +msgid "No device driver on agent: %s." +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:184 +#, python-format +msgid "Unable to deploy instance for pool: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:194 +#, python-format +msgid "Unable to destroy device for pool: %s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:207 +#, python-format +msgid "%(operation)s %(obj)s %(id)s failed on device driver %(driver)s" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:333 +#, python-format +msgid "Destroying pool %s due to agent disabling" +msgstr "" + +#: neutron/services/loadbalancer/agent/agent_manager.py:336 +#, python-format +msgid "Agent_updated by server side %s!" 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:40 +msgid "Driver to use for scheduling pool to a default loadbalancer agent" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:48 +msgid "Device driver for agent should be specified in plugin driver." +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:72 +#, python-format +msgid "Multiple lbaas agents found on host %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:152 +#, python-format +msgid "Unknown object type: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:163 +#, python-format +msgid "" +"Cannot update status: %(obj_type)s %(obj_id)s not found in the DB, it was" +" probably deleted concurrently" +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:186 +#, python-format +msgid "Unable to find port %s to plug." +msgstr "" + +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:210 +#: neutron/services/loadbalancer/drivers/common/agent_driver_base.py:227 +#, python-format +msgid "" +"Unable to find port %s to unplug. This can occur when the Vip has been " +"deleted first." +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:32 +msgid "Load Balancer image id (Embrane LB)" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:34 +msgid "In band Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:36 +msgid "Out of band Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:38 +msgid "Management Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:40 +msgid "Dummy user traffic Security Zone id for LBs" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:44 +msgid "choose LB image flavor to use, accepted values: small, medium" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/config.py:47 +msgid "resource synchronization interval in seconds" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:51 +#, python-format +msgid "%s, probably was cancelled through the heleos UI" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:58 +#, python-format +msgid "" +"Failed to delete the backend load balancer for reason %s. 
Please remove " +"it manually through the heleos UI" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/constants.py:61 +#, python-format +msgid "" +"No subnet is associated to member %s (required to identify the proper " +"load balancer port)" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:88 +msgid "Connection limit is not supported by Embrane LB" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:94 +#, python-format +msgid "Session persistence %s not supported by Embrane LBaaS" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/driver.py:132 +#, python-format +msgid "Subnet assigned to pool %s doesn't exist, backend port can't be created" +msgstr "" + +#: neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py:111 +#, python-format +msgid "" +"The load balancer %s had no physical representation, likely already " +"deleted" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:46 +msgid "Location to store config and state files" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:52 +msgid "The user group" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:58 +msgid "" +"When delete and re-add the same vip, send this many gratuitous ARPs to " +"flush the ARP cache in the Router. Set it below or equal to 0 to disable " +"this feature." +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:75 +#, python-format +msgid "Error importing interface driver: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:171 +#, python-format +msgid "Stats socket not found for pool %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:213 +#, python-format +msgid "Error while connecting to stats socket: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py:392 +#, python-format +msgid "Unable to kill haproxy process: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:43 +#, python-format +msgid "NCC Error %d" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:54 +msgid "No NetScaler Control Center URI specified. Cannot connect." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:133 +#, python-format +msgid "Connection error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:138 +#, python-format +msgid "SSL error occurred while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:143 +#, python-format +msgid "Request to %s timed out" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:150 +msgid "Request did not specify a valid URL" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:154 +#, python-format +msgid "Too many redirects occurred for request to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:158 +#, python-format +msgid "A request error while connecting to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:163 +#, python-format +msgid "A unknown error occurred during request to %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:171 +#, python-format +msgid "Unable to login. 
Invalid credentials passed.for: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/ncc_client.py:175 +#, python-format +msgid "Failed %(method)s operation on %(url)s status code: %(response_status)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:29 +msgid "The URL to reach the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:33 +msgid "Username to login to the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:37 +msgid "Password to login to the NetScaler Control Center Server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:75 +#, python-format +msgid "NetScaler driver vip creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:90 +#, python-format +msgid "NetScaler driver vip %(vip_id)s update: %(vip_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:105 +#, python-format +msgid "NetScaler driver vip removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:126 +#, python-format +msgid "NetScaler driver pool creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:141 +#, python-format +msgid "NetScaler driver pool %(pool_id)s update: %(pool_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:156 +#, python-format +msgid "NetScaler driver pool removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:173 +#, python-format +msgid "NetScaler driver poolmember creation: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:191 +#, python-format +msgid "NetScaler driver poolmember %(member_id)s update: %(member_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:208 +#, python-format +msgid "NetScaler driver poolmember removal: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:226 +#, python-format +msgid "" +"NetScaler driver healthmonitor creation for pool %(pool_id)s: " +"%(monitor_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:249 +#, python-format +msgid "NetScaler driver healthmonitor %(monitor_id)s update: %(monitor_obj)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:270 +#, python-format +msgid "NetScaler driver healthmonitor %(monitor_id)sremoval for pool %(pool_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:290 +#, python-format +msgid "NetScaler driver pool stats retrieval: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:415 +#, python-format +msgid "" +"Filtering ports based on network_id=%(network_id)s, " +"tenant_id=%(tenant_id)s, device_id=%(device_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:430 +#, python-format +msgid "Found an existing SNAT port for subnet %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:433 +#, python-format +msgid "Found no SNAT ports for subnet %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:454 +#, python-format +msgid "Created SNAT port: %s" +msgstr "" + +#: 
neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:462 +#, python-format +msgid "Removed SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:469 +#, python-format +msgid "No SNAT port found for subnet %s. Creating one..." +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:477 +#, python-format +msgid "SNAT port: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/netscaler/netscaler_driver.py:487 +#, python-format +msgid "Removing SNAT port for subnet %s as this is the last pool using it..." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:62 +msgid "IP address of vDirect server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:64 +msgid "IP address of secondary vDirect server." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:67 +msgid "vDirect user name." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:70 +msgid "vDirect user password." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:73 +msgid "Service ADC type. Default: VA." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:76 +msgid "Service ADC version." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:79 +msgid "Enables or disables the Service HA pair. Default: False." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:83 +msgid "Service throughput. Default: 1000." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:86 +msgid "Service SSL throughput. Default: 100." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:89 +msgid "Service compression throughput. Default: 100." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:92 +msgid "Size of service cache. Default: 20." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:95 +msgid "Name of l2_l3 workflow. Default: openstack_l2_l3." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:99 +msgid "Name of l4 workflow. Default: openstack_l4." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:107 +msgid "Parameter for l2_l3 workflow constructor." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:114 +msgid "Parameter for l2_l3 workflow setup." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:117 +msgid "List of actions that are not pushed to the completion queue." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:121 +msgid "Name of the l4 workflow action. Default: BaseCreate." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:125 +msgid "Resource pool IDs." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:128 +msgid "A required VLAN for the interswitch link to use." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:131 +msgid "" +"Enable or disable Alteon interswitch link for stateful session failover. " +"Default: False." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:231 +#, python-format +msgid "" +"vip: %(vip)s, extended_vip: %(extended_vip)s, service_name: " +"%(service_name)s, " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:265 +#, python-format +msgid "Retrieved pip nport: %(port)r for vip: %(vip)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:273 +#, python-format +msgid "Found no pip nports associated with vip: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:282 +#, python-format +msgid "Failed to remove workflow %s. Going to set vip to ERROR status" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:296 +#, python-format +msgid "pip nport id: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:300 +#, python-format +msgid "pip nport delete failed: %r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:388 +#, python-format +msgid "" +"_handle_pool_health_monitor. health_monitor = %(hm_id)s pool_id = " +"%(pool_id)s delete = %(delete)s vip_id = %(vip_id)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:419 +msgid "Starting operation completion handling thread" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:449 +#, python-format +msgid "_update_workflow response: %s " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:458 +#: neutron/services/loadbalancer/drivers/radware/driver.py:489 +#, python-format +msgid "Pushing operation %s to the queue" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:466 +#, python-format +msgid "Remove the workflow %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:474 +#, python-format +msgid "Post-remove workflow function %r completed" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:478 +#, python-format +msgid "Post-remove workflow function %r failed" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:595 +#, python-format +msgid "create_workflow response: %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:664 +#, python-format +msgid "" +"vDirectRESTClient:init server=%(server)s, secondary " +"server=%(sec_server)s, port=%(port)d, ssl=%(ssl)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:670 +#, python-format +msgid "Fliping servers. Current is: %(server)s, switching to %(secondary)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:683 +msgid "" +"REST client is not able to recover since only one vDirect server is " +"configured." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:691 +#, python-format +msgid "vDirect server is not responding (%s)." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:695 +#, python-format +msgid "vDirect server is not active (%s)." 
+msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:723 +msgid "vdirectRESTClient: Could not establish HTTPS connection" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:730 +msgid "vdirectRESTClient: Could not establish HTTP connection" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:747 +#, python-format +msgid "vdirectRESTClient: %(action)s failure, %(e)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:815 +#, python-format +msgid "" +"Operation %(oper)s is completed after %(sec_to_completion)d sec with " +"success status: %(success)s :" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:827 +#, python-format +msgid "Operation %(operation)s failed. Reason: %(msg)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:859 +#, python-format +msgid "Operation %s is not completed yet.." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:874 +msgid "Exception was thrown inside OperationCompletionHandler" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:883 +#, python-format +msgid "Post-operation function %(func)r completed after operation %(oper)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:889 +#, python-format +msgid "Post-operation function %(func)r failed after operation %(oper)r" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:930 +#, python-format +msgid "_update: %s " +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/driver.py:970 +#, python-format +msgid "_remove_object_from_db %s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:22 +msgid "An unknown exception occurred in Radware LBaaS provider." +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:26 +msgid "" +"vDirect user/password missing. Specify in configuration file, under " +"[radware] section" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:31 +#, python-format +msgid "" +"Workflow %(workflow)s is missing on vDirect server. Upload missing " +"workflow" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:36 +#, python-format +msgid "" +"REST request failed with status %(status)s. Reason: %(reason)s, " +"Description: %(description)s. Success status codes are %(success_codes)s" +msgstr "" + +#: neutron/services/loadbalancer/drivers/radware/exceptions.py:42 +#, python-format +msgid "%(operation)s operation is not supported for %(entity)s." 
+msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:70 +msgid "Metering driver" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:72 +msgid "Interval between two metering measures" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:74 +msgid "Interval between two metering reports" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:98 +#, python-format +msgid "Loading Metering driver %s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:100 +msgid "A metering driver must be specified" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:115 +#, python-format +msgid "Send metering report: %s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:179 +#, python-format +msgid "Driver %(driver)s does not implement %(func)s" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:183 +#, python-format +msgid "Driver %(driver)s:%(func)s runtime error" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:218 +msgid "Get router traffic counters" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:222 +msgid "Update metering rules from agent" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:227 +msgid "Creating a metering label from agent" +msgstr "" + +#: neutron/services/metering/agents/metering_agent.py:234 +msgid "Delete a metering label from agent" +msgstr "" + +#: neutron/services/metering/drivers/iptables/iptables_driver.py:90 +#, python-format +msgid "Loading interface driver %s" +msgstr "" + +#: neutron/services/vpn/agent.py:26 +msgid "The vpn device drivers Neutron will use" +msgstr "" + +#: neutron/services/vpn/plugin.py:46 +#, python-format +msgid "VPN plugin using service driver: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:71 +#, python-format +msgid "RESPONSE: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:84 +#, python-format +msgid "%(method)s: Request for %(resource)s payload: %(payload)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:91 +#, python-format +msgid "%(method)s Took %(time).2f seconds to process" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:97 +#, python-format +msgid "%(method)s: Request timeout%(ssl)s (%(timeout).3f sec) for CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:106 +#, python-format +msgid "%(method)s: Unable to connect to CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:110 +#, python-format +msgid "%(method)s: Unexpected error for CSR (%(host)s): %(error)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:116 +#, python-format +msgid "%(method)s: Completed [%(status)s]" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:131 +#, python-format +msgid "%(auth)s with CSR %(host)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:138 +#, python-format +msgid "Successfully authenticated with CSR %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:140 +#, python-format +msgid "Failed authentication with CSR %(host)s [%(status)s]" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_csr_rest_client.py:175 +#, python-format +msgid "%(method)s: Request timeout for CSR(%(host)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:43 
+msgid "Status check interval for Cisco CSR IPSec connections" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:54 +#, python-format +msgid "Cisco CSR failed to create %(resource)s (%(which)s)" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:58 +#, python-format +msgid "Cisco CSR failed to change %(tunnel)s admin state to %(state)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:63 +#, python-format +msgid "" +"Required %(resource)s attribute %(attr)s mapping for Cisco CSR is missing" +" in device driver" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:68 +#, python-format +msgid "" +"Device driver does not have a mapping of '%(value)s for attribute " +"%(attr)s of %(resource)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:83 +#, python-format +msgid "Scanning config files %s for Cisco CSR configurations" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:88 +#, python-format +msgid "Config file parse error: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:92 +#, python-format +msgid "Unable to parse config files %s for Cisco CSR info" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:102 +#, python-format +msgid "Ignoring Cisco CSR configuration entry - router IP %s is not valid" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:113 +#, python-format +msgid "Ignoring Cisco CSR for router %(router)s - missing %(field)s setting" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:121 +#, python-format +msgid "Ignoring Cisco CSR for router %s - timeout is not a floating point number" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:130 +#, python-format +msgid "Ignoring Cisco CSR for subnet %s - REST management is not an IP address" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:137 +#, python-format +msgid "Ignoring Cisco CSR for router %s - local tunnel is not an IP address" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:147 +#, python-format +msgid "Found CSR for router %(router)s: %(info)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:213 +#, python-format +msgid "Loaded %(num)d Cisco CSR configuration%(plural)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:217 +#, python-format +msgid "No Cisco CSR configurations found in: %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:228 +#, python-format +msgid "Handling VPN service update notification '%s'" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:250 +#, python-format +msgid "Update: Existing connection %s changed" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:257 +#, python-format +msgid "Update: Connection %s no longer admin down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:263 +#, python-format +msgid "Update: Connection %s forced to admin down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:271 +#, python-format +msgid "Update: Created new connection %s in admin down state" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:276 +#, python-format +msgid "Update: Created new connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:288 +#, python-format +msgid "" +"Update: Skipping VPN service %(service)s as it's router (%(csr_id)s is " +"not associated with a Cisco CSR" +msgstr "" + +#: 
neutron/services/vpn/device_drivers/cisco_ipsec.py:294 +#, python-format +msgid "Update: Existing VPN service %s detected" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:298 +#, python-format +msgid "Update: New VPN service %s detected" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:307 +msgid "Update: Completed update processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:337 +#, python-format +msgid "Mark: %(service)d VPN services and %(conn)d IPSec connections marked dirty" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:359 +#, python-format +msgid "" +"Sweep: Removed %(service)d dirty VPN service%(splural)s and %(conn)d " +"dirty IPSec connection%(cplural)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:374 +#, python-format +msgid "Report: Collecting status for IPSec connections on VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:380 +#, python-format +msgid "Connection %s forced down" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:384 +#, python-format +msgid "Connection %(conn)s reported %(status)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:389 +#, python-format +msgid "Report: Adding info for IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:409 +#, python-format +msgid "Report: Adding info for VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:431 +msgid "Report: Starting status report processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:433 +#, python-format +msgid "Report: Collecting status for VPN service %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:439 +msgid "Sending status report update to plugin" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:441 +msgid "Report: Completed status report processing" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:736 +#, python-format +msgid "Unable to create %(resource)s %(which)s: %(status)d" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:749 +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:777 +#, python-format +msgid "Internal error - '%s' is not defined" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:762 +#, python-format +msgid "Unable to delete %(resource)s %(which)s: %(status)d" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:771 +#, python-format +msgid "Performing rollback action %(action)s for resource %(resource)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:797 +#, python-format +msgid "Creating IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:830 +#, python-format +msgid "FAILED: Create of IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:833 +#, python-format +msgid "SUCCESS: Created IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:842 +#, python-format +msgid "Deleting IPSec connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:844 +#, python-format +msgid "Unable to find connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:848 +#, python-format +msgid "SUCCESS: Deleted IPSec site-to-site connection %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/cisco_ipsec.py:856 +#, 
python-format +msgid "Unable to change %(tunnel)s admin state to %(state)s" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:46 +msgid "Location to store ipsec server config files" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:49 +msgid "Interval for checking ipsec status" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:59 +msgid "Template file for ipsec configuration" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:65 +msgid "Template file for ipsec secret configuration" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:247 +#, python-format +msgid "Failed to enable vpn process on router %s" +msgstr "" + +#: neutron/services/vpn/device_drivers/ipsec.py:258 +#, python-format +msgid "Failed to disable vpn process on router %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/__init__.py:76 +#, python-format +msgid "Notify agent at %(topic)s.%(host)s the message %(method)s %(args)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:46 +#, python-format +msgid "Fatal - %(reason)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:80 +#, python-format +msgid "No available Cisco CSR %(type)s IDs from %(min)d..%(max)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:135 +#, python-format +msgid "" +"Database inconsistency between IPSec connection and Cisco CSR mapping " +"table (%s)" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:161 +#, python-format +msgid "Reserved new CSR ID %(csr_id)d for %(policy)s ID %(policy_id)s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:188 +#, python-format +msgid "" +"Mappings for IPSec connection %(conn)s - tunnel=%(tunnel)s " +"ike_policy=%(csr_ike)d ipsec_policy=%(csr_ipsec)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:197 +#, python-format +msgid "" +"Existing entry for IPSec connection %s not found in Cisco CSR mapping " +"table" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:224 +#, python-format +msgid "" +"Attempt to create duplicate entry in Cisco CSR mapping table for " +"connection %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:227 +#, python-format +msgid "" +"Mapped connection %(conn_id)s to Tunnel%(tunnel_id)d using IKE policy ID " +"%(ike_id)d and IPSec policy ID %(ipsec_id)d" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_csr_db.py:239 +#, python-format +msgid "Removed mapping for connection %s" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_ipsec.py:39 +#, python-format +msgid "" +"Cisco CSR does not support %(resource)s attribute %(key)s with value " +"'%(value)s'" +msgstr "" + +#: neutron/services/vpn/service_drivers/cisco_ipsec.py:160 +#, python-format +msgid "IPSec connection %s validated for Cisco CSR" +msgstr "" + +#: neutron/tests/unit/test_api_v2_resource.py:174 +#: neutron/tests/unit/test_api_v2_resource.py:244 +msgid "Unmapped error" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:72 +#, python-format +msgid "" +"Request: action=%(action)s, uri=%(uri)r, body=%(body)s, " +"headers=%(headers)s" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:124 +#, python-format +msgid "No floating IPs in requesturi=%(uri)s, body=%(body)s" +msgstr "" + +#: neutron/tests/unit/bigswitch/fake_server.py:133 +#, python-format +msgid "Expected floating IPs from multiple tenants.uri=%(uri)s, body=%(body)s" +msgstr "" + +#: 
neutron/tests/unit/bigswitch/fake_server.py:178 +#, python-format +msgid "No host cert for %(server)s in cert %(cert)s" +msgstr "" + +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:217 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:239 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:258 +#: neutron/tests/unit/db/loadbalancer/test_db_loadbalancer.py:281 +#, python-format +msgid "Unexpected error code: %s" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:32 +#, python-format +msgid "" +"%(method)s called with network settings %(current)s (original settings " +"%(original)s) and network segments %(segments)s" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:59 +#, python-format +msgid "" +"%(method)s called with subnet settings %(current)s (original settings " +"%(original)s)" +msgstr "" + +#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:85 +#, python-format +msgid "" +"%(method)s called with port settings %(current)s (original settings " +"%(original)s) bound to segment %(segment)s (original segment " +"%(original_segment)s) using driver %(driver)s (original driver " +"%(original_driver)s) on network %(network)s" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:65 +#, python-format +msgid "(create_tenant) OFC tenant %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:77 +#, python-format +msgid "(delete_tenant) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:79 +msgid "delete_tenant: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:86 +#, python-format +msgid "(create_network) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:89 +#, python-format +msgid "(create_network) OFC network %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:100 +#, python-format +msgid "(update_network) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:104 +msgid "update_network: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:112 +#, python-format +msgid "(delete_network) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:114 +msgid "delete_network: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:121 +#, python-format +msgid "(create_port) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:124 +#, python-format +msgid "(create_port) OFC port %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:138 +#, python-format +msgid "(delete_port) OFC port %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:140 +msgid "delete_port: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:173 +#, python-format +msgid "(create_router) OFC tenant %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:176 +#, python-format +msgid "(create_router) OFC router %s already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:179 +msgid "Operation on OFC is failed" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:193 +#: neutron/tests/unit/nec/stub_ofc_driver.py:283 +#, python-format +msgid "(delete_router) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:195 +msgid "delete_router: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:203 +#, python-format +msgid "(add_router_interface) ip_address %s is not a 
valid format (a.b.c.d/N)." +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:208 +#, python-format +msgid "(add_router_interface) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:211 +#, python-format +msgid "(add_router_interface) OFC network %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:218 +#, python-format +msgid "add_router_interface: SUCCEED (if_id=%s)" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:226 +#: neutron/tests/unit/nec/stub_ofc_driver.py:243 +#, python-format +msgid "(delete_router_interface) OFC router interface %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:235 +msgid "update_router_route: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:246 +msgid "delete_router_interface: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:256 +#, python-format +msgid "(add_router_route) OFC router %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:261 +#, python-format +msgid "(add_router_route) route to \"%s\" already exists" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:266 +#, python-format +msgid "add_router_route: SUCCEED (route_id=%s)" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:275 +#, python-format +msgid "(delete_router_route) OFC router route %s not found" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:277 +msgid "delete_router_route: SUCCEED" +msgstr "" + +#: neutron/tests/unit/nec/stub_ofc_driver.py:290 +#, python-format +msgid "list_router_routes: routes=%s" +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:86 +msgid "The specified OFC resource (/somewhere) is not found." +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:90 +#: neutron/tests/unit/nec/test_ofc_client.py:96 +#: neutron/tests/unit/nec/test_ofc_client.py:105 +msgid "An OFC exception has occurred: Operation on OFC failed" +msgstr "" + +#: neutron/tests/unit/nec/test_ofc_client.py:112 +msgid "An OFC exception has occurred: Failed to connect OFC : " +msgstr "" + +#: neutron/tests/unit/vmware/apiclient/fake.py:406 +#, python-format +msgid "lswitch:%s not found" +msgstr "" + +#: neutron/tests/unit/vmware/apiclient/fake.py:415 +#, python-format +msgid "lrouter:%s not found" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:104 +#, python-format +msgid "Job %s does not nexist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:116 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:127 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:144 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:162 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:184 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:206 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:290 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:304 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:318 +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:360 +#, python-format +msgid "Edge %s does not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:194 +#, python-format +msgid "Rule id %d doest not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/fake_vcns.py:257 +#, python-format +msgid "Lswitch %s does not exist" +msgstr "" + +#: neutron/tests/unit/vmware/vshield/test_edge_router.py:128 +msgid "Tasks not completed" +msgstr "" + diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-error.po 
b/icehouse-patches/neutron/dvr-patch/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..5f04744a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,168 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" +"neutron/language/pt_BR/)\n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "Exceção original sendo descartada: %s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "Exceção não esperada ocorreu %d vez(es)... tentando novamente." + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "Exceção durante limpeza de RPC." + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "Exceção não tratada" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." +msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "Exceção de BD incluída." + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "Exceção durante a manipulação de mensagem" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "Exceção na operação de formato de sequência" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao método de origem" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "Falha ao processar mensagem...pulando ela." + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "Falha ao processar mensagem... Irá voltar para a fila." + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"O servidor AMQP em %(hostname)s:%(port)d está inatingível: %(err_str)s. " +"Tentando novamente em %(sleep_time)d segundos." 
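Each of these locale files is a standard Babel/gettext catalog: the "#:" comment lines record where a message lives in the source tree, msgid is the literal English string, and msgstr carries the translation (an empty msgstr "" simply falls back to the English text at runtime). The catalogs are looked up per translation domain, one domain per file ("neutron", "neutron-log-error", "neutron-log-info", and so on). A minimal, illustrative sketch of that lookup follows; it assumes the .po files have been compiled to .mo and installed under /usr/share/locale, which is packaging work this patch does not itself perform.

    import gettext

    # Load the pt_BR catalog for the "neutron-log-error" domain shown above.
    # fallback=True returns the untranslated msgid strings when no compiled
    # .mo file can be found, so this is safe to run without the catalog.
    t = gettext.translation('neutron-log-error',
                            localedir='/usr/share/locale',
                            languages=['pt_BR'],
                            fallback=True)
    _LE = t.gettext  # on Python 2 the unicode variant is t.ugettext

    # Messages present in the catalog come back translated; unknown ones
    # come back unchanged.
    print(_LE("Failed to consume message from queue: %s") % 'demo-queue')
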
+ +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Falha ao declarar consumidor para o tópico '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Falha ao consumir mensagem da fila: %s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Falha ao publicar mensagem no tópico '%(topic)s': %(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "Falha ao processar mensagem... ignorando-a." + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "" +"Não é possível conectar ao servidor AMQP: %(e)s. Suspendendo em %(delay)s " +"segundos" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "Erro ao processar mensagem. Ignorando-o." + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "Falha na serialização de JSON." + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "A mensagem de RPC não incluiu o método." + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "Falha na criação do arquivo de soquete do tópico." + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "" +"Buffer de lista não processada por tópico local integral para o tópico " +"%(topic)s. Descartando mensagem." + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "Diretório IPC requerido não existe em %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "Permissão negada para o doretório IPC em %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." +msgstr "" +"Não foi possível criar o daemon receptor ZeroMQ. O soquete já pode estar em " +"uso." + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "Versão de Envelope ZMQ não suportada ou desconhecida." diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..11be020e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" +"neutron/language/pt_BR/)\n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s capturadas, saindo" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Processo pai saiu inesperadamente, saindo" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "Bifurcação muito rápida, suspendendo" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "Filho %d iniciado" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "Iniciando %d trabalhadores" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "%(pid)d filho eliminado pelo sinal %(sig)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Filho %(pid)s encerrando com status %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s capturado, parando filhos" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Aguardando em %d filhos para sair" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "Excluindo linha duplicada com ID: %(id)s da tabela: %(table)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Reconectando ao servidor AMQP em %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Conectado ao servidor AMQP em %(hostname)s:%(port)d" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "Conectado ao servidor AMQP em %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "Registrando reator" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "No reator registrado" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "Consumindo soquete" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "Criando proxy para o tópico: %s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "Ignorando registro de tópico. Já registrado." + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker não registrado: %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-error.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-error.po new file mode 100644 index 00000000..b581129f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-error.po @@ -0,0 +1,162 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-09 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/neutron/" +"language/zh_CN/)\n" +"Language: zh_CN\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/excutils.py:76 +#, python-format +msgid "Original exception being dropped: %s" +msgstr "正在删除原始异常:%s" + +#: neutron/openstack/common/excutils.py:105 +#, python-format +msgid "Unexpected exception occurred %d time(s)... retrying." +msgstr "意外的异常已发生 %d 次...正在重试。" + +#: neutron/openstack/common/periodic_task.py:179 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: neutron/openstack/common/service.py:188 +msgid "Exception during rpc cleanup." +msgstr "在RPC清除期间发生异常。" + +#: neutron/openstack/common/service.py:276 +msgid "Unhandled exception" +msgstr "存在未处理的异常" + +#: neutron/openstack/common/db/api.py:72 +msgid "DB exceeded retry limit." 
+msgstr "" + +#: neutron/openstack/common/db/api.py:76 +msgid "DB connection error." +msgstr "" + +#: neutron/openstack/common/db/sqlalchemy/session.py:460 +msgid "DB exception wrapped." +msgstr "数据库异常被包裹。" + +#: neutron/openstack/common/middleware/notifier.py:40 +#, python-format +msgid "An exception occurred processing the API call: %s " +msgstr "" + +#: neutron/openstack/common/rpc/amqp.py:480 +#: neutron/openstack/common/rpc/impl_zmq.py:286 +msgid "Exception during message handling" +msgstr "消息处理期间发生异常" + +#: neutron/openstack/common/rpc/common.py:88 +msgid "Exception in string format operation" +msgstr "字符串格式操作中发生异常" + +#: neutron/openstack/common/rpc/common.py:292 +#, python-format +msgid "Returning exception %s to caller" +msgstr "正在将异常 %s 返回至调用者" + +#: neutron/openstack/common/rpc/impl_kombu.py:156 +msgid "Failed to process message ... skipping it." +msgstr "未能处理消息...正在跳过该消息。" + +#: neutron/openstack/common/rpc/impl_kombu.py:160 +msgid "Failed to process message ... will requeue." +msgstr "未能处理消息...将重新排队。" + +#: neutron/openstack/common/rpc/impl_kombu.py:571 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"%(hostname)s:%(port)d 上的 AMQP 服务器不可访问:%(err_str)s。将在 " +"%(sleep_time)d 秒后再次进行尝试。" + +#: neutron/openstack/common/rpc/impl_kombu.py:625 +#: neutron/openstack/common/rpc/impl_qpid.py:575 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "未能针对主题“%(topic)s”声明使用者:%(err_str)s" + +#: neutron/openstack/common/rpc/impl_kombu.py:647 +#: neutron/openstack/common/rpc/impl_qpid.py:594 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "未能使用队列中的消息:%s" + +#: neutron/openstack/common/rpc/impl_kombu.py:686 +#: neutron/openstack/common/rpc/impl_qpid.py:629 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "未能将消息发布到主题“%(topic)s”:%(err_str)s" + +#: neutron/openstack/common/rpc/impl_qpid.py:191 +msgid "Failed to process message... skipping it." +msgstr "未能处理消息... 正在跳过该消息。" + +#: neutron/openstack/common/rpc/impl_qpid.py:517 +#, python-format +msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds" +msgstr "无法连接至 AMQP 服务器:%(e)s。正在休眠,持续时间为 %(delay)s 秒" + +#: neutron/openstack/common/rpc/impl_qpid.py:602 +msgid "Error processing message. Skipping it." +msgstr "处理消息时出错。正在跳过该消息。" + +#: neutron/openstack/common/rpc/impl_zmq.py:96 +msgid "JSON serialization failed." +msgstr "JSON 序列化失败。" + +#: neutron/openstack/common/rpc/impl_zmq.py:195 +msgid "ZeroMQ socket could not be closed." +msgstr "" + +#: neutron/openstack/common/rpc/impl_zmq.py:339 +msgid "RPC message did not include method." +msgstr "RPC 消息未包括方法。" + +#: neutron/openstack/common/rpc/impl_zmq.py:476 +msgid "Topic socket file creation failed." +msgstr "主题套接字文件创建失败。" + +#: neutron/openstack/common/rpc/impl_zmq.py:482 +#, python-format +msgid "" +"Local per-topic backlog buffer full for topic %(topic)s. Dropping message." +msgstr "对于主题 %(topic)s,本地“每主题”储备缓冲区已满。正在删除消息。" + +#: neutron/openstack/common/rpc/impl_zmq.py:498 +#, python-format +msgid "Required IPC directory does not exist at %s" +msgstr "在%s不存在需要的IPC目录" + +#: neutron/openstack/common/rpc/impl_zmq.py:507 +#, python-format +msgid "Permission denied to IPC directory at %s" +msgstr "在 %s 的IPC目录的权限被拒绝" + +#: neutron/openstack/common/rpc/impl_zmq.py:510 +msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use." 
+msgstr "未能创建 ZeroMQ 接收器守护程序。套接字可能已在使用中。" + +#: neutron/openstack/common/rpc/impl_zmq.py:563 +msgid "ZMQ Envelope version unsupported or unknown." +msgstr "ZMQ 包络版本不受支持或未知。" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..ccd2e998 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. +# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/neutron/" +"language/zh_CN/)\n" +"Language: zh_CN\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "捕获到 %s,正在退出" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "父进程已意外终止,正在退出" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "派生速度太快,正在休眠" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "已启动子代 %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "正在启动 %d 工作程序" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "信号 %(sig)d 已终止子代 %(pid)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "子代 %(pid)s 已退出,状态为 %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "捕获到 %s,正在停止子代" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." 
+msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "正在等待 %d 个子代退出" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "正在从表 %(table)s 中删除具有id %(id)s 的重复行" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "正在重新连接至 %(hostname)s:%(port)d 上的 AMQP 服务器" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "已连接至 %(hostname)s:%(port)d 上的 AMQP 服务器" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "已连接至 %s 上的 AMQP 服务器" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "正在注册反应器" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "已注册内部反应器" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "正在使用套接字" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "正在为主题创建代理:%s" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." +msgstr "正在跳过主题注册。已注册。" + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "Matchmaker已注销: %(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po b/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po new file mode 100644 index 00000000..c6e025cc --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po @@ -0,0 +1,128 @@ +# Translations template for neutron. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the neutron project. 
+# +# Translators: +msgid "" +msgstr "" +"Project-Id-Version: Neutron\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-16 06:08+0000\n" +"PO-Revision-Date: 2014-05-29 07:49+0000\n" +"Last-Translator: FULL NAME \n" +"Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/neutron/" +"language/zh_TW/)\n" +"Language: zh_TW\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: neutron/openstack/common/periodic_task.py:125 +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" + +#: neutron/openstack/common/periodic_task.py:130 +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "" + +#: neutron/openstack/common/service.py:176 +#, python-format +msgid "Caught %s, exiting" +msgstr "已捕捉到 %s,正在結束" + +#: neutron/openstack/common/service.py:240 +msgid "Parent process has died unexpectedly, exiting" +msgstr "母程序已非預期地當掉,正在結束" + +#: neutron/openstack/common/service.py:271 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: neutron/openstack/common/service.py:310 +msgid "Forking too fast, sleeping" +msgstr "分岔太快,正在休眠" + +#: neutron/openstack/common/service.py:329 +#, python-format +msgid "Started child %d" +msgstr "已開始子行程 %d" + +#: neutron/openstack/common/service.py:339 +#, python-format +msgid "Starting %d workers" +msgstr "正在啟動 %d 個工作程式" + +#: neutron/openstack/common/service.py:356 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "信號 %(sig)d 結束了子項 %(pid)d" + +#: neutron/openstack/common/service.py:360 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "子項 %(pid)s 已結束,狀態為 %(code)d" + +#: neutron/openstack/common/service.py:399 +#, python-format +msgid "Caught %s, stopping children" +msgstr "已捕捉到 %s,正在停止子項" + +#: neutron/openstack/common/service.py:408 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: neutron/openstack/common/service.py:424 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "正在等待 %d 個子項結束" + +#: neutron/openstack/common/db/sqlalchemy/utils.py:379 +#, python-format +msgid "Deleting duplicated row with id: %(id)s from table: %(table)s" +msgstr "" + +#: neutron/openstack/common/rpc/impl_kombu.py:498 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "正在重新連接至 %(hostname)s:%(port)d 上的 AMQP 伺服器" + +#: neutron/openstack/common/rpc/impl_kombu.py:520 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "已連接至 %(hostname)s:%(port)d 上的 AMQP 伺服器" + +#: neutron/openstack/common/rpc/impl_qpid.py:523 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "已連接至 %s 上的 AMQP 伺服器" + +#: neutron/openstack/common/rpc/impl_zmq.py:371 +msgid "Registering reactor" +msgstr "正在登錄反應程式" + +#: neutron/openstack/common/rpc/impl_zmq.py:383 +msgid "In reactor registered" +msgstr "已登錄輸入反應程式" + +#: neutron/openstack/common/rpc/impl_zmq.py:388 +msgid "Consuming socket" +msgstr "正在耗用 Socket" + +#: neutron/openstack/common/rpc/impl_zmq.py:438 +#, python-format +msgid "Creating proxy for topic: %s" +msgstr "正在給主題 %s 建立 Proxy" + +#: neutron/openstack/common/rpc/impl_zmq.py:591 +msgid "Skipping topic registration. Already registered." 
+msgstr "正在跳過主題登錄。已登錄。" + +#: neutron/openstack/common/rpc/matchmaker.py:216 +#, python-format +msgid "Matchmaker unregistered: %(key)s, %(host)s" +msgstr "已取消登錄符合程式:%(key)s, %(host)s" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/manager.py b/icehouse-patches/neutron/dvr-patch/neutron/manager.py new file mode 100644 index 00000000..3a21f617 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/manager.py @@ -0,0 +1,225 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import weakref + +from oslo.config import cfg + +from neutron.common import rpc as n_rpc +from neutron.common import utils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import periodic_task +from neutron.plugins.common import constants + +from stevedore import driver + + +LOG = logging.getLogger(__name__) + + +class Manager(n_rpc.RpcCallback, periodic_task.PeriodicTasks): + + # Set RPC API version to 1.0 by default. + RPC_API_VERSION = '1.0' + + def __init__(self, host=None): + if not host: + host = cfg.CONF.host + self.host = host + super(Manager, self).__init__() + + def periodic_tasks(self, context, raise_on_error=False): + self.run_periodic_tasks(context, raise_on_error=raise_on_error) + + def init_host(self): + """Handle initialization if this is a standalone service. + + Child classes should override this method. + + """ + pass + + def after_start(self): + """Handler post initialization stuff. + + Child classes can override this method. + """ + pass + + +def validate_post_plugin_load(): + """Checks if the configuration variables are valid. + + If the configuration is invalid then the method will return an error + message. If all is OK then it will return None. + """ + if ('dhcp_agents_per_network' in cfg.CONF and + cfg.CONF.dhcp_agents_per_network <= 0): + msg = _("dhcp_agents_per_network must be >= 1. '%s' " + "is invalid.") % cfg.CONF.dhcp_agents_per_network + return msg + + +def validate_pre_plugin_load(): + """Checks if the configuration variables are valid. + + If the configuration is invalid then the method will return an error + message. If all is OK then it will return None. + """ + if cfg.CONF.core_plugin is None: + msg = _('Neutron core_plugin not configured!') + return msg + + +class NeutronManager(object): + """Neutron's Manager class. + + Neutron's Manager class is responsible for parsing a config file and + instantiating the correct plugin that concretely implements + neutron_plugin_base class. + The caller should make sure that NeutronManager is a singleton. + """ + _instance = None + + def __init__(self, options=None, config_file=None): + # If no options have been provided, create an empty dict + if not options: + options = {} + + msg = validate_pre_plugin_load() + if msg: + LOG.critical(msg) + raise Exception(msg) + + # NOTE(jkoelker) Testing for the subclass with the __subclasshook__ + # breaks tach monitoring. 
It has been removed + # intentionally to allow v2 plugins to be monitored + # for performance metrics. + plugin_provider = cfg.CONF.core_plugin + LOG.info(_("Loading core plugin: %s"), plugin_provider) + self.plugin = self._get_plugin_instance('neutron.core_plugins', + plugin_provider) + msg = validate_post_plugin_load() + if msg: + LOG.critical(msg) + raise Exception(msg) + + # core plugin as a part of plugin collection simplifies + # checking extensions + # TODO(enikanorov): make core plugin the same as + # the rest of service plugins + self.service_plugins = {constants.CORE: self.plugin} + self._load_service_plugins() + + def _get_plugin_instance(self, namespace, plugin_provider): + try: + # Try to resolve plugin by name + mgr = driver.DriverManager(namespace, plugin_provider) + plugin_class = mgr.driver + except RuntimeError as e1: + # fallback to class name + try: + plugin_class = importutils.import_class(plugin_provider) + except ImportError as e2: + LOG.exception(_("Error loading plugin by name, %s"), e1) + LOG.exception(_("Error loading plugin by class, %s"), e2) + raise ImportError(_("Plugin not found.")) + return plugin_class() + + def _load_services_from_core_plugin(self): + """Puts core plugin in service_plugins for supported services.""" + LOG.debug(_("Loading services supported by the core plugin")) + + # supported service types are derived from supported extensions + for ext_alias in getattr(self.plugin, + "supported_extension_aliases", []): + if ext_alias in constants.EXT_TO_SERVICE_MAPPING: + service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias] + self.service_plugins[service_type] = self.plugin + LOG.info(_("Service %s is supported by the core plugin"), + service_type) + + def _load_service_plugins(self): + """Loads service plugins. + + Starts from the core plugin and checks if it supports + advanced services then loads classes provided in configuration. + """ + # load services from the core plugin first + self._load_services_from_core_plugin() + + plugin_providers = cfg.CONF.service_plugins + LOG.debug(_("Loading service plugins: %s"), plugin_providers) + for provider in plugin_providers: + if provider == '': + continue + + LOG.info(_("Loading Plugin: %s"), provider) + plugin_inst = self._get_plugin_instance('neutron.service_plugins', + provider) + + # only one implementation of svc_type allowed + # specifying more than one plugin + # for the same type is a fatal exception + if plugin_inst.get_plugin_type() in self.service_plugins: + raise ValueError(_("Multiple plugins for service " + "%s were configured") % + plugin_inst.get_plugin_type()) + + self.service_plugins[plugin_inst.get_plugin_type()] = plugin_inst + + # search for possible agent notifiers declared in service plugin + # (needed by agent management extension) + if (hasattr(self.plugin, 'agent_notifiers') and + hasattr(plugin_inst, 'agent_notifiers')): + self.plugin.agent_notifiers.update(plugin_inst.agent_notifiers) + + LOG.debug(_("Successfully loaded %(type)s plugin. 
" + "Description: %(desc)s"), + {"type": plugin_inst.get_plugin_type(), + "desc": plugin_inst.get_plugin_description()}) + + @classmethod + @utils.synchronized("manager") + def _create_instance(cls): + if not cls.has_instance(): + cls._instance = cls() + + @classmethod + def has_instance(cls): + return cls._instance is not None + + @classmethod + def clear_instance(cls): + cls._instance = None + + @classmethod + def get_instance(cls): + # double checked locking + if not cls.has_instance(): + cls._create_instance() + return cls._instance + + @classmethod + def get_plugin(cls): + # Return a weakref to minimize gc-preventing references. + return weakref.proxy(cls.get_instance().plugin) + + @classmethod + def get_service_plugins(cls): + # Return weakrefs to minimize gc-preventing references. + return dict((x, weakref.proxy(y)) + for x, y in cls.get_instance().service_plugins.iteritems()) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/neutron_plugin_base_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/neutron_plugin_base_v2.py new file mode 100644 index 00000000..8c0c7804 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/neutron_plugin_base_v2.py @@ -0,0 +1,352 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +v2 Neutron Plug-in API specification. + +:class:`NeutronPluginBaseV2` provides the definition of minimum set of +methods that needs to be implemented by a v2 Neutron Plug-in. +""" + +import abc +import six + + +@six.add_metaclass(abc.ABCMeta) +class NeutronPluginBaseV2(object): + + @abc.abstractmethod + def create_subnet(self, context, subnet): + """Create a subnet. + + Create a subnet, which represents a range of IP addresses + that can be allocated to devices + + :param context: neutron api request context + :param subnet: dictionary describing the subnet, with keys + as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object + in :file:`neutron/api/v2/attributes.py`. All keys will + be populated. + """ + pass + + @abc.abstractmethod + def update_subnet(self, context, id, subnet): + """Update values of a subnet. + + :param context: neutron api request context + :param id: UUID representing the subnet to update. + :param subnet: dictionary with keys indicating fields to update. + valid keys are those that have a value of True for + 'allow_put' as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. + """ + pass + + @abc.abstractmethod + def get_subnet(self, context, id, fields=None): + """Retrieve a subnet. + + :param context: neutron api request context + :param id: UUID representing the subnet to fetch. + :param fields: a list of strings that are valid keys in a + subnet dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. 
+ """ + pass + + @abc.abstractmethod + def get_subnets(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Retrieve a list of subnets. + + The contents of the list depends on + the identity of the user making the request (as indicated by the + context) as well as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` + object in :file:`neutron/api/v2/attributes.py`. + Values in this dictiontary are an iterable containing + values that will be used for an exact match comparison + for that value. Each result returned by this + function will have matched one of the values for each + key in filters. + :param fields: a list of strings that are valid keys in a + subnet dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + def get_subnets_count(self, context, filters=None): + """Return the number of subnets. + + The result depends on the identity of + the user making the request (as indicated by the context) as well as + any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a network as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Values in this + dictiontary are an iterable containing values that + will be used for an exact match comparison for that + value. Each result returned by this function will + have matched one of the values for each key in filters. + + .. note:: this method is optional, as it was not part of the originally + defined plugin API. + """ + raise NotImplementedError + + @abc.abstractmethod + def delete_subnet(self, context, id): + """Delete a subnet. + + :param context: neutron api request context + :param id: UUID representing the subnet to delete. + """ + pass + + @abc.abstractmethod + def create_network(self, context, network): + """Create a network. + + Create a network, which represents an L2 network segment which + can have a set of subnets and ports associated with it. + + :param context: neutron api request context + :param network: dictionary describing the network, with keys + as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object + in :file:`neutron/api/v2/attributes.py`. All keys will + be populated. + + """ + pass + + @abc.abstractmethod + def update_network(self, context, id, network): + """Update values of a network. + + :param context: neutron api request context + :param id: UUID representing the network to update. + :param network: dictionary with keys indicating fields to update. + valid keys are those that have a value of True for + 'allow_put' as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. + """ + pass + + @abc.abstractmethod + def get_network(self, context, id, fields=None): + """Retrieve a network. + + :param context: neutron api request context + :param id: UUID representing the network to fetch. + :param fields: a list of strings that are valid keys in a + network dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. 
+ """ + pass + + @abc.abstractmethod + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Retrieve a list of networks. + + The contents of the list depends on + the identity of the user making the request (as indicated by the + context) as well as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a network as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Values in this + dictiontary are an iterable containing values that will + be used for an exact match comparison for that value. + Each result returned by this function will have matched + one of the values for each key in filters. + :param fields: a list of strings that are valid keys in a + network dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + def get_networks_count(self, context, filters=None): + """Return the number of networks. + + The result depends on the identity + of the user making the request (as indicated by the context) as well + as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a network as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object + in :file:`neutron/api/v2/attributes.py`. Values in + this dictiontary are an iterable containing values that + will be used for an exact match comparison for that + value. Each result returned by this function will have + matched one of the values for each key in filters. + + NOTE: this method is optional, as it was not part of the originally + defined plugin API. + """ + raise NotImplementedError + + @abc.abstractmethod + def delete_network(self, context, id): + """Delete a network. + + :param context: neutron api request context + :param id: UUID representing the network to delete. + """ + pass + + @abc.abstractmethod + def create_port(self, context, port): + """Create a port. + + Create a port, which is a connection point of a device (e.g., a VM + NIC) to attach to a L2 neutron network. + + :param context: neutron api request context + :param port: dictionary describing the port, with keys as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. All keys will be + populated. + """ + pass + + @abc.abstractmethod + def update_port(self, context, id, port): + """Update values of a port. + + :param context: neutron api request context + :param id: UUID representing the port to update. + :param port: dictionary with keys indicating fields to update. + valid keys are those that have a value of True for + 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` + object in :file:`neutron/api/v2/attributes.py`. + """ + pass + + @abc.abstractmethod + def get_port(self, context, id, fields=None): + """Retrieve a port. + + :param context: neutron api request context + :param id: UUID representing the port to fetch. + :param fields: a list of strings that are valid keys in a port + dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + @abc.abstractmethod + def get_ports(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Retrieve a list of ports. 
+ + The contents of the list depends on the identity of the user making + the request (as indicated by the context) as well as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` + object in :file:`neutron/api/v2/attributes.py`. Values + in this dictiontary are an iterable containing values + that will be used for an exact match comparison for + that value. Each result returned by this function will + have matched one of the values for each key in filters. + :param fields: a list of strings that are valid keys in a + port dictionary as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Only these fields + will be returned. + """ + pass + + def get_ports_count(self, context, filters=None): + """Return the number of ports. + + The result depends on the identity of the user making the request + (as indicated by the context) as well as any filters. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for + a network as listed in the + :obj:`RESOURCE_ATTRIBUTE_MAP` object in + :file:`neutron/api/v2/attributes.py`. Values in this + dictiontary are an iterable containing values that will + be used for an exact match comparison for that value. + Each result returned by this function will have matched + one of the values for each key in filters. + + .. note:: this method is optional, as it was not part of the originally + defined plugin API. + """ + raise NotImplementedError + + @abc.abstractmethod + def delete_port(self, context, id): + """Delete a port. + + :param context: neutron api request context + :param id: UUID representing the port to delete. + """ + pass + + def start_rpc_listeners(self): + """Start the RPC listeners. + + Most plugins start RPC listeners implicitly on initialization. In + order to support multiple process RPC, the plugin needs to expose + control over when this is started. + + .. note:: this method is optional, as it was not part of the originally + defined plugin API. + """ + raise NotImplementedError + + def rpc_workers_supported(self): + """Return whether the plugin supports multiple RPC workers. + + A plugin that supports multiple RPC workers should override the + start_rpc_listeners method to ensure that this method returns True and + that start_rpc_listeners is called at the appropriate time. + Alternately, a plugin can override this method to customize detection + of support for multiple rpc workers + + .. note:: this method is optional, as it was not part of the originally + defined plugin API. + """ + return (self.__class__.start_rpc_listeners != + NeutronPluginBaseV2.start_rpc_listeners) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/notifiers/nova.py b/icehouse-patches/neutron/dvr-patch/neutron/notifiers/nova.py new file mode 100644 index 00000000..4c233ff1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/notifiers/nova.py @@ -0,0 +1,249 @@ +# Copyright (c) 2014 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
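NeutronPluginBaseV2 above is the complete contract a v2 core plugin has to satisfy. The fragment below is a deliberately toy, in-memory sketch of that contract for networks only: the class name and its storage dict are invented for illustration, the {'network': {...}} request nesting follows the v2 API convention assumed here, and the remaining subnet/port methods (all abstract) would have to be filled in the same way before the class could actually be instantiated.

    import uuid

    from neutron import neutron_plugin_base_v2


    class InMemoryPlugin(neutron_plugin_base_v2.NeutronPluginBaseV2):
        """Toy illustration of the v2 plugin contract, networks only."""

        def __init__(self):
            self._networks = {}

        def create_network(self, context, network):
            data = dict(network['network'])
            data['id'] = str(uuid.uuid4())
            self._networks[data['id']] = data
            return data

        def update_network(self, context, id, network):
            self._networks[id].update(network['network'])
            return self._networks[id]

        def get_network(self, context, id, fields=None):
            net = self._networks[id]
            if fields:
                return dict((k, net[k]) for k in fields if k in net)
            return dict(net)

        def get_networks(self, context, filters=None, fields=None,
                         sorts=None, limit=None, marker=None,
                         page_reverse=False):
            # filters maps a key to an iterable of acceptable values and is
            # applied as an exact match, as the base class docstrings describe.
            return [n for n in self._networks.values()
                    if not filters or all(n.get(k) in v
                                          for k, v in filters.items())]

        def delete_network(self, context, id):
            self._networks.pop(id, None)
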
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +from novaclient import exceptions as nova_exceptions +import novaclient.v1_1.client as nclient +from novaclient.v1_1.contrib import server_external_events +from oslo.config import cfg +from sqlalchemy.orm import attributes as sql_attr + +from neutron.common import constants +from neutron import context +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils + + +LOG = logging.getLogger(__name__) + +VIF_UNPLUGGED = 'network-vif-unplugged' +VIF_PLUGGED = 'network-vif-plugged' +NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed', + constants.PORT_STATUS_ERROR: 'failed', + constants.PORT_STATUS_DOWN: 'completed'} + + +class Notifier(object): + + def __init__(self): + # TODO(arosen): we need to cache the endpoints and figure out + # how to deal with different regions here.... + bypass_url = "%s/%s" % (cfg.CONF.nova_url, + cfg.CONF.nova_admin_tenant_id) + self.nclient = nclient.Client( + username=cfg.CONF.nova_admin_username, + api_key=cfg.CONF.nova_admin_password, + project_id=None, + tenant_id=cfg.CONF.nova_admin_tenant_id, + auth_url=cfg.CONF.nova_admin_auth_url, + cacert=cfg.CONF.nova_ca_certificates_file, + insecure=cfg.CONF.nova_api_insecure, + bypass_url=bypass_url, + region_name=cfg.CONF.nova_region_name, + extensions=[server_external_events]) + self.pending_events = [] + self._waiting_to_send = False + + def queue_event(self, event): + """Called to queue sending an event with the next batch of events. + + Sending events individually, as they occur, has been problematic as it + can result in a flood of sends. Previously, there was a loopingcall + thread that would send batched events on a periodic interval. However, + maintaining a persistent thread in the loopingcall was also + problematic. + + This replaces the loopingcall with a mechanism that creates a + short-lived thread on demand when the first event is queued. That + thread will sleep once for the same send_events_interval to allow other + events to queue up in pending_events and then will send them when it + wakes. + + If a thread is already alive and waiting, this call will simply queue + the event and return leaving it up to the thread to send it. + + :param event: the event that occurred. 
+ """ + if not event: + return + + self.pending_events.append(event) + + if self._waiting_to_send: + return + + self._waiting_to_send = True + + def last_out_sends(): + eventlet.sleep(cfg.CONF.send_events_interval) + self._waiting_to_send = False + self.send_events() + + eventlet.spawn_n(last_out_sends) + + def _is_compute_port(self, port): + try: + if (port['device_id'] and uuidutils.is_uuid_like(port['device_id']) + and port['device_owner'].startswith('compute:')): + return True + except (KeyError, AttributeError): + pass + return False + + def _get_network_changed_event(self, device_id): + return {'name': 'network-changed', + 'server_uuid': device_id} + + @property + def _plugin(self): + # NOTE(arosen): this cannot be set in __init__ currently since + # this class is initalized at the same time as NeutronManager() + # which is decorated with synchronized() + if not hasattr(self, '_plugin_ref'): + self._plugin_ref = manager.NeutronManager.get_plugin() + return self._plugin_ref + + def send_network_change(self, action, original_obj, + returned_obj): + """Called when a network change is made that nova cares about. + + :param action: the event that occurred. + :param original_obj: the previous value of resource before action. + :param returned_obj: the body returned to client as result of action. + """ + + if not cfg.CONF.notify_nova_on_port_data_changes: + return + + event = self.create_port_changed_event(action, original_obj, + returned_obj) + self.queue_event(event) + + def create_port_changed_event(self, action, original_obj, returned_obj): + port = None + if action == 'update_port': + port = returned_obj['port'] + + elif action in ['update_floatingip', 'create_floatingip', + 'delete_floatingip']: + # NOTE(arosen) if we are associating a floatingip the + # port_id is in the returned_obj. Otherwise on disassociate + # it's in the original_object + port_id = (returned_obj['floatingip'].get('port_id') or + original_obj.get('port_id')) + + if port_id is None: + return + + ctx = context.get_admin_context() + port = self._plugin.get_port(ctx, port_id) + + if port and self._is_compute_port(port): + return self._get_network_changed_event(port['device_id']) + + def record_port_status_changed(self, port, current_port_status, + previous_port_status, initiator): + """Determine if nova needs to be notified due to port status change. + """ + # clear out previous _notify_event + port._notify_event = None + # If there is no device_id set there is nothing we can do here. + if not port.device_id: + LOG.debug(_("device_id is not set on port yet.")) + return + + if not port.id: + LOG.warning(_("Port ID not set! Nova will not be notified of " + "port status change.")) + return + + # We only want to notify about nova ports. + if not self._is_compute_port(port): + return + + # We notify nova when a vif is unplugged which only occurs when + # the status goes from ACTIVE to DOWN. + if (previous_port_status == constants.PORT_STATUS_ACTIVE and + current_port_status == constants.PORT_STATUS_DOWN): + event_name = VIF_UNPLUGGED + + # We only notify nova when a vif is plugged which only occurs + # when the status goes from: + # NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR. 
+ elif (previous_port_status in [sql_attr.NO_VALUE, + constants.PORT_STATUS_DOWN, + constants.PORT_STATUS_BUILD] + and current_port_status in [constants.PORT_STATUS_ACTIVE, + constants.PORT_STATUS_ERROR]): + event_name = VIF_PLUGGED + # All the remaining state transitions are of no interest to nova + else: + LOG.debug(_("Ignoring state change previous_port_status: " + "%(pre_status)s current_port_status: %(cur_status)s" + " port_id %(id)s") % + {'pre_status': previous_port_status, + 'cur_status': current_port_status, + 'id': port.id}) + return + + port._notify_event = ( + {'server_uuid': port.device_id, + 'name': event_name, + 'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status), + 'tag': port.id}) + + def send_port_status(self, mapper, connection, port): + event = getattr(port, "_notify_event", None) + self.queue_event(event) + port._notify_event = None + + def send_events(self): + if not self.pending_events: + return + + batched_events = self.pending_events + self.pending_events = [] + + LOG.debug(_("Sending events: %s"), batched_events) + try: + response = self.nclient.server_external_events.create( + batched_events) + except nova_exceptions.NotFound: + LOG.warning(_("Nova returned NotFound for event: %s"), + batched_events) + except Exception: + LOG.exception(_("Failed to notify nova on events: %s"), + batched_events) + else: + if not isinstance(response, list): + LOG.error(_("Error response returned from nova: %s"), + response) + return + response_error = False + for event in response: + try: + code = event['code'] + except KeyError: + response_error = True + continue + if code != 200: + LOG.warning(_("Nova event: %s returned with failed " + "status"), event) + else: + LOG.info(_("Nova event response: %s"), event) + if response_error: + LOG.error(_("Error response returned from nova: %s"), + response) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/gettextutils.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/gettextutils.py new file mode 100644 index 00000000..1ec1499f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/gettextutils.py @@ -0,0 +1,498 @@ +# Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. + +Usual usage in an openstack.common module: + + from neutron.openstack.common.gettextutils import _ +""" + +import copy +import functools +import gettext +import locale +from logging import handlers +import os + +from babel import localedata +import six + +_AVAILABLE_LANGUAGES = {} + +# FIXME(dhellmann): Remove this when moving to oslo.i18n. +USE_LAZY = False + + +class TranslatorFactory(object): + """Create translator functions + """ + + def __init__(self, domain, lazy=False, localedir=None): + """Establish a set of translation functions for the domain. + + :param domain: Name of translation domain, + specifying a message catalog. 
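The Notifier above avoids keeping a persistent looping thread around by debouncing: the first queued event spawns a short-lived greenthread that sleeps for send_events_interval and then flushes whatever has piled up in pending_events. The self-contained sketch below isolates just that pattern; the class name, the two-second interval and the print-based sender are illustrative stand-ins, not values taken from this patch.

    import eventlet

    SEND_INTERVAL = 2  # stand-in for cfg.CONF.send_events_interval


    class BatchingSender(object):
        """Queue events and flush them once per interval, Notifier-style."""

        def __init__(self, send):
            self._send = send
            self._pending = []
            self._waiting_to_send = False

        def queue_event(self, event):
            self._pending.append(event)
            if self._waiting_to_send:
                return  # a flusher greenthread is already scheduled
            self._waiting_to_send = True

            def last_out_sends():
                # Sleep once so further events can accumulate, then flush.
                eventlet.sleep(SEND_INTERVAL)
                self._waiting_to_send = False
                batch, self._pending = self._pending, []
                self._send(batch)

            eventlet.spawn_n(last_out_sends)


    def report(batch):
        print('sent %d event(s)' % len(batch))


    if __name__ == '__main__':
        sender = BatchingSender(report)
        for i in range(5):
            sender.queue_event({'name': 'network-changed', 'seq': i})
        # Yield to the hub long enough for the flusher to run once.
        eventlet.sleep(SEND_INTERVAL + 1)  # prints: sent 5 event(s)
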
+ :type domain: str + :param lazy: Delays translation until a message is emitted. + Defaults to False. + :type lazy: Boolean + :param localedir: Directory with translation catalogs. + :type localedir: str + """ + self.domain = domain + self.lazy = lazy + if localedir is None: + localedir = os.environ.get(domain.upper() + '_LOCALEDIR') + self.localedir = localedir + + def _make_translation_func(self, domain=None): + """Return a new translation function ready for use. + + Takes into account whether or not lazy translation is being + done. + + The domain can be specified to override the default from the + factory, but the localedir from the factory is always used + because we assume the log-level translation catalogs are + installed in the same directory as the main application + catalog. + + """ + if domain is None: + domain = self.domain + if self.lazy: + return functools.partial(Message, domain=domain) + t = gettext.translation( + domain, + localedir=self.localedir, + fallback=True, + ) + if six.PY3: + return t.gettext + return t.ugettext + + @property + def primary(self): + "The default translation function." + return self._make_translation_func() + + def _make_log_translation_func(self, level): + return self._make_translation_func(self.domain + '-log-' + level) + + @property + def log_info(self): + "Translate info-level log messages." + return self._make_log_translation_func('info') + + @property + def log_warning(self): + "Translate warning-level log messages." + return self._make_log_translation_func('warning') + + @property + def log_error(self): + "Translate error-level log messages." + return self._make_log_translation_func('error') + + @property + def log_critical(self): + "Translate critical-level log messages." + return self._make_log_translation_func('critical') + + +# NOTE(dhellmann): When this module moves out of the incubator into +# oslo.i18n, these global variables can be moved to an integration +# module within each application. + +# Create the global translation functions. +_translators = TranslatorFactory('neutron') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical + +# NOTE(dhellmann): End of globals that will move to the application's +# integration module. + + +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext + + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + # FIXME(dhellmann): This function will be removed in oslo.i18n, + # because the TranslatorFactory makes it superfluous. + global _, _LI, _LW, _LE, _LC, USE_LAZY + tf = TranslatorFactory('neutron', lazy=True) + _ = tf.primary + _LI = tf.log_info + _LW = tf.log_warning + _LE = tf.log_error + _LC = tf.log_critical + USE_LAZY = True + + +def install(domain, lazy=False): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. 
+ + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + :param domain: the translation domain + :param lazy: indicates whether or not to install the lazy _() function. + The lazy _() introduces a way to do deferred translation + of messages by installing a _ that builds Message objects, + instead of strings, which can then be lazily translated into + any available locale. + """ + if lazy: + from six import moves + tf = TranslatorFactory(domain, lazy=True) + moves.builtins.__dict__['_'] = tf.primary + else: + localedir = '%s_LOCALEDIR' % domain.upper() + if six.PY3: + gettext.install(domain, + localedir=os.environ.get(localedir)) + else: + gettext.install(domain, + localedir=os.environ.get(localedir), + unicode=True) + + +class Message(six.text_type): + """A Message object is a unicode object that can be translated. + + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ + + def __new__(cls, msgid, msgtext=None, params=None, + domain='neutron', *args): + """Create a new Message object. + + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. 
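+        # (For example, with lazy translation enabled, something like
+        # _('Bad volume: %(vol)s') % {'vol': _('unknown')} keeps both
+        # Messages, and the call below renders each one in desired_locale.
+        # The message text is illustrative only.)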
+ # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) + if six.PY3: + translator = lang.gettext + else: + translator = lang.ugettext + + translated_message = translator(msgid) + return translated_message + + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + params = self._sanitize_mod_params(other) + unicode_mod = super(Message, self).__mod__(params) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=params, + domain=self.domain) + return modded + + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. + + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + # Merge the dictionaries + # Copy each item in case one does not support deep copy. + params = {} + if isinstance(self.params, dict): + for key, val in self.params.items(): + params[key] = self._copy_param(val) + for key, val in other.items(): + params[key] = self._copy_param(val) + else: + params = self._copy_param(other) + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except Exception: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + if six.PY2: + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) + + +def get_available_languages(domain): + """Lists the available languages for the given translation domain. 
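+
+    For illustration, get_available_languages('neutron') could return
+    something like ['en_US', 'zh_CN', 'fr'], depending on which message
+    catalogs are actually installed (the values shown are only an example).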
+ + :param domain: the domain to get languages for + """ + if domain in _AVAILABLE_LANGUAGES: + return copy.copy(_AVAILABLE_LANGUAGES[domain]) + + localedir = '%s_LOCALEDIR' % domain.upper() + find = lambda x: gettext.find(domain, + localedir=os.environ.get(localedir), + languages=[x]) + + # NOTE(mrodden): en_US should always be available (and first in case + # order matters) since our in-line message strings are en_US + language_list = ['en_US'] + # NOTE(luisg): Babel <1.0 used a function called list(), which was + # renamed to locale_identifiers() in >=1.0, the requirements master list + # requires >=0.9.6, uncapped, so defensively work with both. We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. + aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale_, alias) in six.iteritems(aliases): + if locale_ in language_list and alias not in language_list: + language_list.append(alias) + + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. + + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. + + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. 
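+
+    A small illustrative case: _translate_args(('volume', _('not found')),
+    'fr_FR') returns a tuple whose second element has been translated,
+    assuming lazy Messages are in use; plain strings pass through unchanged.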
+ + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. + + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. + """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. + handlers.MemoryHandler.__init__(self, capacity=0, target=target) + self.locale = locale + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) + + def emit(self, record): + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) + + self.target.emit(record) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/importutils.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/importutils.py new file mode 100644 index 00000000..4038808c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/importutils.py @@ -0,0 +1,73 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. +""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class.""" + mod_str, _sep, class_str = import_str.rpartition('.') + __import__(mod_str) + try: + return getattr(sys.modules[mod_str], class_str) + except AttributeError: + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """Tries to import object from default namespace. + + Imports a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. + """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def import_versioned_module(version, submodule=None): + module = 'neutron.v%s' % version + if submodule: + module = '.'.join((module, submodule)) + return import_module(module) + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/jsonutils.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/jsonutils.py new file mode 100644 index 00000000..502d6bcd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/jsonutils.py @@ -0,0 +1,186 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +JSON related utilities. + +This module provides a few things: + + 1) A handy function for getting an object down to something that can be + JSON serialized. See to_primitive(). + + 2) Wrappers around loads() and dumps(). The dumps() wrapper will + automatically use to_primitive() for you if needed. + + 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson + is available. 
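+
+A rough usage sketch (the values below are illustrative only):
+
+    import datetime
+    from neutron.openstack.common import jsonutils
+
+    body = jsonutils.dumps({'created': datetime.datetime.utcnow(),
+                            'ports': set(['a', 'b'])})
+    data = jsonutils.loads(body)
+
+dumps() falls back to to_primitive(), so the datetime becomes a formatted
+string and the set becomes a list before serialization.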
+''' + + +import codecs +import datetime +import functools +import inspect +import itertools +import sys + +if sys.version_info < (2, 7): + # On Python <= 2.6, json module is not C boosted, so try to use + # simplejson module if available + try: + import simplejson as json + except ImportError: + import json +else: + import json + +import six +import six.moves.xmlrpc_client as xmlrpclib + +from neutron.openstack.common import gettextutils +from neutron.openstack.common import importutils +from neutron.openstack.common import strutils +from neutron.openstack.common import timeutils + +netaddr = importutils.try_import("netaddr") + +_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + +_simple_types = (six.string_types + six.integer_types + + (type(None), bool, float)) + + +def to_primitive(value, convert_instances=False, convert_datetime=True, + level=0, max_depth=3): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + # handle obvious types first - order of basic types determined by running + # full tests on nova project, resulting in the following counts: + # 572754 + # 460353 + # 379632 + # 274610 + # 199918 + # 114200 + # 51817 + # 26164 + # 6491 + # 283 + # 19 + if isinstance(value, _simple_types): + return value + + if isinstance(value, datetime.datetime): + if convert_datetime: + return timeutils.strtime(value) + else: + return value + + # value of itertools.count doesn't get caught by nasty_type_tests + # and results in infinite loop when list(value) is called. + if type(value) == itertools.count: + return six.text_type(value) + + # FIXME(vish): Workaround for LP bug 852095. Without this workaround, + # tests that raise an exception in a mocked method that + # has a @wrap_exception with a notifier will fail. If + # we up the dependency to 0.5.4 (when it is released) we + # can remove this workaround. + if getattr(value, '__module__', None) == 'mox': + return 'mock' + + if level > max_depth: + return '?' + + # The try block may not be necessary after the class check above, + # but just in case ... 
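+    # A couple of illustrative results for the branches below (assuming the
+    # default arguments):
+    #   to_primitive(set(['a', 'b']))             -> ['a', 'b']
+    #   to_primitive(datetime.datetime.utcnow())  -> a formatted time string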
+ try: + recursive = functools.partial(to_primitive, + convert_instances=convert_instances, + convert_datetime=convert_datetime, + level=level, + max_depth=max_depth) + if isinstance(value, dict): + return dict((k, recursive(v)) for k, v in six.iteritems(value)) + elif isinstance(value, (list, tuple)): + return [recursive(lv) for lv in value] + + # It's not clear why xmlrpclib created their own DateTime type, but + # for our purposes, make it a datetime type which is explicitly + # handled + if isinstance(value, xmlrpclib.DateTime): + value = datetime.datetime(*tuple(value.timetuple())[:6]) + + if convert_datetime and isinstance(value, datetime.datetime): + return timeutils.strtime(value) + elif isinstance(value, gettextutils.Message): + return value.data + elif hasattr(value, 'iteritems'): + return recursive(dict(value.iteritems()), level=level + 1) + elif hasattr(value, '__iter__'): + return recursive(list(value)) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. + return recursive(value.__dict__, level=level + 1) + elif netaddr and isinstance(value, netaddr.IPAddress): + return six.text_type(value) + else: + if any(test(value) for test in _nasty_type_tests): + return six.text_type(value) + return value + except TypeError: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return six.text_type(value) + + +def dumps(value, default=to_primitive, **kwargs): + return json.dumps(value, default=default, **kwargs) + + +def loads(s, encoding='utf-8', **kwargs): + return json.loads(strutils.safe_decode(s, encoding), **kwargs) + + +def load(fp, encoding='utf-8', **kwargs): + return json.load(codecs.getreader(encoding)(fp), **kwargs) + + +try: + import anyjson +except ImportError: + pass +else: + anyjson._modules.append((__name__, 'dumps', TypeError, + 'loads', ValueError, 'load')) + anyjson.force_implementation(__name__) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/log.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/log.py new file mode 100644 index 00000000..d767b898 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/log.py @@ -0,0 +1,732 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""OpenStack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. 
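+
+A minimal usage sketch (illustrative only):
+
+    from neutron.openstack.common import log as logging
+
+    LOG = logging.getLogger(__name__)
+    LOG.info('Agent %(name)s started', {'name': 'dhcp'})
+
+getLogger() returns a ContextAdapter, so the project, version and any request
+context stored in thread-local storage are added to each record. A service
+normally calls logging.setup('neutron') once, after configuration has been
+parsed.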
+ +""" + +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import re +import sys +import traceback + +from oslo.config import cfg +import six +from six import moves + +from neutron.openstack.common.gettextutils import _ +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import local + + +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS = [] +_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])' + '.*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*).*?([\s])'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS.append(reg_ex) + + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config-append', + metavar='PATH', + deprecated_name='log-config', + help='The name of a logging configuration file. This file ' + 'is appended to any existing logging configuration ' + 'files. For details about logging configuration files, ' + 'see the Python logging module documentation.'), + cfg.StrOpt('log-format', + metavar='FORMAT', + help='DEPRECATED. ' + 'A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'This option is deprecated. Please use ' + 'logging_context_format_string and ' + 'logging_default_format_string instead.'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s .'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. ' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths.'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging. ' + 'Existing syslog format is DEPRECATED during I, ' + 'and will change in J to honor RFC5424.'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Enables or disables syslog rfc5424 format ' + 'for logging. If enabled, prefixes the MSG part of the ' + 'syslog message with APP-NAME (RFC5424). 
The ' + 'format without the APP-NAME is deprecated in I, ' + 'and will be removed in J.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='Syslog facility to receive log lines.') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error.') +] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user_identity)s] ' + '%(instance)s%(message)s', + help='Format string to use for log messages with context.'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='Format string to use for log messages without context.'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='Data to append to log format when level is DEBUG.'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='Prefix each line of exception output with this format.'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqp=WARN', + 'amqplib=WARN', + 'boto=WARN', + 'qpid=WARN', + 'sqlalchemy=WARN', + 'suds=INFO', + 'oslo.messaging=INFO', + 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN' + ], + help='List of logger=LEVEL pairs.'), + cfg.BoolOpt('publish_errors', + default=False, + help='Enables or disables publication of error events.'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='Enables or disables fatal status of deprecations.'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. + cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='The format for an instance that is passed with the log ' + 'message. '), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='The format for an instance UUID that is passed with the ' + 'log message. '), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. +logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + return None + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. 
+ + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. + if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message + + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + self._deprecated_messages_sent = dict() + + @property + def handlers(self): + return self.logger.handlers + + def deprecated(self, msg, *args, **kwargs): + """Call this method when a deprecated feature is used. + + If the system is configured for fatal deprecations then the message + is logged at the 'critical' level and :class:`DeprecatedConfig` will + be raised. + + Otherwise, the message will be logged (once) at the 'warn' level. + + :raises: :class:`DeprecatedConfig` if the system is configured for + fatal deprecations. + + """ + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + + # Using a list because a tuple with dict can't be stored in a set. + sent_args = self._deprecated_messages_sent.setdefault(msg, list()) + + if args in sent_args: + # Already logged this message, so don't log it again. 
+ return + + sent_args.append(args) + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + # NOTE(mrodden): catch any Message/other object and + # coerce to unicode before they can get + # to the python logging and possibly + # cause string encoding trouble + if not isinstance(msg, six.string_types): + msg = six.text_type(msg) + + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_uuid = (extra.get('instance_uuid') or + kwargs.pop('instance_uuid', None)) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + elif instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra['instance'] = instance_extra + + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. + self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [moves.filter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(exc_type, value, tb): + extra = {'exc_info': (exc_type, value, tb)} + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) + return logging_excepthook + + +class LogConfigError(Exception): + + message = _('Error loading logging config %(log_config)s: %(err_msg)s') + + def __init__(self, log_config, err_msg): + self.log_config = log_config + self.err_msg = err_msg + + def __str__(self): + return self.message % dict(log_config=self.log_config, + err_msg=self.err_msg) + + +def _load_log_config(log_config_append): + try: + logging.config.fileConfig(log_config_append, + disable_existing_loggers=False) + except moves.configparser.Error as exc: + raise LogConfigError(log_config_append, six.text_type(exc)) + + +def setup(product_name, version='unknown'): + """Setup logging.""" + if CONF.log_config_append: + _load_log_config(CONF.log_config_append) + else: + _setup_logging_from_conf(product_name, version) + sys.excepthook = 
_create_logging_excepthook(product_name) + + +def set_defaults(logging_context_format_string): + cfg.set_defaults( + log_opts, logging_context_format_string=logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = _get_binary_name() + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). + # Use old style calls, if the type is 'classobj' (Python 2.6) + logging.handlers.SysLogHandler.__init__(self, *args, **kwargs) + + def format(self, record): + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). + # Use old style calls, if the type is 'classobj' (Python 2.6) + msg = logging.handlers.SysLogHandler.format(self, record) + msg = self.binary_name + ' ' + msg + return msg + + +def _setup_logging_from_conf(project, version): + log_root = getLogger(None).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + if CONF.use_syslog: + facility = _find_facility_from_conf() + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not logpath: + # pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + try: + handler = importutils.import_object( + "neutron.openstack.common.log_handler.PublishErrorsHandler", + logging.ERROR) + except ImportError: + handler = importutils.import_object( + "oslo.messaging.notify.log_handler.PublishErrorsHandler", + logging.ERROR) + log_root.addHandler(handler) + + datefmt = CONF.log_date_format + for handler in log_root.handlers: + # NOTE(alaski): CONF.log_format overrides everything currently. This + # should be deprecated in favor of context aware formatting. 
+ if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + log_root.info('Deprecated: log_format is now deprecated and will ' + 'be removed in the next release') + else: + handler.setFormatter(ContextFormatter(project=project, + version=version, + datefmt=datefmt)) + + if CONF.debug: + log_root.setLevel(logging.DEBUG) + elif CONF.verbose: + log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) + + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + logger = logging.getLogger(mod) + # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name + # to integer code. + if sys.version_info < (2, 7): + level = logging.getLevelName(level_name) + logger.setLevel(level) + else: + logger.setLevel(level_name) + + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +def getLazyLogger(name='unknown', version='unknown'): + """Returns lazy logger. + + Creates a pass-through logger that does not create the real logger + until it is really needed and delegates all calls to the real logger + once it is created. + """ + return LazyAdapter(name, version) + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg.rstrip()) + + +class ContextFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. + + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + If available, uses the context value stored in TLS - local.store.context + + """ + + def __init__(self, *args, **kwargs): + """Initialize ContextFormatter instance + + Takes additional keyword arguments which can be used in the message + format string. 
+ + :keyword project: project name + :type project: string + :keyword version: project version + :type version: string + + """ + + self.project = kwargs.pop('project', 'unknown') + self.version = kwargs.pop('version', 'unknown') + + logging.Formatter.__init__(self, *args, **kwargs) + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + + # store project info + record.project = self.project + record.version = self.version + + # store request info + context = getattr(local.store, 'context', None) + if context: + d = _dictify_context(context) + for k, v in d.items(): + setattr(record, k, v) + + # NOTE(sdague): default the fancier formatting params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color', 'user_identity'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id'): + fmt = CONF.logging_context_format_string + else: + fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + fmt += " " + CONF.logging_debug_format_suffix + + if sys.version_info < (3, 2): + self._fmt = fmt + else: + self._style = logging.PercentStyle(fmt) + self._fmt = self._style._fmt + # Cache this on the record, Logger will respect our formatted copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = moves.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + + def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/periodic_task.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/periodic_task.py new file mode 100644 index 00000000..1ebdc1af --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/periodic_task.py @@ -0,0 +1,183 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time + +from oslo.config import cfg +import six + +from neutron.openstack.common.gettextutils import _, _LE, _LI +from neutron.openstack.common import log as logging + + +periodic_opts = [ + cfg.BoolOpt('run_external_periodic_tasks', + default=True, + help='Some periodic tasks can be run in a separate process. ' + 'Should we run them here?'), +] + +CONF = cfg.CONF +CONF.register_opts(periodic_opts) + +LOG = logging.getLogger(__name__) + +DEFAULT_INTERVAL = 60.0 + + +class InvalidPeriodicTaskArg(Exception): + message = _("Unexpected argument for periodic task creation: %(arg)s.") + + +def periodic_task(*args, **kwargs): + """Decorator to indicate that a method is a periodic task. + + This decorator can be used in two ways: + + 1. Without arguments '@periodic_task', this will be run on the default + interval of 60 seconds. + + 2. With arguments: + @periodic_task(spacing=N [, run_immediately=[True|False]]) + this will be run on approximately every N seconds. If this number is + negative the periodic task will be disabled. If the run_immediately + argument is provided and has a value of 'True', the first run of the + task will be shortly after task scheduler starts. If + run_immediately is omitted or set to 'False', the first time the + task runs will be approximately N seconds after the task scheduler + starts. + """ + def decorator(f): + # Test for old style invocation + if 'ticks_between_runs' in kwargs: + raise InvalidPeriodicTaskArg(arg='ticks_between_runs') + + # Control if run at all + f._periodic_task = True + f._periodic_external_ok = kwargs.pop('external_process_ok', False) + if f._periodic_external_ok and not CONF.run_external_periodic_tasks: + f._periodic_enabled = False + else: + f._periodic_enabled = kwargs.pop('enabled', True) + + # Control frequency + f._periodic_spacing = kwargs.pop('spacing', 0) + f._periodic_immediate = kwargs.pop('run_immediately', False) + if f._periodic_immediate: + f._periodic_last_run = None + else: + f._periodic_last_run = time.time() + return f + + # NOTE(sirp): The `if` is necessary to allow the decorator to be used with + # and without parents. + # + # In the 'with-parents' case (with kwargs present), this function needs to + # return a decorator function since the interpreter will invoke it like: + # + # periodic_task(*args, **kwargs)(f) + # + # In the 'without-parents' case, the original function will be passed + # in as the first argument, like: + # + # periodic_task(f) + if kwargs: + return decorator + else: + return decorator(args[0]) + + +class _PeriodicTasksMeta(type): + def __init__(cls, names, bases, dict_): + """Metaclass that allows us to collect decorated periodic tasks.""" + super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) + + # NOTE(sirp): if the attribute is not present then we must be the base + # class, so, go ahead an initialize it. If the attribute is present, + # then we're a subclass so make a copy of it so we don't step on our + # parent's toes. 
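+        # Illustrative example of what gets collected here (the manager class
+        # and task name are assumptions, not part of this module):
+        #
+        #     class DhcpAgentManager(PeriodicTasks):
+        #         @periodic_task(spacing=30)
+        #         def _sync_networks(self, context):
+        #             ...
+        #
+        # _sync_networks then ends up in cls._periodic_tasks with a spacing
+        # of 30 seconds, and run_periodic_tasks() will invoke it on that
+        # interval.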
+ try: + cls._periodic_tasks = cls._periodic_tasks[:] + except AttributeError: + cls._periodic_tasks = [] + + try: + cls._periodic_spacing = cls._periodic_spacing.copy() + except AttributeError: + cls._periodic_spacing = {} + + for value in cls.__dict__.values(): + if getattr(value, '_periodic_task', False): + task = value + name = task.__name__ + + if task._periodic_spacing < 0: + LOG.info(_LI('Skipping periodic task %(task)s because ' + 'its interval is negative'), + {'task': name}) + continue + if not task._periodic_enabled: + LOG.info(_LI('Skipping periodic task %(task)s because ' + 'it is disabled'), + {'task': name}) + continue + + # A periodic spacing of zero indicates that this task should + # be run on the default interval to avoid running too + # frequently. + if task._periodic_spacing == 0: + task._periodic_spacing = DEFAULT_INTERVAL + + cls._periodic_tasks.append((name, task)) + cls._periodic_spacing[name] = task._periodic_spacing + + +@six.add_metaclass(_PeriodicTasksMeta) +class PeriodicTasks(object): + def __init__(self): + super(PeriodicTasks, self).__init__() + self._periodic_last_run = {} + for name, task in self._periodic_tasks: + self._periodic_last_run[name] = task._periodic_last_run + + def run_periodic_tasks(self, context, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + idle_for = DEFAULT_INTERVAL + for task_name, task in self._periodic_tasks: + full_task_name = '.'.join([self.__class__.__name__, task_name]) + + spacing = self._periodic_spacing[task_name] + last_run = self._periodic_last_run[task_name] + + # If a periodic task is _nearly_ due, then we'll run it early + idle_for = min(idle_for, spacing) + if last_run is not None: + delta = last_run + spacing - time.time() + if delta > 0.2: + idle_for = min(idle_for, delta) + continue + + LOG.debug("Running periodic task %(full_task_name)s", + {"full_task_name": full_task_name}) + self._periodic_last_run[task_name] = time.time() + + try: + task(self, context) + except Exception as e: + if raise_on_error: + raise + LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"), + {"full_task_name": full_task_name, "e": e}) + time.sleep(0) + + return idle_for diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/service.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/service.py new file mode 100644 index 00000000..79ae9bc5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/service.py @@ -0,0 +1,512 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import logging as std_logging +import os +import random +import signal +import sys +import time + +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. 
+ from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + +import eventlet +from eventlet import event +from oslo.config import cfg + +from neutron.openstack.common import eventlet_backdoor +from neutron.openstack.common.gettextutils import _LE, _LI, _LW +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import systemd +from neutron.openstack.common import threadgroup + + +rpc = importutils.try_import('neutron.openstack.common.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. + # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. + is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. + is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. + return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self.services = Services() + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + service.backdoor_port = self.backdoor_port + self.services.add(service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self.services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + self.services.wait() + + def restart(self): + """Reload config files and restart service. 
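+
+        For illustration, ServiceLauncher.wait() ends up calling this when
+        the process is running as a daemon and receives SIGHUP.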
+ + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + raise SignalExit(signo) + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 + + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + if ready_callback: + ready_callback() + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + finally: + self.stop() + if rpc: + try: + rpc.cleanup() + except Exception: + # We're shutting down, so it doesn't matter at this point. + LOG.exception(_LE('Exception during rpc cleanup.')) + + return status, signo + + def wait(self, ready_callback=None): + systemd.notify_once() + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self, wait_interval=0.01): + """Constructor. + + :param wait_interval: The interval to sleep for between checks + of child process exit. + """ + self.children = {} + self.sigcaught = None + self.running = True + self.wait_interval = wait_interval + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_LI('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process_handle_signal(self): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. 
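+        # For reference, a typical entry point reaches this code through the
+        # module-level launch() helper (MyService is illustrative only):
+        #
+        #     launcher = service.launch(MyService(), workers=4)
+        #     launcher.wait()
+        #
+        # With workers > 1 a ProcessLauncher forks the children, and each
+        # child's exit path runs through this method.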
+ try: + launcher.wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Child caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_LE('Unhandled exception')) + status = 2 + finally: + launcher.stop() + + return status, signo + + def _child_process(self, service): + self._child_process_handle_signal() + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.launch_service(service) + return launcher + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. + if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_LI('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() + + os._exit(status) + + LOG.info(_LI('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_LI('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_LI('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_LW('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def _respawn_children(self): + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(self.wait_interval) + continue + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + systemd.notify_once() + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + while True: + self.handle_signal() + self._respawn_children() + # No signal means that stop was called. Don't clean up here. 
+ if not self.sigcaught: + return + + signame = _signo_to_signame(self.sigcaught) + LOG.info(_LI('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None + except eventlet.greenlet.GreenletExit: + LOG.info(_LI("Wait called after thread killed. Cleaning up.")) + + self.stop() + + def stop(self): + """Terminate child processes and wait on each.""" + self.running = False + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + # signal that the service is done shutting itself down: + self._done = event.Event() + + def reset(self): + # NOTE(Fengqian): docs for Event.reset() recommend against using it + self._done = event.Event() + + def start(self): + pass + + def stop(self): + self.tg.stop() + self.tg.wait() + # Signal that service cleanup is done: + if not self._done.ready(): + self._done.send() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = event.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait for graceful shutdown of services: + for service in self.services: + service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + if not self.done.ready(): + self.done.send() + + # reap threads: + self.tg.stop() + + def wait(self): + self.tg.wait() + + def restart(self): + self.stop() + self.done = event.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) + + @staticmethod + def run_service(service, done): + """Service start wrapper. + + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + + +def launch(service, workers=1): + if workers is None or workers == 1: + launcher = ServiceLauncher() + launcher.launch_service(service) + else: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + + return launcher diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/strutils.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/strutils.py new file mode 100644 index 00000000..8c796d4f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/strutils.py @@ -0,0 +1,239 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. +""" + +import math +import re +import sys +import unicodedata + +import six + +from neutron.openstack.common.gettextutils import _ + + +UNIT_PREFIX_EXPONENT = { + 'k': 1, + 'K': 1, + 'Ki': 1, + 'M': 2, + 'Mi': 2, + 'G': 3, + 'Gi': 3, + 'T': 4, + 'Ti': 4, +} +UNIT_SYSTEM_INFO = { + 'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')), + 'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')), +} + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + + +def int_from_bool_as_string(subject): + """Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False, default=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else returns the value specified by 'default'. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, six.string_types): + subject = six.text_type(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return default + + +def safe_decode(text, incoming=None, errors='strict'): + """Decodes incoming text/bytes string using `incoming` if they're not + already unicode. + + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. + :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, six.text_type): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. 
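+        # Illustrative behaviour of this fallback, assuming an ASCII default
+        # encoding and UTF-8 encoded input bytes:
+        #
+        #   >>> safe_decode(b'caf\xc3\xa9', incoming='ascii')
+        #   u'caf\xe9'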
+ return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """Encodes incoming text/bytes string using `encoding`. + + If incoming is not specified, text is expected to be encoded with + current python's default encoding. (`sys.getdefaultencoding`) + + :param incoming: Text's current encoding + :param encoding: Expected encoding for text (Default UTF-8) + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a bytestring `encoding` encoded + representation of it. + :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be encoded" % type(text)) + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + if isinstance(text, six.text_type): + return text.encode(encoding, errors) + elif text and encoding != incoming: + # Decode text before encoding it with `encoding` + text = safe_decode(text, incoming, errors) + return text.encode(encoding, errors) + else: + return text + + +def string_to_bytes(text, unit_system='IEC', return_int=False): + """Converts a string into an float representation of bytes. + + The units supported for IEC :: + + Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it) + KB, KiB, MB, MiB, GB, GiB, TB, TiB + + The units supported for SI :: + + kb(it), Mb(it), Gb(it), Tb(it) + kB, MB, GB, TB + + Note that the SI unit system does not support capital letter 'K' + + :param text: String input for bytes size conversion. + :param unit_system: Unit system for byte size conversion. + :param return_int: If True, returns integer representation of text + in bytes. (default: decimal) + :returns: Numerical representation of text in bytes. + :raises ValueError: If text has an invalid value. + + """ + try: + base, reg_ex = UNIT_SYSTEM_INFO[unit_system] + except KeyError: + msg = _('Invalid unit system: "%s"') % unit_system + raise ValueError(msg) + match = reg_ex.match(text) + if match: + magnitude = float(match.group(1)) + unit_prefix = match.group(2) + if match.group(3) in ['b', 'bit']: + magnitude /= 8 + else: + msg = _('Invalid string format: %s') % text + raise ValueError(msg) + if not unit_prefix: + res = magnitude + else: + res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix]) + if return_int: + return int(math.ceil(res)) + return res + + +def to_slug(value, incoming=None, errors="strict"): + """Normalize string. + + Convert to lowercase, remove non-word characters, and convert spaces + to hyphens. + + Inspired by Django's `slugify` filter. + + :param value: Text to slugify + :param incoming: Text's current encoding + :param errors: Errors handling policy. 
See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: slugified unicode representation of `value` + :raises TypeError: If text is not an instance of str + """ + value = safe_decode(value, incoming, errors) + # NOTE(aababilov): no need to use safe_(encode|decode) here: + # encodings are always "ascii", error handling is always "ignore" + # and types are always known (first: unicode; second: str) + value = unicodedata.normalize("NFKD", value).encode( + "ascii", "ignore").decode("ascii") + value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() + return SLUGIFY_HYPHENATE_RE.sub("-", value) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/systemd.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/systemd.py new file mode 100644 index 00000000..cc02caba --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/systemd.py @@ -0,0 +1,104 @@ +# Copyright 2012-2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper module for systemd service readiness notification. +""" + +import os +import socket +import sys + +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def _abstractify(socket_name): + if socket_name.startswith('@'): + # abstract namespace socket + socket_name = '\0%s' % socket_name[1:] + return socket_name + + +def _sd_notify(unset_env, msg): + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + try: + sock.connect(_abstractify(notify_socket)) + sock.sendall(msg) + if unset_env: + del os.environ['NOTIFY_SOCKET'] + except EnvironmentError: + LOG.debug("Systemd notification failed", exc_info=True) + finally: + sock.close() + + +def notify(): + """Send notification to Systemd that service is ready. + For details see + http://www.freedesktop.org/software/systemd/man/sd_notify.html + """ + _sd_notify(False, 'READY=1') + + +def notify_once(): + """Send notification once to Systemd that service is ready. + Systemd sets NOTIFY_SOCKET environment variable with the name of the + socket listening for notifications from services. + This method removes the NOTIFY_SOCKET environment variable to ensure + notification is sent only once. + """ + _sd_notify(True, 'READY=1') + + +def onready(notify_socket, timeout): + """Wait for systemd style notification on the socket. 
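+
+    Illustrative pairing with notify(), assuming the service's NOTIFY_SOCKET
+    environment variable points at the same (example) abstract address::
+
+        ret = onready('@readiness', timeout=5.0)  # 0 once notify() has run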
+ + :param notify_socket: local socket address + :type notify_socket: string + :param timeout: socket timeout + :type timeout: float + :returns: 0 service ready + 1 service not ready + 2 timeout occurred + """ + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.settimeout(timeout) + sock.bind(_abstractify(notify_socket)) + try: + msg = sock.recv(512) + except socket.timeout: + return 2 + finally: + sock.close() + if 'READY=1' in msg: + return 0 + else: + return 1 + + +if __name__ == '__main__': + # simple CLI for testing + if len(sys.argv) == 1: + notify() + elif len(sys.argv) >= 2: + timeout = float(sys.argv[1]) + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + retval = onready(notify_socket, timeout) + sys.exit(retval) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/timeutils.py b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/timeutils.py new file mode 100644 index 00000000..c48da95f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/openstack/common/timeutils.py @@ -0,0 +1,210 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. +""" + +import calendar +import datetime +import time + +import iso8601 +import six + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format.""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format.""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(six.text_type(e)) + except TypeError as e: + raise ValueError(six.text_type(e)) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object.""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, six.string_types): + before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if 
isinstance(after, six.string_types): + after = parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + if utcnow.override_time is None: + # NOTE(kgriffs): This is several times faster + # than going through calendar.timegm(...) + return int(time.time()) + + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns an iso8601 formatted date from timestamp.""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=None): + """Overrides utils.utcnow. + + Make it return a constant time or a list thereof, one at a time. + + :param override_time: datetime instance or list thereof. If not + given, defaults to the current UTC time. + """ + utcnow.override_time = override_time or datetime.datetime.utcnow() + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert utcnow.override_time is not None + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times. + """ + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """Return the difference between two timing objects. + + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """Determines if time is going to happen in the next window seconds. 
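+
+    Illustrative example (values assumed)::
+
+        dt = utcnow() + datetime.timedelta(seconds=30)
+        is_soon(dt, 60)   # True
+        is_soon(dt, 10)   # False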
+ + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/__init__.py new file mode 100644 index 00000000..131b31cc --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 Big Switch Networks, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/agent/restproxy_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/agent/restproxy_agent.py new file mode 100644 index 00000000..caf57e4b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/agent/restproxy_agent.py @@ -0,0 +1,181 @@ +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Kevin Benton, kevin.benton@bigswitch.com + +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron import context as q_context +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import excutils +from neutron.openstack.common import log +from neutron.plugins.bigswitch import config as pl_config + +LOG = log.getLogger(__name__) + + +class IVSBridge(ovs_lib.OVSBridge): + ''' + This class does not provide parity with OVS using IVS. + It's only the bare minimum necessary to use IVS with this agent. + ''' + def run_vsctl(self, args, check_error=False): + full_args = ["ivs-ctl"] + args + try: + return utils.execute(full_args, root_helper=self.root_helper) + except Exception as e: + with excutils.save_and_reraise_exception() as ctxt: + LOG.error(_("Unable to execute %(cmd)s. 
" + "Exception: %(exception)s"), + {'cmd': full_args, 'exception': e}) + if not check_error: + ctxt.reraise = False + + def get_vif_port_set(self): + port_names = self.get_port_name_list() + edge_ports = set(port_names) + return edge_ports + + def get_vif_port_by_id(self, port_id): + # IVS in nova uses hybrid method with last 14 chars of UUID + name = 'qvo%s' % port_id[:14] + if name in self.get_vif_port_set(): + return name + return False + + +class PluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class SecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall() + + +class RestProxyAgent(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, polling_interval, root_helper, vs='ovs'): + super(RestProxyAgent, self).__init__() + self.polling_interval = polling_interval + self._setup_rpc() + self.sg_agent = SecurityGroupAgent(self.context, + self.plugin_rpc, + root_helper) + if vs == 'ivs': + self.int_br = IVSBridge(integ_br, root_helper) + else: + self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) + + def _setup_rpc(self): + self.topic = topics.AGENT + self.plugin_rpc = PluginApi(topics.PLUGIN) + self.context = q_context.get_admin_context_without_session() + self.endpoints = [self] + consumers = [[topics.PORT, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + def port_update(self, context, **kwargs): + LOG.debug(_("Port update received")) + port = kwargs.get('port') + vif_port = self.int_br.get_vif_port_by_id(port['id']) + if not vif_port: + LOG.debug(_("Port %s is not present on this host."), port['id']) + return + + LOG.debug(_("Port %s found. Refreshing firewall."), port['id']) + if ext_sg.SECURITYGROUPS in port: + self.sg_agent.refresh_firewall() + + def _update_ports(self, registered_ports): + ports = self.int_br.get_vif_port_set() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def _process_devices_filter(self, port_info): + if 'added' in port_info: + self.sg_agent.prepare_devices_filter(port_info['added']) + if 'removed' in port_info: + self.sg_agent.remove_devices_filter(port_info['removed']) + + def daemon_loop(self): + ports = set() + + while True: + start = time.time() + try: + port_info = self._update_ports(ports) + if port_info: + LOG.debug(_("Agent loop has new device")) + self._process_devices_filter(port_info) + ports = port_info['current'] + except Exception: + LOG.exception(_("Error in agent event loop")) + + elapsed = max(time.time() - start, 0) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. 
%(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + + +def main(): + config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + pl_config.register_config() + + integ_br = cfg.CONF.RESTPROXYAGENT.integration_bridge + polling_interval = cfg.CONF.RESTPROXYAGENT.polling_interval + root_helper = cfg.CONF.AGENT.root_helper + bsnagent = RestProxyAgent(integ_br, polling_interval, root_helper, + cfg.CONF.RESTPROXYAGENT.virtual_switch_type) + bsnagent.daemon_loop() + sys.exit(0) + +if __name__ == "__main__": + main() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/config.py new file mode 100644 index 00000000..431cdd44 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/config.py @@ -0,0 +1,122 @@ +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mandeep Dhami, Big Switch Networks, Inc. +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Kevin Benton, Big Switch Networks, Inc. + +""" +This module manages configuration options +""" + +from oslo.config import cfg + +from neutron.agent.common import config as agconfig +from neutron.common import utils +from neutron.extensions import portbindings + +restproxy_opts = [ + cfg.ListOpt('servers', default=['localhost:8800'], + help=_("A comma separated list of Big Switch or Floodlight " + "servers and port numbers. The plugin proxies the " + "requests to the Big Switch/Floodlight server, " + "which performs the networking configuration. 
Only one" + "server is needed per deployment, but you may wish to" + "deploy multiple servers to support failover.")), + cfg.StrOpt('server_auth', secret=True, + help=_("The username and password for authenticating against " + " the Big Switch or Floodlight controller.")), + cfg.BoolOpt('server_ssl', default=True, + help=_("If True, Use SSL when connecting to the Big Switch or " + "Floodlight controller.")), + cfg.BoolOpt('ssl_sticky', default=True, + help=_("Trust and store the first certificate received for " + "each controller address and use it to validate future " + "connections to that address.")), + cfg.BoolOpt('no_ssl_validation', default=False, + help=_("Disables SSL certificate validation for controllers")), + cfg.BoolOpt('cache_connections', default=True, + help=_("Re-use HTTP/HTTPS connections to the controller.")), + cfg.StrOpt('ssl_cert_directory', + default='/etc/neutron/plugins/bigswitch/ssl', + help=_("Directory containing ca_certs and host_certs " + "certificate directories.")), + cfg.BoolOpt('sync_data', default=False, + help=_("Sync data on connect")), + cfg.BoolOpt('auto_sync_on_failure', default=True, + help=_("If neutron fails to create a resource because " + "the backend controller doesn't know of a dependency, " + "the plugin automatically triggers a full data " + "synchronization to the controller.")), + cfg.IntOpt('consistency_interval', default=60, + help=_("Time between verifications that the backend controller " + "database is consistent with Neutron. (0 to disable)")), + cfg.IntOpt('server_timeout', default=10, + help=_("Maximum number of seconds to wait for proxy request " + "to connect and complete.")), + cfg.IntOpt('thread_pool_size', default=4, + help=_("Maximum number of threads to spawn to handle large " + "volumes of port creations.")), + cfg.StrOpt('neutron_id', default='neutron-' + utils.get_hostname(), + deprecated_name='quantum_id', + help=_("User defined identifier for this Neutron deployment")), + cfg.BoolOpt('add_meta_server_route', default=True, + help=_("Flag to decide if a route to the metadata server " + "should be injected into the VM")), +] +router_opts = [ + cfg.MultiStrOpt('tenant_default_router_rule', default=['*:any:any:permit'], + help=_("The default router rules installed in new tenant " + "routers. Repeat the config option for each rule. 
" + "Format is :::" + " Use an * to specify default for all tenants.")), + cfg.IntOpt('max_router_rules', default=200, + help=_("Maximum number of router rules")), +] +nova_opts = [ + cfg.StrOpt('vif_type', default='ovs', + help=_("Virtual interface type to configure on " + "Nova compute nodes")), +] + +# Each VIF Type can have a list of nova host IDs that are fixed to that type +for i in portbindings.VIF_TYPES: + opt = cfg.ListOpt('node_override_vif_' + i, default=[], + help=_("Nova compute nodes to manually set VIF " + "type to %s") % i) + nova_opts.append(opt) + +# Add the vif types for reference later +nova_opts.append(cfg.ListOpt('vif_types', + default=portbindings.VIF_TYPES, + help=_('List of allowed vif_type values.'))) + +agent_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_('Name of integration bridge on compute ' + 'nodes used for security group insertion.')), + cfg.IntOpt('polling_interval', default=5, + help=_('Seconds between agent checks for port changes')), + cfg.StrOpt('virtual_switch_type', default='ovs', + help=_('Virtual switch type.')) +] + + +def register_config(): + cfg.CONF.register_opts(restproxy_opts, "RESTPROXY") + cfg.CONF.register_opts(router_opts, "ROUTER") + cfg.CONF.register_opts(nova_opts, "NOVA") + cfg.CONF.register_opts(agent_opts, "RESTPROXYAGENT") + agconfig.register_root_helper(cfg.CONF) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/__init__.py new file mode 100644 index 00000000..71aba5c6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kevin Benton, Big Switch Networks, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/consistency_db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/consistency_db.py new file mode 100644 index 00000000..4d1a1db7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/consistency_db.py @@ -0,0 +1,82 @@ +# Copyright 2014, Big Switch Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import sqlalchemy as sa + +from neutron.common import exceptions +from neutron.db import api as db +from neutron.db import model_base +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class MultipleReadForUpdateCalls(exceptions.NeutronException): + message = _("Only one read_for_update call may be made at a time.") + + +class ConsistencyHash(model_base.BASEV2): + ''' + A simple table to store the latest consistency hash + received from a server. + For now we only support one global state so the + hash_id will always be '1' + ''' + __tablename__ = 'consistencyhashes' + hash_id = sa.Column(sa.String(255), + primary_key=True) + hash = sa.Column(sa.String(255), nullable=False) + + +class HashHandler(object): + ''' + A wrapper object to keep track of the session and hold the SQL + lock between the read and the update to prevent other servers + from reading the hash during a transaction. + ''' + def __init__(self, context=None, hash_id='1'): + self.hash_id = hash_id + self.session = db.get_session() if not context else context.session + self.hash_db_obj = None + self.transaction = None + + def read_for_update(self): + if self.transaction: + raise MultipleReadForUpdateCalls() + self.transaction = self.session.begin(subtransactions=True) + # Lock for update here to prevent another server from reading the hash + # while this one is in the middle of a transaction. + # This may not lock the SQL table in MySQL Galera deployments + # but that's okay because the worst case is a double-sync + res = (self.session.query(ConsistencyHash). + filter_by(hash_id=self.hash_id). + with_lockmode('update').first()) + if not res: + return '' + self.hash_db_obj = res + return res.hash + + def put_hash(self, hash): + hash = hash or '' + if not self.transaction: + self.transaction = self.session.begin(subtransactions=True) + if self.hash_db_obj is not None: + self.hash_db_obj.hash = hash + else: + conhash = ConsistencyHash(hash_id=self.hash_id, hash=hash) + self.session.merge(conhash) + self.transaction.commit() + self.transaction = None + LOG.debug(_("Consistency hash for group %(hash_id)s updated " + "to %(hash)s"), {'hash_id': self.hash_id, 'hash': hash}) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/porttracker_db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/porttracker_db.py new file mode 100644 index 00000000..2aaa2b41 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/db/porttracker_db.py @@ -0,0 +1,51 @@ +# Copyright 2013, Big Switch Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api.v2 import attributes +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def get_port_hostid(context, port_id): + # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db + # relational table generation until one of the functions is called. 
+ from neutron.db import portbindings_db + with context.session.begin(subtransactions=True): + query = context.session.query(portbindings_db.PortBindingPort) + res = query.filter_by(port_id=port_id).first() + if not res: + return False + return res.host + + +def put_port_hostid(context, port_id, host): + # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db + # relational table generation until one of the functions is called. + from neutron.db import portbindings_db + if not attributes.is_attr_set(host): + LOG.warning(_("No host_id in port request to track port location.")) + return + if port_id == '': + LOG.warning(_("Received an empty port ID for host_id '%s'"), host) + return + if host == '': + LOG.debug(_("Received an empty host_id for port '%s'"), port_id) + return + LOG.debug(_("Logging port %(port)s on host_id %(host)s"), + {'port': port_id, 'host': host}) + with context.session.begin(subtransactions=True): + location = portbindings_db.PortBindingPort(port_id=port_id, host=host) + context.session.merge(location) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/extensions/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/extensions/__init__.py new file mode 100644 index 00000000..71aba5c6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/extensions/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kevin Benton, Big Switch Networks, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/extensions/routerrule.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/extensions/routerrule.py new file mode 100644 index 00000000..6970dd65 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/extensions/routerrule.py @@ -0,0 +1,142 @@ +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kevin Benton, Big Switch Networks, Inc. 
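+#
+# NOTE: an illustrative router update body for the router_rules attribute
+# defined by this extension (addresses and nexthops are made-up examples);
+# it is validated and normalized by convert_to_valid_router_rules() below:
+#
+#   {"router": {"router_rules": [
+#       {"source": "10.0.0.0/24", "destination": "external",
+#        "action": "permit", "nexthops": ["10.0.0.254"]}]}}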
+ +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as qexception +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +# Router Rules Exceptions +class InvalidRouterRules(qexception.InvalidInput): + message = _("Invalid format for router rules: %(rule)s, %(reason)s") + + +class RulesExhausted(qexception.BadRequest): + message = _("Unable to complete rules update for %(router_id)s. " + "The number of rules exceeds the maximum %(quota)s.") + + +def convert_to_valid_router_rules(data): + """ + Validates and converts router rules to the appropriate data structure + Example argument = [{'source': 'any', 'destination': 'any', + 'action':'deny'}, + {'source': '1.1.1.1/32', 'destination': 'external', + 'action':'permit', + 'nexthops': ['1.1.1.254', '1.1.1.253']} + ] + """ + V4ANY = '0.0.0.0/0' + CIDRALL = ['any', 'external'] + if not isinstance(data, list): + emsg = _("Invalid data format for router rule: '%s'") % data + LOG.debug(emsg) + raise qexception.InvalidInput(error_message=emsg) + _validate_uniquerules(data) + rules = [] + expected_keys = ['source', 'destination', 'action'] + for rule in data: + rule['nexthops'] = rule.get('nexthops', []) + if not isinstance(rule['nexthops'], list): + rule['nexthops'] = rule['nexthops'].split('+') + + src = V4ANY if rule['source'] in CIDRALL else rule['source'] + dst = V4ANY if rule['destination'] in CIDRALL else rule['destination'] + + errors = [attr._verify_dict_keys(expected_keys, rule, False), + attr._validate_subnet(dst), + attr._validate_subnet(src), + _validate_nexthops(rule['nexthops']), + _validate_action(rule['action'])] + errors = [m for m in errors if m] + if errors: + LOG.debug(errors) + raise qexception.InvalidInput(error_message=errors) + rules.append(rule) + return rules + + +def _validate_nexthops(nexthops): + seen = [] + for ip in nexthops: + msg = attr._validate_ip_address(ip) + if ip in seen: + msg = _("Duplicate nexthop in rule '%s'") % ip + seen.append(ip) + if msg: + return msg + + +def _validate_action(action): + if action not in ['permit', 'deny']: + return _("Action must be either permit or deny." 
+ " '%s' was provided") % action + + +def _validate_uniquerules(rules): + pairs = [] + for r in rules: + if 'source' not in r or 'destination' not in r: + continue + pairs.append((r['source'], r['destination'])) + + if len(set(pairs)) != len(pairs): + error = _("Duplicate router rules (src,dst) found '%s'") % pairs + LOG.debug(error) + raise qexception.InvalidInput(error_message=error) + + +class Routerrule(object): + + @classmethod + def get_name(cls): + return "Neutron Router Rule" + + @classmethod + def get_alias(cls): + return "router_rules" + + @classmethod + def get_description(cls): + return "Router rule configuration for L3 router" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/neutron/routerrules/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-05-23T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} + +# Attribute Map +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + 'router_rules': {'allow_post': False, 'allow_put': True, + 'convert_to': convert_to_valid_router_rules, + 'is_visible': True, + 'default': attr.ATTR_NOT_SPECIFIED}, + } +} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/plugin.py new file mode 100644 index 00000000..3cc6e00d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/plugin.py @@ -0,0 +1,1139 @@ +# Copyright 2012 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mandeep Dhami, Big Switch Networks, Inc. +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. + +""" +Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers. + +NeutronRestProxy provides a generic neutron plugin that translates all plugin +function calls to equivalent authenticated REST calls to a set of redundant +external network controllers. It also keeps persistent store for all neutron +state to allow for re-sync of the external controller(s), if required. + +The local state on the plugin also allows for local response and fast-fail +semantics where it can be determined based on the local persistent store. + +Network controller specific code is decoupled from this plugin and expected +to reside on the controller itself (via the REST interface). 
+ +This allows for: + - independent authentication and redundancy schemes between neutron and the + network controller + - independent upgrade/development cycles between neutron and the controller + as it limits the proxy code upgrade requirement to neutron release cycle + and the controller specific code upgrade requirement to controller code + - ability to sync the controller with neutron for independent recovery/reset + +External REST API used by proxy is the same API as defined for neutron (JSON +subset) with some additional parameters (gateway on network-create and macaddr +on port-attach) on an additional PUT to do a bulk dump of all persistent data. +""" + +import copy +import functools +import httplib +import re + +import eventlet +from oslo.config import cfg +from sqlalchemy.orm import exc as sqlexc + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api import extensions as neutron_extensions +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.common import constants as const +from neutron.common import exceptions +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron import context as qcontext +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extradhcpopt_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.db import securitygroups_rpc_base as sg_rpc_base +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import external_net +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import l3 +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.bigswitch import config as pl_config +from neutron.plugins.bigswitch.db import porttracker_db +from neutron.plugins.bigswitch import extensions +from neutron.plugins.bigswitch import routerrule_db +from neutron.plugins.bigswitch import servermanager +from neutron.plugins.bigswitch import version + +LOG = logging.getLogger(__name__) + +SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin') +METADATA_SERVER_IP = '169.254.169.254' + + +class AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_port_update = topics.get_topic_name( + topic, topics.PORT, topics.UPDATE) + + def port_update(self, context, port): + self.fanout_cast(context, + self.make_msg('port_update', + port=port), + topic=self.topic_port_update) + + +class RestProxyCallbacks(n_rpc.RpcCallback, + sg_rpc_base.SecurityGroupServerRpcCallbackMixin, + dhcp_rpc_base.DhcpRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + def get_port_from_device(self, device): + port_id = re.sub(r"^tap", "", device) + port = self.get_port_and_sgs(port_id) + if port: + port['device'] = device + return port + + def get_port_and_sgs(self, port_id): + """Get port from database with 
security group info.""" + + LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + with session.begin(subtransactions=True): + query = session.query( + models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id + ) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id.startswith(port_id)) + port_and_sgs = query.all() + if not port_and_sgs: + return + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict['security_groups'] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + routerrule_db.RouterRule_db_mixin): + + supported_extension_aliases = ["binding"] + servers = None + + def _get_all_data(self, get_ports=True, get_floating_ips=True, + get_routers=True): + admin_context = qcontext.get_admin_context() + networks = [] + # this method is used by the ML2 driver so it can't directly invoke + # the self.get_(ports|networks) methods + plugin = manager.NeutronManager.get_plugin() + all_networks = plugin.get_networks(admin_context) or [] + for net in all_networks: + mapped_network = self._get_mapped_network_with_subnets(net) + flips_n_ports = mapped_network + if get_floating_ips: + flips_n_ports = self._get_network_with_floatingips( + mapped_network) + + if get_ports: + ports = [] + net_filter = {'network_id': [net.get('id')]} + net_ports = plugin.get_ports(admin_context, + filters=net_filter) or [] + for port in net_ports: + mapped_port = self._map_state_and_status(port) + mapped_port['attachment'] = { + 'id': port.get('device_id'), + 'mac': port.get('mac_address'), + } + mapped_port = self._extend_port_dict_binding(admin_context, + mapped_port) + ports.append(mapped_port) + flips_n_ports['ports'] = ports + + if flips_n_ports: + networks.append(flips_n_ports) + + data = {'networks': networks} + + if get_routers: + routers = [] + all_routers = self.get_routers(admin_context) or [] + for router in all_routers: + interfaces = [] + mapped_router = self._map_state_and_status(router) + router_filter = { + 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF], + 'device_id': [router.get('id')] + } + router_ports = self.get_ports(admin_context, + filters=router_filter) or [] + for port in router_ports: + net_id = port.get('network_id') + subnet_id = port['fixed_ips'][0]['subnet_id'] + intf_details = self._get_router_intf_details(admin_context, + net_id, + subnet_id) + interfaces.append(intf_details) + mapped_router['interfaces'] = interfaces + + routers.append(mapped_router) + + data.update({'routers': routers}) + return data + + def _send_all_data(self, send_ports=True, send_floating_ips=True, + send_routers=True, timeout=None, + triggered_by_tenant=None): + """Pushes all data to network ctrl (networks/ports, ports/attachments). + + This gives the controller an option to re-sync it's persistent store + with neutron's current view of that data. 
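+
+        Illustrative trigger, mirroring the auto-sync path in
+        async_port_create() below::
+
+            self._send_all_data(send_ports=True, send_floating_ips=True,
+                                send_routers=True,
+                                triggered_by_tenant=tenant_id)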
+ """ + data = self._get_all_data(send_ports, send_floating_ips, send_routers) + data['triggered_by_tenant'] = triggered_by_tenant + errstr = _("Unable to update remote topology: %s") + return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH, + data, errstr, timeout=timeout) + + def _get_network_with_floatingips(self, network, context=None): + if context is None: + context = qcontext.get_admin_context() + + net_id = network['id'] + net_filter = {'floating_network_id': [net_id]} + fl_ips = self.get_floatingips(context, + filters=net_filter) or [] + network['floatingips'] = fl_ips + + return network + + def _get_all_subnets_json_for_network(self, net_id, context=None): + if context is None: + context = qcontext.get_admin_context() + # start a sub-transaction to avoid breaking parent transactions + with context.session.begin(subtransactions=True): + subnets = self._get_subnets_by_network(context, + net_id) + subnets_details = [] + if subnets: + for subnet in subnets: + subnet_dict = self._make_subnet_dict(subnet) + mapped_subnet = self._map_state_and_status(subnet_dict) + subnets_details.append(mapped_subnet) + + return subnets_details + + def _get_mapped_network_with_subnets(self, network, context=None): + # if context is not provided, admin context is used + if context is None: + context = qcontext.get_admin_context() + network = self._map_state_and_status(network) + subnets = self._get_all_subnets_json_for_network(network['id'], + context) + network['subnets'] = subnets + for subnet in (subnets or []): + if subnet['gateway_ip']: + # FIX: For backward compatibility with wire protocol + network['gateway'] = subnet['gateway_ip'] + break + else: + network['gateway'] = '' + network[external_net.EXTERNAL] = self._network_is_external( + context, network['id']) + # include ML2 segmentation types + network['segmentation_types'] = getattr(self, "segmentation_types", "") + return network + + def _send_create_network(self, network, context=None): + tenant_id = network['tenant_id'] + mapped_network = self._get_mapped_network_with_subnets(network, + context) + self.servers.rest_create_network(tenant_id, mapped_network) + + def _send_update_network(self, network, context=None): + net_id = network['id'] + tenant_id = network['tenant_id'] + mapped_network = self._get_mapped_network_with_subnets(network, + context) + net_fl_ips = self._get_network_with_floatingips(mapped_network, + context) + self.servers.rest_update_network(tenant_id, net_id, net_fl_ips) + + def _send_delete_network(self, network, context=None): + net_id = network['id'] + tenant_id = network['tenant_id'] + self.servers.rest_delete_network(tenant_id, net_id) + + def _map_state_and_status(self, resource): + resource = copy.copy(resource) + + resource['state'] = ('UP' if resource.pop('admin_state_up', + True) else 'DOWN') + resource.pop('status', None) + + return resource + + def _warn_on_state_status(self, resource): + if resource.get('admin_state_up', True) is False: + LOG.warning(_("Setting admin_state_up=False is not supported " + "in this plugin version. Ignoring setting for " + "resource: %s"), resource) + + if 'status' in resource: + if resource['status'] != const.NET_STATUS_ACTIVE: + LOG.warning(_("Operational status is internally set by the " + "plugin. 
Ignoring setting status=%s."), + resource['status']) + + def _get_router_intf_details(self, context, intf_id, subnet_id): + + # we will use the network id as interface's id + net_id = intf_id + network = self.get_network(context, net_id) + subnet = self.get_subnet(context, subnet_id) + mapped_network = self._get_mapped_network_with_subnets(network) + mapped_subnet = self._map_state_and_status(subnet) + + data = { + 'id': intf_id, + "network": mapped_network, + "subnet": mapped_subnet + } + + return data + + def _extend_port_dict_binding(self, context, port): + cfg_vif_type = cfg.CONF.NOVA.vif_type.lower() + if not cfg_vif_type in (portbindings.VIF_TYPE_OVS, + portbindings.VIF_TYPE_IVS): + LOG.warning(_("Unrecognized vif_type in configuration " + "[%s]. Defaulting to ovs."), + cfg_vif_type) + cfg_vif_type = portbindings.VIF_TYPE_OVS + # In ML2, the host_id is already populated + if portbindings.HOST_ID in port: + hostid = port[portbindings.HOST_ID] + else: + hostid = porttracker_db.get_port_hostid(context, port['id']) + if hostid: + port[portbindings.HOST_ID] = hostid + override = self._check_hostvif_override(hostid) + if override: + cfg_vif_type = override + port[portbindings.VIF_TYPE] = cfg_vif_type + + port[portbindings.VIF_DETAILS] = { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True + } + return port + + def _check_hostvif_override(self, hostid): + for v in cfg.CONF.NOVA.vif_types: + if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []): + return v + return False + + def _get_port_net_tenantid(self, context, port): + net = super(NeutronRestProxyV2Base, + self).get_network(context, port["network_id"]) + return net['tenant_id'] + + def async_port_create(self, tenant_id, net_id, port): + try: + self.servers.rest_create_port(tenant_id, net_id, port) + except servermanager.RemoteRestError as e: + # 404 should never be received on a port create unless + # there are inconsistencies between the data in neutron + # and the data in the backend. + # Run a sync to get it consistent. + if (cfg.CONF.RESTPROXY.auto_sync_on_failure and + e.status == httplib.NOT_FOUND and + servermanager.NXNETWORK in e.reason): + LOG.error(_("Iconsistency with backend controller " + "triggering full synchronization.")) + # args depend on if we are operating in ML2 driver + # or as the full plugin + topoargs = self.servers.get_topo_function_args + self._send_all_data( + send_ports=topoargs['get_ports'], + send_floating_ips=topoargs['get_floating_ips'], + send_routers=topoargs['get_routers'], + triggered_by_tenant=tenant_id + ) + # If the full sync worked, the port will be created + # on the controller so it can be safely marked as active + else: + # Any errors that don't result in a successful auto-sync + # require that the port be placed into the error state. 
+ LOG.error( + _("NeutronRestProxyV2: Unable to create port: %s"), e) + try: + self._set_port_status(port['id'], const.PORT_STATUS_ERROR) + except exceptions.PortNotFound: + # If port is already gone from DB and there was an error + # creating on the backend, everything is already consistent + pass + return + new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP' + else const.PORT_STATUS_DOWN) + try: + self._set_port_status(port['id'], new_status) + except exceptions.PortNotFound: + # This port was deleted before the create made it to the controller + # so it now needs to be deleted since the normal delete request + # would have deleted an non-existent port. + self.servers.rest_delete_port(tenant_id, net_id, port['id']) + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock + @utils.synchronized('bsn-port-barrier') + def _set_port_status(self, port_id, status): + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.flush() + except sqlexc.NoResultFound: + raise exceptions.PortNotFound(port_id=port_id) + + +def put_context_in_serverpool(f): + @functools.wraps(f) + def wrapper(self, context, *args, **kwargs): + self.servers.set_context(context) + return f(self, context, *args, **kwargs) + return wrapper + + +class NeutronRestProxyV2(NeutronRestProxyV2Base, + addr_pair_db.AllowedAddressPairsMixin, + extradhcpopt_db.ExtraDhcpOptMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + sg_rpc_base.SecurityGroupServerRpcMixin): + + _supported_extension_aliases = ["external-net", "router", "binding", + "router_rules", "extra_dhcp_opt", "quotas", + "dhcp_agent_scheduler", "agent", + "security-group", "allowed-address-pairs"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + super(NeutronRestProxyV2, self).__init__() + LOG.info(_('NeutronRestProxy: Starting plugin. 
Version=%s'), + version.version_string_with_vcs()) + pl_config.register_config() + self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size) + + # Include the Big Switch Extensions path in the api_extensions + neutron_extensions.append_api_extensions_path(extensions.__path__) + + self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route + + # init network ctrl connections + self.servers = servermanager.ServerPool() + self.servers.get_topo_function = self._get_all_data + self.servers.get_topo_function_args = {'get_ports': True, + 'get_floating_ips': True, + 'get_routers': True} + + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + + # setup rpc for security and DHCP agents + self._setup_rpc() + + if cfg.CONF.RESTPROXY.sync_data: + self._send_all_data() + + LOG.debug(_("NeutronRestProxyV2: initialization done")) + + def _setup_rpc(self): + self.conn = n_rpc.create_connection(new=True) + self.topic = topics.PLUGIN + self.notifier = AgentNotifierApi(topics.AGENT) + # init dhcp agent support + self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + self._dhcp_agent_notifier + ) + self.endpoints = [RestProxyCallbacks(), + agents_db.AgentExtRpcCallback()] + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + @put_context_in_serverpool + def create_network(self, context, network): + """Create a network. + + Network represents an L2 network segment which can have a set of + subnets and ports associated with it. + + :param context: neutron api request context + :param network: dictionary describing the network + + :returns: a sequence of mappings with the following signature: + { + "id": UUID representing the network. + "name": Human-readable name identifying the network. + "tenant_id": Owner of network. NOTE: only admin user can specify + a tenant_id other than its own. + "admin_state_up": Sets admin state of network. + if down, network does not forward packets. + "status": Indicates whether network is currently operational + (values are "ACTIVE", "DOWN", "BUILD", and "ERROR") + "subnets": Subnets associated with this network. + } + + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: create_network() called")) + + self._warn_on_state_status(network['network']) + + with context.session.begin(subtransactions=True): + self._ensure_default_security_group( + context, + network['network']["tenant_id"] + ) + # create network in DB + new_net = super(NeutronRestProxyV2, self).create_network(context, + network) + self._process_l3_create(context, new_net, network['network']) + # create network on the network controller + self._send_create_network(new_net, context) + + # return created network + return new_net + + @put_context_in_serverpool + def update_network(self, context, net_id, network): + """Updates the properties of a particular Virtual Network. + + :param context: neutron api request context + :param net_id: uuid of the network to update + :param network: dictionary describing the updates + + :returns: a sequence of mappings with the following signature: + { + "id": UUID representing the network. + "name": Human-readable name identifying the network. + "tenant_id": Owner of network. NOTE: only admin user can + specify a tenant_id other than its own. + "admin_state_up": Sets admin state of network. + if down, network does not forward packets. 
+ "status": Indicates whether network is currently operational + (values are "ACTIVE", "DOWN", "BUILD", and "ERROR") + "subnets": Subnets associated with this network. + } + + :raises: exceptions.NetworkNotFound + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2.update_network() called")) + + self._warn_on_state_status(network['network']) + + session = context.session + with session.begin(subtransactions=True): + new_net = super(NeutronRestProxyV2, self).update_network( + context, net_id, network) + self._process_l3_update(context, new_net, network['network']) + + # update network on network controller + self._send_update_network(new_net, context) + return new_net + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock + @utils.synchronized('bsn-port-barrier') + @put_context_in_serverpool + def delete_network(self, context, net_id): + """Delete a network. + :param context: neutron api request context + :param id: UUID representing the network to delete. + + :returns: None + + :raises: exceptions.NetworkInUse + :raises: exceptions.NetworkNotFound + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: delete_network() called")) + + # Validate args + orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id) + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, net_id) + ret_val = super(NeutronRestProxyV2, self).delete_network(context, + net_id) + self._send_delete_network(orig_net, context) + return ret_val + + @put_context_in_serverpool + def create_port(self, context, port): + """Create a port, which is a connection point of a device + (e.g., a VM NIC) to attach to a L2 Neutron network. + :param context: neutron api request context + :param port: dictionary describing the port + + :returns: + { + "id": uuid represeting the port. + "network_id": uuid of network. + "tenant_id": tenant_id + "mac_address": mac address to use on this port. + "admin_state_up": Sets admin state of port. if down, port + does not forward packets. + "status": dicates whether port is currently operational + (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR") + "fixed_ips": list of subnet ID"s and IP addresses to be used on + this port + "device_id": identifies the device (e.g., virtual server) using + this port. + } + + :raises: exceptions.NetworkNotFound + :raises: exceptions.StateInvalid + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: create_port() called")) + + # Update DB in new session so exceptions rollback changes + with context.session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + # non-router port status is set to pending. it is then updated + # after the async rest call completes. 
router ports are synchronous + if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF: + port['port']['status'] = const.PORT_STATUS_ACTIVE + else: + port['port']['status'] = const.PORT_STATUS_BUILD + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + new_port = super(NeutronRestProxyV2, self).create_port(context, + port) + self._process_port_create_security_group(context, new_port, sgids) + if (portbindings.HOST_ID in port['port'] + and 'id' in new_port): + host_id = port['port'][portbindings.HOST_ID] + porttracker_db.put_port_hostid(context, new_port['id'], + host_id) + new_port[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, new_port, + port['port'].get(addr_pair.ADDRESS_PAIRS))) + self._process_port_create_extra_dhcp_opts(context, new_port, + dhcp_opts) + new_port = self._extend_port_dict_binding(context, new_port) + net = super(NeutronRestProxyV2, + self).get_network(context, new_port["network_id"]) + if self.add_meta_server_route: + if new_port['device_owner'] == const.DEVICE_OWNER_DHCP: + destination = METADATA_SERVER_IP + '/32' + self._add_host_route(context, destination, new_port) + + # create on network ctrl + mapped_port = self._map_state_and_status(new_port) + # ports have to be created synchronously when creating a router + # port since adding router interfaces is a multi-call process + if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF: + self.servers.rest_create_port(net["tenant_id"], + new_port["network_id"], + mapped_port) + else: + self.evpool.spawn_n(self.async_port_create, net["tenant_id"], + new_port["network_id"], mapped_port) + self.notify_security_groups_member_updated(context, new_port) + return new_port + + def get_port(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + port = super(NeutronRestProxyV2, self).get_port(context, id, + fields) + self._extend_port_dict_binding(context, port) + return self._fields(port, fields) + + def get_ports(self, context, filters=None, fields=None): + with context.session.begin(subtransactions=True): + ports = super(NeutronRestProxyV2, self).get_ports(context, filters, + fields) + for port in ports: + self._extend_port_dict_binding(context, port) + return [self._fields(port, fields) for port in ports] + + @put_context_in_serverpool + def update_port(self, context, port_id, port): + """Update values of a port. + + :param context: neutron api request context + :param id: UUID representing the port to update. + :param port: dictionary with keys indicating fields to update. + + :returns: a mapping sequence with the following signature: + { + "id": uuid represeting the port. + "network_id": uuid of network. + "tenant_id": tenant_id + "mac_address": mac address to use on this port. + "admin_state_up": sets admin state of port. if down, port + does not forward packets. + "status": dicates whether port is currently operational + (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR") + "fixed_ips": list of subnet ID's and IP addresses to be used on + this port + "device_id": identifies the device (e.g., virtual server) using + this port. 
+ } + + :raises: exceptions.StateInvalid + :raises: exceptions.PortNotFound + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: update_port() called")) + + self._warn_on_state_status(port['port']) + + # Validate Args + orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id) + with context.session.begin(subtransactions=True): + # Update DB + new_port = super(NeutronRestProxyV2, + self).update_port(context, port_id, port) + ctrl_update_required = False + if addr_pair.ADDRESS_PAIRS in port['port']: + ctrl_update_required |= ( + self.update_address_pairs_on_port(context, port_id, port, + orig_port, new_port)) + self._update_extra_dhcp_opts_on_port(context, port_id, port, + new_port) + old_host_id = porttracker_db.get_port_hostid(context, + orig_port['id']) + if (portbindings.HOST_ID in port['port'] + and 'id' in new_port): + host_id = port['port'][portbindings.HOST_ID] + porttracker_db.put_port_hostid(context, new_port['id'], + host_id) + if old_host_id != host_id: + ctrl_update_required = True + + if (new_port.get("device_id") != orig_port.get("device_id") and + orig_port.get("device_id")): + ctrl_update_required = True + + if ctrl_update_required: + # tenant_id must come from network in case network is shared + net_tenant_id = self._get_port_net_tenantid(context, new_port) + new_port = self._extend_port_dict_binding(context, new_port) + mapped_port = self._map_state_and_status(new_port) + self.servers.rest_update_port(net_tenant_id, + new_port["network_id"], + mapped_port) + agent_update_required = self.update_security_group_on_port( + context, port_id, port, orig_port, new_port) + agent_update_required |= self.is_security_group_member_updated( + context, orig_port, new_port) + + # return new_port + return new_port + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock + @utils.synchronized('bsn-port-barrier') + @put_context_in_serverpool + def delete_port(self, context, port_id, l3_port_check=True): + """Delete a port. + :param context: neutron api request context + :param id: UUID representing the port to delete. + + :raises: exceptions.PortInUse + :raises: exceptions.PortNotFound + :raises: exceptions.NetworkNotFound + :raises: RemoteRestError + """ + LOG.debug(_("NeutronRestProxyV2: delete_port() called")) + + # if needed, check to see if this is a port owned by + # an l3-router. If so, we should prevent deletion.
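+ # Otherwise the port is removed from the local DB first (floating IPs + # disassociated, security group bindings dropped) and only then deleted + # from the backend controller via rest_delete_port.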
+ if l3_port_check: + self.prevent_l3_port_deletion(context, port_id) + with context.session.begin(subtransactions=True): + self.disassociate_floatingips(context, port_id) + self._delete_port_security_group_bindings(context, port_id) + port = super(NeutronRestProxyV2, self).get_port(context, port_id) + # Tenant ID must come from network in case the network is shared + tenid = self._get_port_net_tenantid(context, port) + self._delete_port(context, port_id) + self.servers.rest_delete_port(tenid, port['network_id'], port_id) + + @put_context_in_serverpool + def create_subnet(self, context, subnet): + LOG.debug(_("NeutronRestProxyV2: create_subnet() called")) + + self._warn_on_state_status(subnet['subnet']) + + with context.session.begin(subtransactions=True): + # create subnet in DB + new_subnet = super(NeutronRestProxyV2, + self).create_subnet(context, subnet) + net_id = new_subnet['network_id'] + orig_net = super(NeutronRestProxyV2, + self).get_network(context, net_id) + # update network on network controller + self._send_update_network(orig_net, context) + return new_subnet + + @put_context_in_serverpool + def update_subnet(self, context, id, subnet): + LOG.debug(_("NeutronRestProxyV2: update_subnet() called")) + + self._warn_on_state_status(subnet['subnet']) + + with context.session.begin(subtransactions=True): + # update subnet in DB + new_subnet = super(NeutronRestProxyV2, + self).update_subnet(context, id, subnet) + net_id = new_subnet['network_id'] + orig_net = super(NeutronRestProxyV2, + self).get_network(context, net_id) + # update network on network controller + self._send_update_network(orig_net, context) + return new_subnet + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock + @utils.synchronized('bsn-port-barrier') + @put_context_in_serverpool + def delete_subnet(self, context, id): + LOG.debug(_("NeutronRestProxyV2: delete_subnet() called")) + orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id) + net_id = orig_subnet['network_id'] + with context.session.begin(subtransactions=True): + # delete subnet in DB + super(NeutronRestProxyV2, self).delete_subnet(context, id) + orig_net = super(NeutronRestProxyV2, self).get_network(context, + net_id) + # update network on network controller - exception will rollback + self._send_update_network(orig_net, context) + + def _get_tenant_default_router_rules(self, tenant): + rules = cfg.CONF.ROUTER.tenant_default_router_rule + defaultset = [] + tenantset = [] + for rule in rules: + items = rule.split(':') + if len(items) == 5: + (tenantid, source, destination, action, nexthops) = items + elif len(items) == 4: + (tenantid, source, destination, action) = items + nexthops = '' + else: + continue + parsedrule = {'source': source, + 'destination': destination, 'action': action, + 'nexthops': nexthops.split(',')} + if parsedrule['nexthops'][0] == '': + parsedrule['nexthops'] = [] + if tenantid == '*': + defaultset.append(parsedrule) + if tenantid == tenant: + tenantset.append(parsedrule) + if tenantset: + return tenantset + return defaultset + + @put_context_in_serverpool + def create_router(self, context, router): + LOG.debug(_("NeutronRestProxyV2: create_router() called")) + + self._warn_on_state_status(router['router']) + + tenant_id = self._get_tenant_id_for_create(context, router["router"]) + + # set default router rules + rules = self._get_tenant_default_router_rules(tenant_id) + router['router']['router_rules'] = rules + + with context.session.begin(subtransactions=True): + # create router in DB + new_router = 
super(NeutronRestProxyV2, self).create_router(context, + router) + mapped_router = self._map_state_and_status(new_router) + self.servers.rest_create_router(tenant_id, mapped_router) + + # return created router + return new_router + + @put_context_in_serverpool + def update_router(self, context, router_id, router): + + LOG.debug(_("NeutronRestProxyV2.update_router() called")) + + self._warn_on_state_status(router['router']) + + orig_router = super(NeutronRestProxyV2, self).get_router(context, + router_id) + tenant_id = orig_router["tenant_id"] + with context.session.begin(subtransactions=True): + new_router = super(NeutronRestProxyV2, + self).update_router(context, router_id, router) + router = self._map_state_and_status(new_router) + + # update router on network controller + self.servers.rest_update_router(tenant_id, router, router_id) + + # return updated router + return new_router + + # NOTE(kevinbenton): workaround for eventlet/mysql deadlock. + # delete_router ends up calling _delete_port instead of delete_port. + @utils.synchronized('bsn-port-barrier') + @put_context_in_serverpool + def delete_router(self, context, router_id): + LOG.debug(_("NeutronRestProxyV2: delete_router() called")) + + with context.session.begin(subtransactions=True): + orig_router = self._get_router(context, router_id) + tenant_id = orig_router["tenant_id"] + + # Ensure that the router is not used + router_filter = {'router_id': [router_id]} + fips = self.get_floatingips_count(context.elevated(), + filters=router_filter) + if fips: + raise l3.RouterInUse(router_id=router_id) + + device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF + device_filter = {'device_id': [router_id], + 'device_owner': [device_owner]} + ports = self.get_ports_count(context.elevated(), + filters=device_filter) + if ports: + raise l3.RouterInUse(router_id=router_id) + ret_val = super(NeutronRestProxyV2, + self).delete_router(context, router_id) + + # delete from network ctrl + self.servers.rest_delete_router(tenant_id, router_id) + return ret_val + + def add_router_interface(self, context, router_id, interface_info): + + LOG.debug(_("NeutronRestProxyV2: add_router_interface() called")) + + # Validate args + router = self._get_router(context, router_id) + tenant_id = router['tenant_id'] + + with context.session.begin(subtransactions=True): + # create interface in DB + new_intf_info = super(NeutronRestProxyV2, + self).add_router_interface(context, + router_id, + interface_info) + port = self._get_port(context, new_intf_info['port_id']) + net_id = port['network_id'] + subnet_id = new_intf_info['subnet_id'] + # we will use the port's network id as interface's id + interface_id = net_id + intf_details = self._get_router_intf_details(context, + interface_id, + subnet_id) + + # create interface on the network controller + self.servers.rest_add_router_interface(tenant_id, router_id, + intf_details) + return new_intf_info + + def remove_router_interface(self, context, router_id, interface_info): + + LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called")) + + # Validate args + router = self._get_router(context, router_id) + tenant_id = router['tenant_id'] + + # we will first get the interface identifier before deleting in the DB + if not interface_info: + msg = _("Either subnet_id or port_id must be specified") + raise exceptions.BadRequest(resource='router', msg=msg) + if 'port_id' in interface_info: + port = self._get_port(context, interface_info['port_id']) + interface_id = port['network_id'] + elif 'subnet_id' in interface_info: + subnet = 
self._get_subnet(context, interface_info['subnet_id']) + interface_id = subnet['network_id'] + else: + msg = _("Either subnet_id or port_id must be specified") + raise exceptions.BadRequest(resource='router', msg=msg) + + with context.session.begin(subtransactions=True): + # remove router interface in DB + del_ret = super(NeutronRestProxyV2, + self).remove_router_interface(context, + router_id, + interface_info) + + # remove the router interface on the network controller + self.servers.rest_remove_router_interface(tenant_id, router_id, + interface_id) + return del_ret + + @put_context_in_serverpool + def create_floatingip(self, context, floatingip): + LOG.debug(_("NeutronRestProxyV2: create_floatingip() called")) + + with context.session.begin(subtransactions=True): + # create floatingip in DB + new_fl_ip = super(NeutronRestProxyV2, + self).create_floatingip(context, floatingip) + + # create floatingip on the network controller + try: + if 'floatingip' in self.servers.get_capabilities(): + self.servers.rest_create_floatingip( + new_fl_ip['tenant_id'], new_fl_ip) + else: + self._send_floatingip_update(context) + except servermanager.RemoteRestError as e: + with excutils.save_and_reraise_exception(): + LOG.error( + _("NeutronRestProxyV2: Unable to create remote " + "floating IP: %s"), e) + # return created floating IP + return new_fl_ip + + @put_context_in_serverpool + def update_floatingip(self, context, id, floatingip): + LOG.debug(_("NeutronRestProxyV2: update_floatingip() called")) + + with context.session.begin(subtransactions=True): + # update floatingip in DB + new_fl_ip = super(NeutronRestProxyV2, + self).update_floatingip(context, id, floatingip) + + # update network on network controller + if 'floatingip' in self.servers.get_capabilities(): + self.servers.rest_update_floatingip(new_fl_ip['tenant_id'], + new_fl_ip, id) + else: + self._send_floatingip_update(context) + return new_fl_ip + + @put_context_in_serverpool + def delete_floatingip(self, context, id): + LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called")) + + with context.session.begin(subtransactions=True): + # delete floating IP in DB + old_fip = super(NeutronRestProxyV2, self).get_floatingip(context, + id) + super(NeutronRestProxyV2, self).delete_floatingip(context, id) + + # update network on network controller + if 'floatingip' in self.servers.get_capabilities(): + self.servers.rest_delete_floatingip(old_fip['tenant_id'], id) + else: + self._send_floatingip_update(context) + + def disassociate_floatingips(self, context, port_id): + LOG.debug(_("NeutronRestProxyV2: disassociate_floatingips() called")) + super(NeutronRestProxyV2, self).disassociate_floatingips(context, + port_id) + self._send_floatingip_update(context) + + # overriding method from l3_db as original method calls + # self.delete_floatingip() which in turn calls self.delete_port() which + # is locked with 'bsn-port-barrier' + @put_context_in_serverpool + def delete_disassociated_floatingips(self, context, network_id): + query = self._model_query(context, l3_db.FloatingIP) + query = query.filter_by(floating_network_id=network_id, + fixed_port_id=None, + router_id=None) + for fip in query: + context.session.delete(fip) + self._delete_port(context.elevated(), fip['floating_port_id']) + + def _send_floatingip_update(self, context): + try: + ext_net_id = self.get_external_network_id(context) + if ext_net_id: + # Use the elevated state of the context for the ext_net query + admin_context = context.elevated() + ext_net = super(NeutronRestProxyV2, + self).get_network(admin_context,
ext_net_id) + # update external network on network controller + self._send_update_network(ext_net, admin_context) + except exceptions.TooManyExternalNetworks: + # get_external_network can raise errors when multiple external + # networks are detected, which isn't supported by the Plugin + LOG.error(_("NeutronRestProxyV2: too many external networks")) + + def _add_host_route(self, context, destination, port): + subnet = {} + for fixed_ip in port['fixed_ips']: + subnet_id = fixed_ip['subnet_id'] + nexthop = fixed_ip['ip_address'] + subnet['host_routes'] = [{'destination': destination, + 'nexthop': nexthop}] + updated_subnet = self.update_subnet(context, + subnet_id, + {'subnet': subnet}) + payload = {'subnet': updated_subnet} + self._dhcp_agent_notifier.notify(context, payload, + 'subnet.update.end') + LOG.debug(_("Adding host route: ")) + LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"), + {'dst': destination, 'next': nexthop}) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/routerrule_db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/routerrule_db.py new file mode 100644 index 00000000..d6dcc08c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/routerrule_db.py @@ -0,0 +1,146 @@ +# Copyright 2013, Big Switch Networks +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
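+# +# The models and mixin below store Big Switch router rules: RouterRule and +# NextHop persist each rule and its next hops, while RouterRule_db_mixin +# extends the standard L3 DB mixin so the stored rules are attached to the +# 'router_rules' field of router dicts returned by get_router, get_routers +# and get_sync_data.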
+ +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.db import l3_db +from neutron.db import model_base +from neutron.openstack.common import log as logging +from neutron.plugins.bigswitch.extensions import routerrule + + +LOG = logging.getLogger(__name__) + + +class RouterRule(model_base.BASEV2): + id = sa.Column(sa.Integer, primary_key=True) + source = sa.Column(sa.String(64), nullable=False) + destination = sa.Column(sa.String(64), nullable=False) + nexthops = orm.relationship('NextHop', cascade='all,delete') + action = sa.Column(sa.String(10), nullable=False) + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', + ondelete="CASCADE")) + + +class NextHop(model_base.BASEV2): + rule_id = sa.Column(sa.Integer, + sa.ForeignKey('routerrules.id', + ondelete="CASCADE"), + primary_key=True) + nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True) + + +class RouterRule_db_mixin(l3_db.L3_NAT_db_mixin): + """ Mixin class to support route rule configuration on a router""" + def update_router(self, context, id, router): + r = router['router'] + with context.session.begin(subtransactions=True): + router_db = self._get_router(context, id) + if 'router_rules' in r: + self._update_router_rules(context, + router_db, + r['router_rules']) + updated = super(RouterRule_db_mixin, self).update_router( + context, id, router) + updated['router_rules'] = self._get_router_rules_by_router_id( + context, id) + + return updated + + def create_router(self, context, router): + r = router['router'] + with context.session.begin(subtransactions=True): + router_db = super(RouterRule_db_mixin, self).create_router( + context, router) + if 'router_rules' in r: + self._update_router_rules(context, + router_db, + r['router_rules']) + else: + LOG.debug(_('No rules in router')) + router_db['router_rules'] = self._get_router_rules_by_router_id( + context, router_db['id']) + + return router_db + + def _update_router_rules(self, context, router, rules): + if len(rules) > cfg.CONF.ROUTER.max_router_rules: + raise routerrule.RulesExhausted( + router_id=router['id'], + quota=cfg.CONF.ROUTER.max_router_rules) + del_context = context.session.query(RouterRule) + del_context.filter_by(router_id=router['id']).delete() + context.session.expunge_all() + LOG.debug(_('Updating router rules to %s'), rules) + for rule in rules: + router_rule = RouterRule( + router_id=router['id'], + destination=rule['destination'], + source=rule['source'], + action=rule['action']) + router_rule.nexthops = [NextHop(nexthop=hop) + for hop in rule['nexthops']] + context.session.add(router_rule) + context.session.flush() + + def _make_router_rule_list(self, router_rules): + ruleslist = [] + for rule in router_rules: + hops = [hop['nexthop'] for hop in rule['nexthops']] + ruleslist.append({'id': rule['id'], + 'destination': rule['destination'], + 'source': rule['source'], + 'action': rule['action'], + 'nexthops': hops}) + return ruleslist + + def _get_router_rules_by_router_id(self, context, id): + query = context.session.query(RouterRule) + router_rules = query.filter_by(router_id=id).all() + return self._make_router_rule_list(router_rules) + + def get_router(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + router = super(RouterRule_db_mixin, self).get_router( + context, id, fields) + router['router_rules'] = self._get_router_rules_by_router_id( + context, id) + return router + + def get_routers(self, context, filters=None, fields=None, + sorts=None, 
limit=None, marker=None, + page_reverse=False): + with context.session.begin(subtransactions=True): + routers = super(RouterRule_db_mixin, self).get_routers( + context, filters, fields, sorts=sorts, limit=limit, + marker=marker, page_reverse=page_reverse) + for router in routers: + router['router_rules'] = self._get_router_rules_by_router_id( + context, router['id']) + return routers + + def get_sync_data(self, context, router_ids=None, active=None): + """Query routers and their related floating_ips, interfaces.""" + with context.session.begin(subtransactions=True): + routers = super(RouterRule_db_mixin, + self).get_sync_data(context, router_ids, + active=active) + for router in routers: + router['router_rules'] = self._get_router_rules_by_router_id( + context, router['id']) + return routers diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/servermanager.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/servermanager.py new file mode 100644 index 00000000..b2e56271 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/servermanager.py @@ -0,0 +1,606 @@ +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mandeep Dhami, Big Switch Networks, Inc. +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Kevin Benton, Big Switch Networks, Inc. + +""" +This module manages the HTTP and HTTPS connections to the backend controllers. + +The main class it provides for external use is ServerPool which manages a set +of ServerProxy objects that correspond to individual backend controllers. 
+ +The following functionality is handled by this module: +- Translation of rest_* function calls to HTTP/HTTPS calls to the controllers +- Automatic failover between controllers +- SSL Certificate enforcement +- HTTP Authentication + +""" +import base64 +import httplib +import os +import socket +import ssl + +import eventlet +import eventlet.corolocal +from oslo.config import cfg + +from neutron.common import exceptions +from neutron.common import utils +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.bigswitch.db import consistency_db as cdb + +LOG = logging.getLogger(__name__) + +# The following are used to invoke the API on the external controller +CAPABILITIES_PATH = "/capabilities" +NET_RESOURCE_PATH = "/tenants/%s/networks" +PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports" +ROUTER_RESOURCE_PATH = "/tenants/%s/routers" +ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces" +NETWORKS_PATH = "/tenants/%s/networks/%s" +FLOATINGIPS_PATH = "/tenants/%s/floatingips/%s" +PORTS_PATH = "/tenants/%s/networks/%s/ports/%s" +ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment" +ROUTERS_PATH = "/tenants/%s/routers/%s" +ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s" +TOPOLOGY_PATH = "/topology" +HEALTH_PATH = "/health" +SUCCESS_CODES = range(200, 207) +FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503, + 504, 505] +BASE_URI = '/networkService/v1.1' +ORCHESTRATION_SERVICE_ID = 'Neutron v2.0' +HASH_MATCH_HEADER = 'X-BSN-BVS-HASH-MATCH' +# error messages +NXNETWORK = 'NXVNS' + + +class RemoteRestError(exceptions.NeutronException): + message = _("Error in REST call to remote network " + "controller: %(reason)s") + status = None + + def __init__(self, **kwargs): + self.status = kwargs.pop('status', None) + self.reason = kwargs.get('reason') + super(RemoteRestError, self).__init__(**kwargs) + + +class ServerProxy(object): + """REST server proxy to a network controller.""" + + def __init__(self, server, port, ssl, auth, neutron_id, timeout, + base_uri, name, mypool, combined_cert): + self.server = server + self.port = port + self.ssl = ssl + self.base_uri = base_uri + self.timeout = timeout + self.name = name + self.success_codes = SUCCESS_CODES + self.auth = None + self.neutron_id = neutron_id + self.failed = False + self.capabilities = [] + # enable server to reference parent pool + self.mypool = mypool + # cache connection here to avoid a SSL handshake for every connection + self.currentconn = None + if auth: + self.auth = 'Basic ' + base64.encodestring(auth).strip() + self.combined_cert = combined_cert + + def get_capabilities(self): + try: + body = self.rest_call('GET', CAPABILITIES_PATH)[2] + self.capabilities = json.loads(body) + except Exception: + LOG.exception(_("Couldn't retrieve capabilities. 
" + "Newer API calls won't be supported.")) + LOG.info(_("The following capabilities were received " + "for %(server)s: %(cap)s"), {'server': self.server, + 'cap': self.capabilities}) + return self.capabilities + + def rest_call(self, action, resource, data='', headers={}, timeout=False, + reconnect=False, hash_handler=None): + uri = self.base_uri + resource + body = json.dumps(data) + if not headers: + headers = {} + headers['Content-type'] = 'application/json' + headers['Accept'] = 'application/json' + headers['NeutronProxy-Agent'] = self.name + headers['Instance-ID'] = self.neutron_id + headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID + if hash_handler: + # this will be excluded on calls that don't need hashes + # (e.g. topology sync, capability checks) + headers[HASH_MATCH_HEADER] = hash_handler.read_for_update() + else: + hash_handler = cdb.HashHandler() + if 'keep-alive' in self.capabilities: + headers['Connection'] = 'keep-alive' + else: + reconnect = True + if self.auth: + headers['Authorization'] = self.auth + + LOG.debug(_("ServerProxy: server=%(server)s, port=%(port)d, " + "ssl=%(ssl)r"), + {'server': self.server, 'port': self.port, 'ssl': self.ssl}) + LOG.debug(_("ServerProxy: resource=%(resource)s, data=%(data)r, " + "headers=%(headers)r, action=%(action)s"), + {'resource': resource, 'data': data, 'headers': headers, + 'action': action}) + + # unspecified timeout is False because a timeout can be specified as + # None to indicate no timeout. + if timeout is False: + timeout = self.timeout + + if timeout != self.timeout: + # need a new connection if timeout has changed + reconnect = True + + if not self.currentconn or reconnect: + if self.currentconn: + self.currentconn.close() + if self.ssl: + self.currentconn = HTTPSConnectionWithValidation( + self.server, self.port, timeout=timeout) + if self.currentconn is None: + LOG.error(_('ServerProxy: Could not establish HTTPS ' + 'connection')) + return 0, None, None, None + self.currentconn.combined_cert = self.combined_cert + else: + self.currentconn = httplib.HTTPConnection( + self.server, self.port, timeout=timeout) + if self.currentconn is None: + LOG.error(_('ServerProxy: Could not establish HTTP ' + 'connection')) + return 0, None, None, None + + try: + self.currentconn.request(action, uri, body, headers) + response = self.currentconn.getresponse() + hash_handler.put_hash(response.getheader(HASH_MATCH_HEADER)) + respstr = response.read() + respdata = respstr + if response.status in self.success_codes: + try: + respdata = json.loads(respstr) + except ValueError: + # response was not JSON, ignore the exception + pass + ret = (response.status, response.reason, respstr, respdata) + except httplib.HTTPException: + # If we were using a cached connection, try again with a new one. 
+ with excutils.save_and_reraise_exception() as ctxt: + self.currentconn.close() + if reconnect: + # if reconnect is true, this was on a fresh connection so + # reraise since this server seems to be broken + ctxt.reraise = True + else: + # if reconnect is false, it was a cached connection so + # try one more time before re-raising + ctxt.reraise = False + return self.rest_call(action, resource, data, headers, + timeout=timeout, reconnect=True) + except (socket.timeout, socket.error) as e: + self.currentconn.close() + LOG.error(_('ServerProxy: %(action)s failure, %(e)r'), + {'action': action, 'e': e}) + ret = 0, None, None, None + LOG.debug(_("ServerProxy: status=%(status)d, reason=%(reason)r, " + "ret=%(ret)s, data=%(data)r"), {'status': ret[0], + 'reason': ret[1], + 'ret': ret[2], + 'data': ret[3]}) + return ret + + +class ServerPool(object): + + def __init__(self, timeout=False, + base_uri=BASE_URI, name='NeutronRestProxy'): + LOG.debug(_("ServerPool: initializing")) + # 'servers' is the list of network controller REST end-points + # (used in order specified till one succeeds, and it is sticky + # till next failure). Use 'server_auth' to encode api-key + servers = cfg.CONF.RESTPROXY.servers + self.auth = cfg.CONF.RESTPROXY.server_auth + self.ssl = cfg.CONF.RESTPROXY.server_ssl + self.neutron_id = cfg.CONF.RESTPROXY.neutron_id + self.base_uri = base_uri + self.name = name + self.contexts = {} + self.timeout = cfg.CONF.RESTPROXY.server_timeout + self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections + default_port = 8000 + if timeout is not False: + self.timeout = timeout + + # Function to use to retrieve topology for consistency syncs. + # Needs to be set by module that uses the servermanager. + self.get_topo_function = None + self.get_topo_function_args = {} + + if not servers: + raise cfg.Error(_('Servers not defined. Aborting server manager.')) + servers = [s if len(s.rsplit(':', 1)) == 2 + else "%s:%d" % (s, default_port) + for s in servers] + if any((len(spl) != 2 or not spl[1].isdigit()) + for spl in [sp.rsplit(':', 1) + for sp in servers]): + raise cfg.Error(_('Servers must be defined as <ip>:<port>. ' + 'Configuration was %s') % servers) + self.servers = [ + self.server_proxy_for(server, int(port)) + for server, port in (s.rsplit(':', 1) for s in servers) + ] + eventlet.spawn(self._consistency_watchdog, + cfg.CONF.RESTPROXY.consistency_interval) + LOG.debug(_("ServerPool: initialization done")) + + def set_context(self, context): + # this context needs to be local to the greenthread + # so concurrent requests don't use the wrong context + self.contexts[eventlet.corolocal.get_ident()] = context + + def pop_context(self): + # Don't store these contexts after use. They should only + # last for one request. + try: + return self.contexts.pop(eventlet.corolocal.get_ident()) + except KeyError: + return None + + def get_capabilities(self): + # lookup on first try + try: + return self.capabilities + except AttributeError: + # each server should return a list of capabilities it supports + # e.g.
['floatingip'] + capabilities = [set(server.get_capabilities()) + for server in self.servers] + # Pool only supports what all of the servers support + self.capabilities = set.intersection(*capabilities) + return self.capabilities + + def server_proxy_for(self, server, port): + combined_cert = self._get_combined_cert_for_server(server, port) + return ServerProxy(server, port, self.ssl, self.auth, self.neutron_id, + self.timeout, self.base_uri, self.name, mypool=self, + combined_cert=combined_cert) + + def _get_combined_cert_for_server(self, server, port): + # The ssl library requires a combined file with all trusted certs + # so we make one containing the trusted CAs and the corresponding + # host cert for this server + combined_cert = None + if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation: + base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory + host_dir = os.path.join(base_ssl, 'host_certs') + ca_dir = os.path.join(base_ssl, 'ca_certs') + combined_dir = os.path.join(base_ssl, 'combined') + combined_cert = os.path.join(combined_dir, '%s.pem' % server) + if not os.path.exists(base_ssl): + raise cfg.Error(_('ssl_cert_directory [%s] does not exist. ' + 'Create it or disable ssl.') % base_ssl) + for automake in [combined_dir, ca_dir, host_dir]: + if not os.path.exists(automake): + os.makedirs(automake) + + # get all CA certs + certs = self._get_ca_cert_paths(ca_dir) + + # check for a host specific cert + hcert, exists = self._get_host_cert_path(host_dir, server) + if exists: + certs.append(hcert) + elif cfg.CONF.RESTPROXY.ssl_sticky: + self._fetch_and_store_cert(server, port, hcert) + certs.append(hcert) + if not certs: + raise cfg.Error(_('No certificates were found to verify ' + 'controller %s') % (server)) + self._combine_certs_to_file(certs, combined_cert) + return combined_cert + + def _combine_certs_to_file(self, certs, cfile): + ''' + Concatenates the contents of each certificate in a list of + certificate paths to one combined location for use with ssl + sockets. + ''' + with open(cfile, 'w') as combined: + for c in certs: + with open(c, 'r') as cert_handle: + combined.write(cert_handle.read()) + + def _get_host_cert_path(self, host_dir, server): + ''' + returns full path and boolean indicating existence + ''' + hcert = os.path.join(host_dir, '%s.pem' % server) + if os.path.exists(hcert): + return hcert, True + return hcert, False + + def _get_ca_cert_paths(self, ca_dir): + certs = [os.path.join(root, name) + for name in [ + name for (root, dirs, files) in os.walk(ca_dir) + for name in files + ] + if name.endswith('.pem')] + return certs + + def _fetch_and_store_cert(self, server, port, path): + ''' + Grabs a certificate from a server and writes it to + a given path. + ''' + try: + cert = ssl.get_server_certificate((server, port)) + except Exception as e: + raise cfg.Error(_('Could not retrieve initial ' + 'certificate from controller %(server)s. ' + 'Error details: %(error)s') % + {'server': server, 'error': str(e)}) + + LOG.warning(_("Storing to certificate for host %(server)s " + "at %(path)s") % {'server': server, + 'path': path}) + self._file_put_contents(path, cert) + + return cert + + def _file_put_contents(self, path, contents): + # Simple method to write to file. + # Created for easy Mocking + with open(path, 'w') as handle: + handle.write(contents) + + def server_failure(self, resp, ignore_codes=[]): + """Define failure codes as required. + + Note: We assume 301-303 is a failure, and try the next server in + the server pool. 
+ """ + return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes) + + def action_success(self, resp): + """Defining success codes as required. + + Note: We assume any valid 2xx as being successful response. + """ + return resp[0] in SUCCESS_CODES + + @utils.synchronized('bsn-rest-call') + def rest_call(self, action, resource, data, headers, ignore_codes, + timeout=False): + hash_handler = cdb.HashHandler(context=self.pop_context()) + good_first = sorted(self.servers, key=lambda x: x.failed) + first_response = None + for active_server in good_first: + ret = active_server.rest_call(action, resource, data, headers, + timeout, + reconnect=self.always_reconnect, + hash_handler=hash_handler) + # If inconsistent, do a full synchronization + if ret[0] == httplib.CONFLICT: + if not self.get_topo_function: + raise cfg.Error(_('Server requires synchronization, ' + 'but no topology function was defined.')) + data = self.get_topo_function(**self.get_topo_function_args) + active_server.rest_call('PUT', TOPOLOGY_PATH, data, + timeout=None) + # Store the first response as the error to be bubbled up to the + # user since it was a good server. Subsequent servers will most + # likely be cluster slaves and won't have a useful error for the + # user (e.g. 302 redirect to master) + if not first_response: + first_response = ret + if not self.server_failure(ret, ignore_codes): + active_server.failed = False + return ret + else: + LOG.error(_('ServerProxy: %(action)s failure for servers: ' + '%(server)r Response: %(response)s'), + {'action': action, + 'server': (active_server.server, + active_server.port), + 'response': ret[3]}) + LOG.error(_("ServerProxy: Error details: status=%(status)d, " + "reason=%(reason)r, ret=%(ret)s, data=%(data)r"), + {'status': ret[0], 'reason': ret[1], 'ret': ret[2], + 'data': ret[3]}) + active_server.failed = True + + # All servers failed, reset server list and try again next time + LOG.error(_('ServerProxy: %(action)s failure for all servers: ' + '%(server)r'), + {'action': action, + 'server': tuple((s.server, + s.port) for s in self.servers)}) + return first_response + + def rest_action(self, action, resource, data='', errstr='%s', + ignore_codes=[], headers={}, timeout=False): + """ + Wrapper for rest_call that verifies success and raises a + RemoteRestError on failure with a provided error string + By default, 404 errors on DELETE calls are ignored because + they already do not exist on the backend. 
+ """ + if not ignore_codes and action == 'DELETE': + ignore_codes = [404] + resp = self.rest_call(action, resource, data, headers, ignore_codes, + timeout) + if self.server_failure(resp, ignore_codes): + LOG.error(errstr, resp[2]) + raise RemoteRestError(reason=resp[2], status=resp[0]) + if resp[0] in ignore_codes: + LOG.warning(_("NeutronRestProxyV2: Received and ignored error " + "code %(code)s on %(action)s action to resource " + "%(resource)s"), + {'code': resp[2], 'action': action, + 'resource': resource}) + return resp + + def rest_create_router(self, tenant_id, router): + resource = ROUTER_RESOURCE_PATH % tenant_id + data = {"router": router} + errstr = _("Unable to create remote router: %s") + self.rest_action('POST', resource, data, errstr) + + def rest_update_router(self, tenant_id, router, router_id): + resource = ROUTERS_PATH % (tenant_id, router_id) + data = {"router": router} + errstr = _("Unable to update remote router: %s") + self.rest_action('PUT', resource, data, errstr) + + def rest_delete_router(self, tenant_id, router_id): + resource = ROUTERS_PATH % (tenant_id, router_id) + errstr = _("Unable to delete remote router: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def rest_add_router_interface(self, tenant_id, router_id, intf_details): + resource = ROUTER_INTF_OP_PATH % (tenant_id, router_id) + data = {"interface": intf_details} + errstr = _("Unable to add router interface: %s") + self.rest_action('POST', resource, data, errstr) + + def rest_remove_router_interface(self, tenant_id, router_id, interface_id): + resource = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id) + errstr = _("Unable to delete remote intf: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def rest_create_network(self, tenant_id, network): + resource = NET_RESOURCE_PATH % tenant_id + data = {"network": network} + errstr = _("Unable to create remote network: %s") + self.rest_action('POST', resource, data, errstr) + + def rest_update_network(self, tenant_id, net_id, network): + resource = NETWORKS_PATH % (tenant_id, net_id) + data = {"network": network} + errstr = _("Unable to update remote network: %s") + self.rest_action('PUT', resource, data, errstr) + + def rest_delete_network(self, tenant_id, net_id): + resource = NETWORKS_PATH % (tenant_id, net_id) + errstr = _("Unable to update remote network: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def rest_create_port(self, tenant_id, net_id, port): + resource = ATTACHMENT_PATH % (tenant_id, net_id, port["id"]) + data = {"port": port} + device_id = port.get("device_id") + if not port["mac_address"] or not device_id: + # controller only cares about ports attached to devices + LOG.warning(_("No device MAC attached to port %s. 
" + "Skipping notification to controller."), port["id"]) + return + data["attachment"] = {"id": device_id, + "mac": port["mac_address"]} + errstr = _("Unable to create remote port: %s") + self.rest_action('PUT', resource, data, errstr) + + def rest_delete_port(self, tenant_id, network_id, port_id): + resource = ATTACHMENT_PATH % (tenant_id, network_id, port_id) + errstr = _("Unable to delete remote port: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def rest_update_port(self, tenant_id, net_id, port): + # Controller has no update operation for the port endpoint + # the create PUT method will replace + self.rest_create_port(tenant_id, net_id, port) + + def rest_create_floatingip(self, tenant_id, floatingip): + resource = FLOATINGIPS_PATH % (tenant_id, floatingip['id']) + errstr = _("Unable to create floating IP: %s") + self.rest_action('PUT', resource, errstr=errstr) + + def rest_update_floatingip(self, tenant_id, floatingip, oldid): + resource = FLOATINGIPS_PATH % (tenant_id, oldid) + errstr = _("Unable to update floating IP: %s") + self.rest_action('PUT', resource, errstr=errstr) + + def rest_delete_floatingip(self, tenant_id, oldid): + resource = FLOATINGIPS_PATH % (tenant_id, oldid) + errstr = _("Unable to delete floating IP: %s") + self.rest_action('DELETE', resource, errstr=errstr) + + def _consistency_watchdog(self, polling_interval=60): + if 'consistency' not in self.get_capabilities(): + LOG.warning(_("Backend server(s) do not support automated " + "consitency checks.")) + return + if not polling_interval: + LOG.warning(_("Consistency watchdog disabled by polling interval " + "setting of %s."), polling_interval) + return + while True: + # If consistency is supported, all we have to do is make any + # rest call and the consistency header will be added. If it + # doesn't match, the backend will return a synchronization error + # that will be handled by the rest_action. + eventlet.sleep(polling_interval) + try: + self.rest_action('GET', HEALTH_PATH) + except Exception: + LOG.exception(_("Encountered an error checking controller " + "health.")) + + +class HTTPSConnectionWithValidation(httplib.HTTPSConnection): + + # If combined_cert is None, the connection will continue without + # any certificate validation. + combined_cert = None + + def connect(self): + try: + sock = socket.create_connection((self.host, self.port), + self.timeout, self.source_address) + except AttributeError: + # python 2.6 doesn't have the source_address attribute + sock = socket.create_connection((self.host, self.port), + self.timeout) + if self._tunnel_host: + self.sock = sock + self._tunnel() + + if self.combined_cert: + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=self.combined_cert) + else: + self.sock = ssl.wrap_socket(sock, self.key_file, + self.cert_file, + cert_reqs=ssl.CERT_NONE) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/tests/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/tests/__init__.py new file mode 100644 index 00000000..131b31cc --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 Big Switch Networks, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/tests/test_server.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/tests/test_server.py new file mode 100644 index 00000000..90e680bb --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/tests/test_server.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python +# Copyright 2012, Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mandeep Dhami, Big Switch Networks, Inc. + +"""Test server mocking a REST based network ctrl. + +Used for NeutronRestProxy tests +""" +from __future__ import print_function + +import re + +from six import moves +from wsgiref import simple_server + +from neutron.openstack.common import jsonutils as json + + +class TestNetworkCtrl(object): + + def __init__(self, host='', port=8000, + default_status='404 Not Found', + default_response='404 Not Found', + debug=False): + self.host = host + self.port = port + self.default_status = default_status + self.default_response = default_response + self.debug = debug + self.debug_env = False + self.debug_resp = False + self.matches = [] + + def match(self, prior, method_regexp, uri_regexp, handler, data=None, + multi=True): + """Add to the list of expected inputs. + + The incoming request is matched in the order of priority. For the same + priority, the oldest registered match is tried first. + + :param prior: integer priority of this match (e.g. 100) + :param method_regexp: regexp to match method (e.g. 'PUT|POST') + :param uri_regexp: regexp to match uri (e.g. '/quantum/v?.?/') + :param handler: function with signature: + lambda(method, uri, body, **kwargs) : status, body + where + - method: HTTP method for this request + - uri: URI for this HTTP request + - body: body of this HTTP request + - kwargs are: + - data: data object that was in the match call + - node: TestNetworkCtrl object itself + - id: offset of the matching tuple + and the return value is: + (status, body) where: + - status: HTTP resp status (e.g. '200 OK'). + If None, use default_status + - body: HTTP resp body.
If None, use '' + """ + assert int(prior) == prior, 'Priority should an integer be >= 0' + assert prior >= 0, 'Priority should an integer be >= 0' + + lo, hi = 0, len(self.matches) + while lo < hi: + mid = (lo + hi) // 2 + if prior < self.matches[mid]: + hi = mid + else: + lo = mid + 1 + self.matches.insert(lo, (prior, method_regexp, uri_regexp, handler, + data, multi)) + + def remove_id(self, id_): + assert id_ >= 0, 'remove_id: id < 0' + assert id_ <= len(self.matches), 'remove_id: id > len()' + self.matches.pop(id_) + + def request_handler(self, method, uri, body): + retstatus = self.default_status + retbody = self.default_response + for i in moves.xrange(len(self.matches)): + (prior, method_regexp, uri_regexp, handler, data, multi) = \ + self.matches[i] + if re.match(method_regexp, method) and re.match(uri_regexp, uri): + kwargs = { + 'data': data, + 'node': self, + 'id': i, + } + retstatus, retbody = handler(method, uri, body, **kwargs) + if multi is False: + self.remove_id(i) + break + if retbody is None: + retbody = '' + return (retstatus, retbody) + + def server(self): + def app(environ, start_response): + uri = environ['PATH_INFO'] + method = environ['REQUEST_METHOD'] + headers = [('Content-type', 'text/json')] + content_len_str = environ['CONTENT_LENGTH'] + + content_len = 0 + request_data = None + if content_len_str: + content_len = int(content_len_str) + request_data = environ.get('wsgi.input').read(content_len) + if request_data: + try: + request_data = json.loads(request_data) + except Exception: + # OK for it not to be json! Ignore it + pass + + if self.debug: + print('\n') + if self.debug_env: + print('environ:') + for (key, value) in sorted(environ.iteritems()): + print(' %16s : %s' % (key, value)) + + print('%s %s' % (method, uri)) + if request_data: + print('%s' % + json.dumps(request_data, sort_keys=True, indent=4)) + + status, body = self.request_handler(method, uri, None) + body_data = None + if body: + try: + body_data = json.loads(body) + except Exception: + # OK for it not to be json! Ignore it + pass + + start_response(status, headers) + if self.debug: + if self.debug_env: + print('%s: %s' % ('Response', + json.dumps(body_data, sort_keys=True, indent=4))) + return body + return simple_server.make_server(self.host, self.port, app) + + def run(self): + print("Serving on port %d ..." % self.port) + try: + self.server().serve_forever() + except KeyboardInterrupt: + pass + + +if __name__ == "__main__": + import sys + + port = 8899 + if len(sys.argv) > 1: + port = int(sys.argv[1]) + + debug = False + if len(sys.argv) > 2: + if sys.argv[2].lower() in ['debug', 'true']: + debug = True + + ctrl = TestNetworkCtrl(port=port, + default_status='200 OK', + default_response='{"status":"200 OK"}', + debug=debug) + ctrl.match(100, 'GET', '/test', + lambda m, u, b, **k: ('200 OK', '["200 OK"]')) + ctrl.run() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/vcsversion.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/vcsversion.py new file mode 100644 index 00000000..9fa984ab --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/vcsversion.py @@ -0,0 +1,25 @@ +# Copyright 2013 Big Switch Networks, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com +# +version_info = {'branch_nick': u'neutron/trunk', + 'revision_id': u'1', + 'revno': 0} + + +NEUTRONRESTPROXY_VERSION = ['2013', '1', None] + + +FINAL = False # This becomes true at Release Candidate time diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/version.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/version.py new file mode 100644 index 00000000..2e757f23 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/bigswitch/version.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# Copyright 2012 OpenStack Foundation +# Copyright 2012, Big Switch Networks, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Based on openstack generic code +# @author: Mandeep Dhami, Big Switch Networks, Inc. + +"""Determine version of NeutronRestProxy plugin""" +from __future__ import print_function + +from neutron.plugins.bigswitch import vcsversion + + +YEAR, COUNT, REVISION = vcsversion.NEUTRONRESTPROXY_VERSION + + +def canonical_version_string(): + return '.'.join(filter(None, + vcsversion.NEUTRONRESTPROXY_VERSION)) + + +def version_string(): + if vcsversion.FINAL: + return canonical_version_string() + else: + return '%s-dev' % (canonical_version_string(),) + + +def vcs_version_string(): + return "%s:%s" % (vcsversion.version_info['branch_nick'], + vcsversion.version_info['revision_id']) + + +def version_string_with_vcs(): + return "%s-%s" % (canonical_version_string(), vcs_version_string()) + + +if __name__ == "__main__": + print(version_string_with_vcs()) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/NeutronPlugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/NeutronPlugin.py new file mode 100644 index 00000000..8b473906 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/NeutronPlugin.py @@ -0,0 +1,506 @@ +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+#
+# Authors:
+# Shiv Haris (sharis@brocade.com)
+# Varma Bhupatiraju (vbhupati@#brocade.com)
+#
+# (Some parts adapted from LinuxBridge Plugin)
+# TODO(shiv) need support for security groups
+
+
+"""Implementation of Brocade Neutron Plugin."""
+
+from oslo.config import cfg
+
+from neutron.agent import securitygroups_rpc as sg_rpc
+from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
+from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
+from neutron.common import constants as q_const
+from neutron.common import rpc as n_rpc
+from neutron.common import topics
+from neutron.common import utils
+from neutron.db import agents_db
+from neutron.db import agentschedulers_db
+from neutron.db import api as db
+from neutron.db import db_base_plugin_v2
+from neutron.db import dhcp_rpc_base
+from neutron.db import external_net_db
+from neutron.db import extraroute_db
+from neutron.db import l3_agentschedulers_db
+from neutron.db import l3_rpc_base
+from neutron.db import portbindings_base
+from neutron.db import securitygroups_rpc_base as sg_db_rpc
+from neutron.extensions import portbindings
+from neutron.extensions import securitygroup as ext_sg
+from neutron.openstack.common import context
+from neutron.openstack.common import importutils
+from neutron.openstack.common import log as logging
+from neutron.plugins.brocade.db import models as brocade_db
+from neutron.plugins.brocade import vlanbm as vbm
+from neutron.plugins.common import constants as svc_constants
+
+
+LOG = logging.getLogger(__name__)
+PLUGIN_VERSION = 0.88
+AGENT_OWNER_PREFIX = "network:"
+NOS_DRIVER = 'neutron.plugins.brocade.nos.nosdriver.NOSdriver'
+
+SWITCH_OPTS = [cfg.StrOpt('address', default='',
+                          help=_('The address of the host to SSH to')),
+               cfg.StrOpt('username', default='',
+                          help=_('The SSH username to use')),
+               cfg.StrOpt('password', default='', secret=True,
+                          help=_('The SSH password to use')),
+               cfg.StrOpt('ostype', default='NOS',
+                          help=_('Currently unused'))
+               ]
+
+PHYSICAL_INTERFACE_OPTS = [cfg.StrOpt('physical_interface', default='eth0',
+                                      help=_('The network interface to use '
+                                             'when creating a port'))
+                           ]
+
+cfg.CONF.register_opts(SWITCH_OPTS, "SWITCH")
+cfg.CONF.register_opts(PHYSICAL_INTERFACE_OPTS, "PHYSICAL_INTERFACE")
+
+
+class BridgeRpcCallbacks(n_rpc.RpcCallback,
+                         dhcp_rpc_base.DhcpRpcCallbackMixin,
+                         l3_rpc_base.L3RpcCallbackMixin,
+                         sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
+    """Agent callback."""
+
+    RPC_API_VERSION = '1.2'
+    # Device names start with "tap"
+    # history
+    #   1.1 Support Security Group RPC
+    #   1.2 Support get_devices_details_list
+    TAP_PREFIX_LEN = 3
+
+    @classmethod
+    def get_port_from_device(cls, device):
+        """Get port from the brocade specific db."""
+
+        # TODO(shh) context is not being passed as
+        # an argument to this function;
+        #
+        # need to be fixed in:
+        # file: neutron/db/securtygroups_rpc_base.py
+        # function: securitygroup_rules_for_devices()
+        # which needs to pass context to us
+
+        # Doing what other plugins are doing
+        session = db.get_session()
+        port = brocade_db.get_port_from_device(
+            session, device[cls.TAP_PREFIX_LEN:])
+
+        # TODO(shiv): need to extend the db model to include device owners
+        # make it appear that the device owner is of type network
+        if port:
+            port['device'] = device
+            port['device_owner'] = AGENT_OWNER_PREFIX
+            port['binding:vif_type'] = 'bridge'
+        return port
+
+    def get_device_details(self, rpc_context, **kwargs):
+        """Agent requests device details."""
+
+        agent_id = kwargs.get('agent_id')
+        device =
kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = brocade_db.get_port(rpc_context, device[self.TAP_PREFIX_LEN:]) + if port: + entry = {'device': device, + 'vlan_id': port.vlan_id, + 'network_id': port.network_id, + 'port_id': port.port_id, + 'physical_network': port.physical_interface, + 'admin_state_up': port.admin_state_up + } + + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def get_devices_details_list(self, rpc_context, **kwargs): + return [ + self.get_device_details( + rpc_context, + device=device, + **kwargs + ) + for device in kwargs.pop('devices', []) + ] + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + + device = kwargs.get('device') + port = self.get_port_from_device(device) + if port: + entry = {'device': device, + 'exists': True} + # Set port status to DOWN + port_id = port['port_id'] + brocade_db.update_port_state(rpc_context, port_id, False) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + +class AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + """Agent side of the linux bridge rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. + + """ + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic = topic + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, physical_network, vlan_id): + self.fanout_cast(context, + self.make_msg('port_update', + port=port, + physical_network=physical_network, + vlan_id=vlan_id), + topic=self.topic_port_update) + + +class BrocadePluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + portbindings_base.PortBindingBaseMixin): + """BrocadePluginV2 is a Neutron plugin. + + Provides L2 Virtual Network functionality using VDX. Upper + layer driver class that interfaces to NETCONF layer below. + + """ + + def __init__(self): + """Initialize Brocade Plugin. + + Specify switch address and db configuration. + """ + + super(BrocadePluginV2, self).__init__() + self.supported_extension_aliases = ["binding", "security-group", + "external-net", "router", + "extraroute", "agent", + "l3_agent_scheduler", + "dhcp_agent_scheduler"] + + self.physical_interface = (cfg.CONF.PHYSICAL_INTERFACE. 
+ physical_interface) + self.base_binding_dict = self._get_base_binding_dict() + portbindings_base.register_port_dict_function() + self.ctxt = context.get_admin_context() + self.ctxt.session = db.get_session() + self._vlan_bitmap = vbm.VlanBitmap(self.ctxt) + self._setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + self.brocade_init() + + def brocade_init(self): + """Brocade specific initialization.""" + + self._switch = {'address': cfg.CONF.SWITCH.address, + 'username': cfg.CONF.SWITCH.username, + 'password': cfg.CONF.SWITCH.password + } + self._driver = importutils.import_object(NOS_DRIVER) + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.rpc_context = context.RequestContext('neutron', 'neutron', + is_admin=False) + self.conn = n_rpc.create_connection(new=True) + self.endpoints = [BridgeRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + self.notifier = AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + + def create_network(self, context, network): + """Create network. + + This call to create network translates to creation of port-profile on + the physical switch. + """ + + with context.session.begin(subtransactions=True): + net = super(BrocadePluginV2, self).create_network(context, network) + net_uuid = net['id'] + vlan_id = self._vlan_bitmap.get_next_vlan(None) + switch = self._switch + try: + self._driver.create_network(switch['address'], + switch['username'], + switch['password'], + vlan_id) + except Exception: + # Proper formatting + LOG.exception(_("Brocade NOS driver error")) + LOG.debug(_("Returning the allocated vlan (%d) to the pool"), + vlan_id) + self._vlan_bitmap.release_vlan(int(vlan_id)) + raise Exception(_("Brocade plugin raised exception, " + "check logs")) + + brocade_db.create_network(context, net_uuid, vlan_id) + self._process_l3_create(context, net, network['network']) + + LOG.info(_("Allocated vlan (%d) from the pool"), vlan_id) + return net + + def delete_network(self, context, net_id): + """Delete network. + + This call to delete the network translates to removing the + port-profile on the physical switch. 
+ """ + + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, net_id) + result = super(BrocadePluginV2, self).delete_network(context, + net_id) + # we must delete all ports in db first (foreign key constraint) + # there is no need to delete port in the driver (its a no-op) + # (actually: note there is no such call to the driver) + bports = brocade_db.get_ports(context, net_id) + for bport in bports: + brocade_db.delete_port(context, bport['port_id']) + + # find the vlan for this network + net = brocade_db.get_network(context, net_id) + vlan_id = net['vlan'] + + # Tell hw to do remove PP + switch = self._switch + try: + self._driver.delete_network(switch['address'], + switch['username'], + switch['password'], + vlan_id) + except Exception: + # Proper formatting + LOG.exception(_("Brocade NOS driver error")) + raise Exception(_("Brocade plugin raised exception, " + "check logs")) + + # now ok to delete the network + brocade_db.delete_network(context, net_id) + + # relinquish vlan in bitmap + self._vlan_bitmap.release_vlan(int(vlan_id)) + return result + + def update_network(self, context, id, network): + + session = context.session + with session.begin(subtransactions=True): + net = super(BrocadePluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def create_port(self, context, port): + """Create logical port on the switch.""" + + tenant_id = port['port']['tenant_id'] + network_id = port['port']['network_id'] + admin_state_up = port['port']['admin_state_up'] + + physical_interface = self.physical_interface + + with context.session.begin(subtransactions=True): + bnet = brocade_db.get_network(context, network_id) + vlan_id = bnet['vlan'] + + neutron_port = super(BrocadePluginV2, self).create_port(context, + port) + self._process_portbindings_create_and_update(context, + port['port'], + neutron_port) + interface_mac = neutron_port['mac_address'] + port_id = neutron_port['id'] + + switch = self._switch + + # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx + mac = self.mac_reformat_62to34(interface_mac) + try: + self._driver.associate_mac_to_network(switch['address'], + switch['username'], + switch['password'], + vlan_id, + mac) + except Exception: + # Proper formatting + LOG.exception(_("Brocade NOS driver error")) + raise Exception(_("Brocade plugin raised exception, " + "check logs")) + + # save to brocade persistent db + brocade_db.create_port(context, port_id, network_id, + physical_interface, + vlan_id, tenant_id, admin_state_up) + + # apply any extensions + return neutron_port + + def delete_port(self, context, port_id): + with context.session.begin(subtransactions=True): + neutron_port = self.get_port(context, port_id) + interface_mac = neutron_port['mac_address'] + # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx + mac = self.mac_reformat_62to34(interface_mac) + + brocade_port = brocade_db.get_port(context, port_id) + vlan_id = brocade_port['vlan_id'] + + switch = self._switch + try: + self._driver.dissociate_mac_from_network(switch['address'], + switch['username'], + switch['password'], + vlan_id, + mac) + except Exception: + LOG.exception(_("Brocade NOS driver error")) + raise Exception( + _("Brocade plugin raised exception, check logs")) + + super(BrocadePluginV2, self).delete_port(context, port_id) + brocade_db.delete_port(context, port_id) + + def update_port(self, context, port_id, port): + original_port = self.get_port(context, port_id) + session = 
context.session + port_updated = False + with session.begin(subtransactions=True): + # delete the port binding and read it with the new rules + if ext_sg.SECURITYGROUPS in port['port']: + port['port'][ext_sg.SECURITYGROUPS] = ( + self._get_security_groups_on_port(context, port)) + self._delete_port_security_group_bindings(context, port_id) + # process_port_create_security_group also needs port id + port['port']['id'] = port_id + self._process_port_create_security_group( + context, + port['port'], + port['port'][ext_sg.SECURITYGROUPS]) + port_updated = True + port_data = port['port'] + port = super(BrocadePluginV2, self).update_port( + context, port_id, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + if original_port['admin_state_up'] != port['admin_state_up']: + port_updated = True + + if (original_port['fixed_ips'] != port['fixed_ips'] or + not utils.compare_elements( + original_port.get(ext_sg.SECURITYGROUPS), + port.get(ext_sg.SECURITYGROUPS))): + self.notifier.security_groups_member_updated( + context, port.get(ext_sg.SECURITYGROUPS)) + + if port_updated: + self._notify_port_updated(context, port) + + return port + + def _notify_port_updated(self, context, port): + port_id = port['id'] + bport = brocade_db.get_port(context, port_id) + self.notifier.port_update(context, port, + bport.physical_interface, + bport.vlan_id) + + def _get_base_binding_dict(self): + binding = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + return binding + + def get_plugin_version(self): + """Get version number of the plugin.""" + return PLUGIN_VERSION + + @staticmethod + def mac_reformat_62to34(interface_mac): + """Transform MAC address format. + + Transforms from 6 groups of 2 hexadecimal numbers delimited by ":" + to 3 groups of 4 hexadecimals numbers delimited by ".". + + :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx + :type interface_mac: string + :returns: MAC address in the format xxxx.xxxx.xxxx + :rtype: string + """ + + mac = interface_mac.replace(":", "") + mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12] + return mac diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/README.md b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/README.md new file mode 100644 index 00000000..82b3ad89 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/README.md @@ -0,0 +1,112 @@ +Brocade Openstack Neutron Plugin +================================ + +* up-to-date version of these instructions are located at: + http://wiki.openstack.org/brocade-neutron-plugin + +* N.B.: Please see Prerequisites section regarding ncclient (netconf client library) + +* Supports VCS (Virtual Cluster of Switches) + + +Openstack Brocade Neutron Plugin implements the Neutron v2.0 API. + +This plugin is meant to orchestrate Brocade VCS switches running NOS, examples of these are: + + 1. VDX 67xx series of switches + 2. VDX 87xx series of switches + +Brocade Neutron plugin implements the Neutron v2.0 API. It uses NETCONF at the backend +to configure the Brocade switch. 
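(Illustrative only: a minimal sketch of the kind of ncclient/NETCONF exchange the plugin performs under the hood. The connection arguments mirror those used by the NOS driver in neutron/plugins/brocade/nos/nosdriver.py; the host, credentials, and XML payload below are placeholders, not values shipped with the plugin.)

    from ncclient import manager

    def trust_all_hosts(host, fingerprint):
        # same behaviour as the driver's nos_unknown_host_cb: accept any key
        return True

    # open a NETCONF-over-SSHv2 session to the VCS switch (placeholder values)
    mgr = manager.connect(host='192.0.2.10', port=22,
                          username='admin', password='password',
                          unknown_host_cb=trust_all_hosts)

    # push one rendered template into the running datastore, as the driver
    # does for VLAN interfaces and AMPP port-profiles
    vlan_config = '...'  # XML rendered from nos/nctemplates.py
    mgr.edit_config(target='running', config=vlan_config)
    mgr.close_session()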
+
+
+   +------------+        +------------+          +-------------+
+   |            |        |            |          |             |
+   |            |        |            |          |   Brocade   |
+   | Openstack  |  v2.0  |  Brocade   | NETCONF  | VCS Switch  |
+   | Neutron    +--------+  Neutron   +----------+             |
+   |            |        |  Plugin    |          |  VDX 67xx   |
+   |            |        |            |          |  VDX 87xx   |
+   |            |        |            |          |             |
+   |            |        |            |          |             |
+   +------------+        +------------+          +-------------+
+
+
+Directory Structure
+===================
+
+Normally you will have your Openstack directory structure as follows:
+
+    /opt/stack/nova/
+    /opt/stack/horizon/
+    ...
+    /opt/stack/neutron/neutron/plugins/
+
+Within this structure, Brocade plugin resides at:
+
+    /opt/stack/neutron/neutron/plugins/brocade
+
+
+Prerequisites
+=============
+
+This plugin requires installation of the python netconf client (ncclient) library:
+
+ncclient v0.3.1 - Python library for NETCONF clients available at http://github.com/brocade/ncclient
+
+ % git clone https://www.github.com/brocade/ncclient
+ % cd ncclient; sudo python ./setup.py install
+
+
+Configuration
+=============
+
+1. Specify to Neutron that you will be using the Brocade Plugin - this is done
+by setting the parameter core_plugin in Neutron:
+
+    core_plugin = neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2
+
+2. Physical switch configuration parameters and Brocade-specific database
+configuration are specified in the brocade.ini configuration file:
+
+ % cat /etc/neutron/plugins/brocade/brocade.ini
+ [SWITCH]
+ username = admin
+ password = password
+ address  =
+ ostype   = NOS
+
+ [database]
+ connection = mysql://root:pass@localhost/brocade_neutron?charset=utf8
+
+ (please see list of more configuration parameters in the brocade.ini file)
+
+Running Setup.py
+================
+
+Running setup.py with appropriate permissions will copy the default configuration
+file to /etc/neutron/plugins/brocade/brocade.ini. This file MUST be edited to
+suit your setup/environment.
+
+ % cd /opt/stack/neutron/neutron/plugins/brocade
+ % python setup.py
+
+
+Devstack
+========
+
+Please see special notes for devstack at:
+http://wiki.openstack.org/brocade-neutron-plugin
+
+In order to use Brocade Neutron Plugin, add the following lines in localrc; if the localrc file does
+not exist, create one:
+
+ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,neutron,q-svc,q-agt
+Q_PLUGIN=brocade
+
+As part of running devstack/stack.sh, the configuration file is copied as:
+
+ % cp /opt/stack/neutron/etc/neutron/plugins/brocade/brocade.ini /etc/neutron/plugins/brocade/brocade.ini
+
+(hence it is important to make any changes to the configuration in:
+/opt/stack/neutron/etc/neutron/plugins/brocade/brocade.ini)
+
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/__init__.py
new file mode 100644
index 00000000..ac507645
--- /dev/null
+++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2013 Brocade Communications System, Inc.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/db/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/db/__init__.py new file mode 100644 index 00000000..ac507645 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/db/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/db/models.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/db/models.py new file mode 100644 index 00000000..313427c5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/db/models.py @@ -0,0 +1,149 @@ +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Authors: +# Shiv Haris (sharis@brocade.com) +# Varma Bhupatiraju (vbhupati@#brocade.com) + + +"""Brocade specific database schema/model.""" + +import sqlalchemy as sa + +from neutron.db import model_base +from neutron.db import models_v2 + + +class BrocadeNetwork(model_base.BASEV2, models_v2.HasId): + """Schema for brocade network.""" + + vlan = sa.Column(sa.String(10)) + + +class BrocadePort(model_base.BASEV2): + """Schema for brocade port.""" + + port_id = sa.Column(sa.String(36), primary_key=True, default="") + network_id = sa.Column(sa.String(36), + sa.ForeignKey("brocadenetworks.id"), + nullable=False) + admin_state_up = sa.Column(sa.Boolean, nullable=False) + physical_interface = sa.Column(sa.String(36)) + vlan_id = sa.Column(sa.String(36)) + tenant_id = sa.Column(sa.String(36)) + + +def create_network(context, net_id, vlan): + """Create a brocade specific network/port-profiles.""" + + session = context.session + with session.begin(subtransactions=True): + net = BrocadeNetwork(id=net_id, vlan=vlan) + session.add(net) + + return net + + +def delete_network(context, net_id): + """Delete a brocade specific network/port-profiles.""" + + session = context.session + with session.begin(subtransactions=True): + net = (session.query(BrocadeNetwork).filter_by(id=net_id).first()) + if net is not None: + session.delete(net) + + +def get_network(context, net_id, fields=None): + """Get brocade specific network, with vlan extension.""" + + session = context.session + return (session.query(BrocadeNetwork).filter_by(id=net_id).first()) + + +def get_networks(context, filters=None, fields=None): + """Get all brocade specific networks.""" + + session = context.session + try: + nets = session.query(BrocadeNetwork).all() + return nets + except sa.exc.SQLAlchemyError: + return None + + +def create_port(context, port_id, network_id, physical_interface, + vlan_id, tenant_id, admin_state_up): + """Create a brocade specific port, has policy like vlan.""" + + # port_id is truncated: since the linux-bridge tap device names are + # based on truncated port id, this enables port lookups using + # tap devices + port_id = port_id[0:11] + session = context.session + with session.begin(subtransactions=True): + port = BrocadePort(port_id=port_id, + network_id=network_id, + physical_interface=physical_interface, + vlan_id=vlan_id, + admin_state_up=admin_state_up, + tenant_id=tenant_id) + session.add(port) + return port + + +def get_port(context, port_id): + """get a brocade specific port.""" + + port_id = port_id[0:11] + session = context.session + port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) + return port + + +def get_ports(context, network_id=None): + """get a brocade specific port.""" + + session = context.session + ports = (session.query(BrocadePort).filter_by(network_id=network_id).all()) + return ports + + +def delete_port(context, port_id): + """delete brocade specific port.""" + + port_id = port_id[0:11] + session = context.session + with session.begin(subtransactions=True): + port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) + if port is not None: + session.delete(port) + + +def get_port_from_device(session, port_id): + """get port from the tap device.""" + + # device is same as truncated port_id + port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) + return port + + +def update_port_state(context, port_id, admin_state_up): + """Update port attributes.""" + + port_id = port_id[0:11] + session = context.session + 
session.query(BrocadePort).filter_by( + port_id=port_id).update({'admin_state_up': admin_state_up}) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/__init__.py new file mode 100644 index 00000000..7f85d6b4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 Brocade Communications Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/fake_nosdriver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/fake_nosdriver.py new file mode 100644 index 00000000..5fabc0ec --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/fake_nosdriver.py @@ -0,0 +1,115 @@ +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""FAKE DRIVER, for unit tests purposes. + +Brocade NOS Driver implements NETCONF over SSHv2 for +Neutron network life-cycle management. +""" + + +class NOSdriver(): + """NOS NETCONF interface driver for Neutron network. 
+ + Fake: Handles life-cycle management of Neutron network, + leverages AMPP on NOS + (for use by unit tests, avoids touching any hardware) + """ + + def __init__(self): + pass + + def connect(self, host, username, password): + """Connect via SSH and initialize the NETCONF session.""" + pass + + def create_network(self, host, username, password, net_id): + """Creates a new virtual network.""" + pass + + def delete_network(self, host, username, password, net_id): + """Deletes a virtual network.""" + pass + + def associate_mac_to_network(self, host, username, password, + net_id, mac): + """Associates a MAC address to virtual network.""" + pass + + def dissociate_mac_from_network(self, host, username, password, + net_id, mac): + """Dissociates a MAC address from virtual network.""" + pass + + def create_vlan_interface(self, mgr, vlan_id): + """Configures a VLAN interface.""" + pass + + def delete_vlan_interface(self, mgr, vlan_id): + """Deletes a VLAN interface.""" + pass + + def get_port_profiles(self, mgr): + """Retrieves all port profiles.""" + pass + + def get_port_profile(self, mgr, name): + """Retrieves a port profile.""" + pass + + def create_port_profile(self, mgr, name): + """Creates a port profile.""" + pass + + def delete_port_profile(self, mgr, name): + """Deletes a port profile.""" + pass + + def activate_port_profile(self, mgr, name): + """Activates a port profile.""" + pass + + def deactivate_port_profile(self, mgr, name): + """Deactivates a port profile.""" + pass + + def associate_mac_to_port_profile(self, mgr, name, mac_address): + """Associates a MAC address to a port profile.""" + pass + + def dissociate_mac_from_port_profile(self, mgr, name, mac_address): + """Dissociates a MAC address from a port profile.""" + pass + + def create_vlan_profile_for_port_profile(self, mgr, name): + """Creates VLAN sub-profile for port profile.""" + pass + + def configure_l2_mode_for_vlan_profile(self, mgr, name): + """Configures L2 mode for VLAN sub-profile.""" + pass + + def configure_trunk_mode_for_vlan_profile(self, mgr, name): + """Configures trunk mode for VLAN sub-profile.""" + pass + + def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id): + """Configures allowed VLANs for VLAN sub-profile.""" + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/nctemplates.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/nctemplates.py new file mode 100644 index 00000000..9401130e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/nctemplates.py @@ -0,0 +1,202 @@ +# Copyright (c) 2013 Brocade Communications Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""NOS NETCONF XML Configuration Command Templates. 
+ +Interface Configuration Commands +""" + +# Create VLAN (vlan_id) +CREATE_VLAN_INTERFACE = """ + + + + + {vlan_id} + + + + +""" + +# Delete VLAN (vlan_id) +DELETE_VLAN_INTERFACE = """ + + + + + {vlan_id} + + + + +""" + +# +# AMPP Life-cycle Management Configuration Commands +# + +# Create AMPP port-profile (port_profile_name) +CREATE_PORT_PROFILE = """ + + + {name} + + +""" + +# Create VLAN sub-profile for port-profile (port_profile_name) +CREATE_VLAN_PROFILE_FOR_PORT_PROFILE = """ + + + {name} + + + +""" + +# Configure L2 mode for VLAN sub-profile (port_profile_name) +CONFIGURE_L2_MODE_FOR_VLAN_PROFILE = """ + + + {name} + + + + + +""" + +# Configure trunk mode for VLAN sub-profile (port_profile_name) +CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE = """ + + + {name} + + + + trunk + + + + + +""" + +# Configure allowed VLANs for VLAN sub-profile +# (port_profile_name, allowed_vlan, native_vlan) +CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE = """ + + + {name} + + + + + + {vlan_id} + + + + + + + +""" + +# Delete port-profile (port_profile_name) +DELETE_PORT_PROFILE = """ + + + {name} + + +""" + +# Activate port-profile (port_profile_name) +ACTIVATE_PORT_PROFILE = """ + + + + {name} + + + + +""" + +# Deactivate port-profile (port_profile_name) +DEACTIVATE_PORT_PROFILE = """ + + + + {name} + + + + +""" + +# Associate MAC address to port-profile (port_profile_name, mac_address) +ASSOCIATE_MAC_TO_PORT_PROFILE = """ + + + + {name} + + {mac_address} + + + + +""" + +# Dissociate MAC address from port-profile (port_profile_name, mac_address) +DISSOCIATE_MAC_FROM_PORT_PROFILE = """ + + + + {name} + + {mac_address} + + + + +""" + +# +# Custom RPC Commands +# + + +# +# Constants +# + +# Port profile naming convention for Neutron networks +OS_PORT_PROFILE_NAME = "openstack-profile-{id}" + +# Port profile filter expressions +PORT_PROFILE_XPATH_FILTER = "/port-profile" +PORT_PROFILE_NAME_XPATH_FILTER = "/port-profile[name='{name}']" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/nosdriver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/nosdriver.py new file mode 100644 index 00000000..8dc1cf0c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/nos/nosdriver.py @@ -0,0 +1,231 @@ +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Varma Bhupatiraju (vbhupati@#brocade.com) +# Shiv Haris (sharis@brocade.com) + + +"""Brocade NOS Driver implements NETCONF over SSHv2 for +Neutron network life-cycle management. +""" + +from ncclient import manager + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.brocade.nos import nctemplates as template + + +LOG = logging.getLogger(__name__) +SSH_PORT = 22 + + +def nos_unknown_host_cb(host, fingerprint): + """An unknown host callback. + + Returns `True` if it finds the key acceptable, + and `False` if not. 
This default callback for NOS always returns 'True' + (i.e. trusts all hosts for now). + """ + return True + + +class NOSdriver(): + """NOS NETCONF interface driver for Neutron network. + + Handles life-cycle management of Neutron network (leverages AMPP on NOS) + """ + + def __init__(self): + self.mgr = None + + def connect(self, host, username, password): + """Connect via SSH and initialize the NETCONF session.""" + + # Use the persisted NETCONF connection + if self.mgr and self.mgr.connected: + return self.mgr + + # Open new NETCONF connection + try: + self.mgr = manager.connect(host=host, port=SSH_PORT, + username=username, password=password, + unknown_host_cb=nos_unknown_host_cb) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Connect failed to switch: %s"), e) + + LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"), + dict(host=host, ssh_port=SSH_PORT)) + return self.mgr + + def close_session(self): + """Close NETCONF session.""" + if self.mgr: + self.mgr.close_session() + self.mgr = None + + def create_network(self, host, username, password, net_id): + """Creates a new virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.create_vlan_interface(mgr, net_id) + self.create_port_profile(mgr, name) + self.create_vlan_profile_for_port_profile(mgr, name) + self.configure_l2_mode_for_vlan_profile(mgr, name) + self.configure_trunk_mode_for_vlan_profile(mgr, name) + self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id) + self.activate_port_profile(mgr, name) + except Exception as ex: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error: %s"), ex) + self.close_session() + + def delete_network(self, host, username, password, net_id): + """Deletes a virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.deactivate_port_profile(mgr, name) + self.delete_port_profile(mgr, name) + self.delete_vlan_interface(mgr, net_id) + except Exception as ex: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error: %s"), ex) + self.close_session() + + def associate_mac_to_network(self, host, username, password, + net_id, mac): + """Associates a MAC address to virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.associate_mac_to_port_profile(mgr, name, mac) + except Exception as ex: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error: %s"), ex) + self.close_session() + + def dissociate_mac_from_network(self, host, username, password, + net_id, mac): + """Dissociates a MAC address from virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.dissociate_mac_from_port_profile(mgr, name, mac) + except Exception as ex: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error: %s"), ex) + self.close_session() + + def create_vlan_interface(self, mgr, vlan_id): + """Configures a VLAN interface.""" + + confstr = template.CREATE_VLAN_INTERFACE.format(vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) + + def delete_vlan_interface(self, mgr, vlan_id): + """Deletes a VLAN interface.""" + + confstr = template.DELETE_VLAN_INTERFACE.format(vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) + + def 
get_port_profiles(self, mgr): + """Retrieves all port profiles.""" + + filterstr = template.PORT_PROFILE_XPATH_FILTER + response = mgr.get_config(source='running', + filter=('xpath', filterstr)).data_xml + return response + + def get_port_profile(self, mgr, name): + """Retrieves a port profile.""" + + filterstr = template.PORT_PROFILE_NAME_XPATH_FILTER.format(name=name) + response = mgr.get_config(source='running', + filter=('xpath', filterstr)).data_xml + return response + + def create_port_profile(self, mgr, name): + """Creates a port profile.""" + + confstr = template.CREATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def delete_port_profile(self, mgr, name): + """Deletes a port profile.""" + + confstr = template.DELETE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def activate_port_profile(self, mgr, name): + """Activates a port profile.""" + + confstr = template.ACTIVATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def deactivate_port_profile(self, mgr, name): + """Deactivates a port profile.""" + + confstr = template.DEACTIVATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def associate_mac_to_port_profile(self, mgr, name, mac_address): + """Associates a MAC address to a port profile.""" + + confstr = template.ASSOCIATE_MAC_TO_PORT_PROFILE.format( + name=name, mac_address=mac_address) + mgr.edit_config(target='running', config=confstr) + + def dissociate_mac_from_port_profile(self, mgr, name, mac_address): + """Dissociates a MAC address from a port profile.""" + + confstr = template.DISSOCIATE_MAC_FROM_PORT_PROFILE.format( + name=name, mac_address=mac_address) + mgr.edit_config(target='running', config=confstr) + + def create_vlan_profile_for_port_profile(self, mgr, name): + """Creates VLAN sub-profile for port profile.""" + + confstr = template.CREATE_VLAN_PROFILE_FOR_PORT_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_l2_mode_for_vlan_profile(self, mgr, name): + """Configures L2 mode for VLAN sub-profile.""" + + confstr = template.CONFIGURE_L2_MODE_FOR_VLAN_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_trunk_mode_for_vlan_profile(self, mgr, name): + """Configures trunk mode for VLAN sub-profile.""" + + confstr = template.CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id): + """Configures allowed VLANs for VLAN sub-profile.""" + + confstr = template.CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE.format( + name=name, vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/vlanbm.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/vlanbm.py new file mode 100644 index 00000000..e45492f5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/brocade/vlanbm.py @@ -0,0 +1,58 @@ +# Copyright 2013 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Shiv Haris (sharis@brocade.com) +# Varma Bhupatiraju (vbhupati@#brocade.com) + + +"""A Vlan Bitmap class to handle allocation/de-allocation of vlan ids.""" +from six import moves + +from neutron.common import constants +from neutron.plugins.brocade.db import models as brocade_db + + +MIN_VLAN = constants.MIN_VLAN_TAG + 1 +MAX_VLAN = constants.MAX_VLAN_TAG + + +class VlanBitmap(object): + """Setup a vlan bitmap for allocation/de-allocation.""" + + # Keep track of the vlans that have been allocated/de-allocated + # uses a bitmap to do this + + def __init__(self, ctxt): + """Initialize the vlan as a set.""" + self.vlans = set(int(net['vlan']) + for net in brocade_db.get_networks(ctxt) + if net['vlan'] + ) + + def get_next_vlan(self, vlan_id=None): + """Try to get a specific vlan if requested or get the next vlan.""" + min_vlan_search = vlan_id or MIN_VLAN + max_vlan_search = (vlan_id and vlan_id + 1) or MAX_VLAN + + for vlan in moves.xrange(min_vlan_search, max_vlan_search): + if vlan not in self.vlans: + self.vlans.add(vlan) + return vlan + + def release_vlan(self, vlan_id): + """Return the vlan to the pool.""" + if vlan_id in self.vlans: + self.vlans.remove(vlan_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/__init__.py new file mode 100644 index 00000000..9a446dd8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/__init__.py new file mode 100644 index 00000000..51c00019 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Sumit Naiksatam, Cisco Systems, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_constants.py new file mode 100644 index 00000000..9d8d7d53 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_constants.py @@ -0,0 +1,109 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. + + +# Attachment attributes +INSTANCE_ID = 'instance_id' +TENANT_ID = 'tenant_id' +TENANT_NAME = 'tenant_name' +HOST_NAME = 'host_name' + +# Network attributes +NET_ID = 'id' +NET_NAME = 'name' +NET_VLAN_ID = 'vlan_id' +NET_VLAN_NAME = 'vlan_name' +NET_PORTS = 'ports' + +CREDENTIAL_ID = 'credential_id' +CREDENTIAL_NAME = 'credential_name' +CREDENTIAL_USERNAME = 'user_name' +CREDENTIAL_PASSWORD = 'password' +CREDENTIAL_TYPE = 'type' +MASKED_PASSWORD = '********' + +USERNAME = 'username' +PASSWORD = 'password' + +LOGGER_COMPONENT_NAME = "cisco_plugin" + +NEXUS_PLUGIN = 'nexus_plugin' +VSWITCH_PLUGIN = 'vswitch_plugin' + +DEVICE_IP = 'device_ip' + +NETWORK_ADMIN = 'network_admin' + +NETWORK = 'network' +PORT = 'port' +BASE_PLUGIN_REF = 'base_plugin_ref' +CONTEXT = 'context' +SUBNET = 'subnet' + +#### N1Kv CONSTANTS +# Special vlan_id value in n1kv_vlan_allocations table indicating flat network +FLAT_VLAN_ID = -1 + +# Topic for tunnel notifications between the plugin and agent +TUNNEL = 'tunnel' + +# Maximum VXLAN range configurable for one network profile. 
+MAX_VXLAN_RANGE = 1000000 + +# Values for network_type +NETWORK_TYPE_FLAT = 'flat' +NETWORK_TYPE_VLAN = 'vlan' +NETWORK_TYPE_VXLAN = 'vxlan' +NETWORK_TYPE_LOCAL = 'local' +NETWORK_TYPE_NONE = 'none' +NETWORK_TYPE_TRUNK = 'trunk' +NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment' + +# Values for network sub_type +NETWORK_TYPE_OVERLAY = 'overlay' +NETWORK_SUBTYPE_NATIVE_VXLAN = 'native_vxlan' +NETWORK_SUBTYPE_TRUNK_VLAN = NETWORK_TYPE_VLAN +NETWORK_SUBTYPE_TRUNK_VXLAN = NETWORK_TYPE_OVERLAY + +# Prefix for VM Network name +VM_NETWORK_NAME_PREFIX = 'vmn_' + +DEFAULT_HTTP_TIMEOUT = 15 +SET = 'set' +INSTANCE = 'instance' +PROPERTIES = 'properties' +NAME = 'name' +ID = 'id' +POLICY = 'policy' +TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET' +ENCAPSULATIONS = 'encapsulations' +STATE = 'state' +ONLINE = 'online' +MAPPINGS = 'mappings' +MAPPING = 'mapping' +SEGMENTS = 'segments' +SEGMENT = 'segment' +BRIDGE_DOMAIN_SUFFIX = '_bd' +LOGICAL_NETWORK_SUFFIX = '_log_net' +ENCAPSULATION_PROFILE_SUFFIX = '_profile' + +UUID_LENGTH = 36 + +# Nexus vlan and vxlan segment range +NEXUS_VLAN_RESERVED_MIN = 3968 +NEXUS_VLAN_RESERVED_MAX = 4047 +NEXUS_VXLAN_MIN = 4096 +NEXUS_VXLAN_MAX = 16000000 diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_credentials_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_credentials_v2.py new file mode 100644 index 00000000..c7af2bca --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_credentials_v2.py @@ -0,0 +1,59 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. 
+ +import logging as LOG + +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_exceptions as cexc +from neutron.plugins.cisco.common import config +from neutron.plugins.cisco.db import network_db_v2 as cdb + +LOG.basicConfig(level=LOG.WARN) +LOG.getLogger(const.LOGGER_COMPONENT_NAME) + + +class Store(object): + """Credential Store.""" + + @staticmethod + def initialize(): + dev_dict = config.get_device_dictionary() + for key in dev_dict: + dev_id, dev_ip, dev_key = key + if dev_key == const.USERNAME: + try: + cdb.add_credential( + dev_ip, + dev_dict[dev_id, dev_ip, const.USERNAME], + dev_dict[dev_id, dev_ip, const.PASSWORD], + dev_id) + except cexc.CredentialAlreadyExists: + # We are quietly ignoring this, since it only happens + # if this class module is loaded more than once, in + # which case, the credentials are already populated + pass + + @staticmethod + def get_username(cred_name): + """Get the username.""" + credential = cdb.get_credential_name(cred_name) + return credential[const.CREDENTIAL_USERNAME] + + @staticmethod + def get_password(cred_name): + """Get the password.""" + credential = cdb.get_credential_name(cred_name) + return credential[const.CREDENTIAL_PASSWORD] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_exceptions.py new file mode 100644 index 00000000..0a7fede3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_exceptions.py @@ -0,0 +1,234 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Rohit Agarwalla, Cisco Systems, Inc. + +"""Exceptions used by the Cisco plugin.""" + +from neutron.common import exceptions + + +class NetworkSegmentIDNotFound(exceptions.NeutronException): + """Segmentation ID for network is not found.""" + message = _("Segmentation ID for network %(net_id)s is not found.") + + +class NoMoreNics(exceptions.NeutronException): + """No more dynamic NICs are available in the system.""" + message = _("Unable to complete operation. No more dynamic NICs are " + "available in the system.") + + +class NetworkVlanBindingAlreadyExists(exceptions.NeutronException): + """Binding cannot be created, since it already exists.""" + message = _("NetworkVlanBinding for %(vlan_id)s and network " + "%(network_id)s already exists.") + + +class VlanIDNotFound(exceptions.NeutronException): + """VLAN ID cannot be found.""" + message = _("Vlan ID %(vlan_id)s not found.") + + +class VlanIDOutsidePool(exceptions.NeutronException): + """VLAN ID cannot be allocated, since it is outside the configured pool.""" + message = _("Unable to complete operation. 
VLAN ID exists outside of the " + "configured network segment range.") + + +class VlanIDNotAvailable(exceptions.NeutronException): + """No VLAN ID available.""" + message = _("No Vlan ID available.") + + +class QosNotFound(exceptions.NeutronException): + """QoS level with this ID cannot be found.""" + message = _("QoS level %(qos_id)s could not be found " + "for tenant %(tenant_id)s.") + + +class QosNameAlreadyExists(exceptions.NeutronException): + """QoS Name already exists.""" + message = _("QoS level with name %(qos_name)s already exists " + "for tenant %(tenant_id)s.") + + +class CredentialNotFound(exceptions.NeutronException): + """Credential with this ID cannot be found.""" + message = _("Credential %(credential_id)s could not be found.") + + +class CredentialNameNotFound(exceptions.NeutronException): + """Credential Name could not be found.""" + message = _("Credential %(credential_name)s could not be found.") + + +class CredentialAlreadyExists(exceptions.NeutronException): + """Credential already exists.""" + message = _("Credential %(credential_name)s already exists.") + + +class ProviderNetworkExists(exceptions.NeutronException): + """Provider network already exists.""" + message = _("Provider network %s already exists") + + +class NexusComputeHostNotConfigured(exceptions.NeutronException): + """Connection to compute host is not configured.""" + message = _("Connection to %(host)s is not configured.") + + +class NexusConnectFailed(exceptions.NeutronException): + """Failed to connect to Nexus switch.""" + message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.") + + +class NexusConfigFailed(exceptions.NeutronException): + """Failed to configure Nexus switch.""" + message = _("Failed to configure Nexus: %(config)s. Reason: %(exc)s.") + + +class NexusPortBindingNotFound(exceptions.NeutronException): + """NexusPort Binding is not present.""" + message = _("Nexus Port Binding (%(filters)s) is not present.") + + def __init__(self, **kwargs): + filters = ','.join('%s=%s' % i for i in kwargs.items()) + super(NexusPortBindingNotFound, self).__init__(filters=filters) + + +class NoNexusSviSwitch(exceptions.NeutronException): + """No usable nexus switch found.""" + message = _("No usable Nexus switch found to create SVI interface.") + + +class PortVnicBindingAlreadyExists(exceptions.NeutronException): + """PortVnic Binding already exists.""" + message = _("PortVnic Binding %(port_id)s already exists.") + + +class PortVnicNotFound(exceptions.NeutronException): + """PortVnic Binding is not present.""" + message = _("PortVnic Binding %(port_id)s is not present.") + + +class SubnetNotSpecified(exceptions.NeutronException): + """Subnet id not specified.""" + message = _("No subnet_id specified for router gateway.") + + +class SubnetInterfacePresent(exceptions.NeutronException): + """Subnet SVI interface already exists.""" + message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.") + + +class PortIdForNexusSvi(exceptions.NeutronException): + """Port Id specified for Nexus SVI.""" + message = _('Nexus hardware router gateway only uses Subnet Ids.') + + +class InvalidDetach(exceptions.NeutronException): + message = _("Unable to unplug the attachment %(att_id)s from port " + "%(port_id)s for network %(net_id)s. 
The attachment " + "%(att_id)s does not exist.") + + +class PolicyProfileAlreadyExists(exceptions.NeutronException): + """Policy Profile cannot be created since it already exists.""" + message = _("Policy Profile %(profile_id)s " + "already exists.") + + +class PolicyProfileIdNotFound(exceptions.NotFound): + """Policy Profile with the given UUID cannot be found.""" + message = _("Policy Profile %(profile_id)s could not be found.") + + +class NetworkProfileAlreadyExists(exceptions.NeutronException): + """Network Profile cannot be created since it already exists.""" + message = _("Network Profile %(profile_id)s " + "already exists.") + + +class NetworkProfileNotFound(exceptions.NotFound): + """Network Profile with the given UUID/name cannot be found.""" + message = _("Network Profile %(profile)s could not be found.") + + +class NetworkProfileInUse(exceptions.InUse): + """Network Profile with the given UUID is in use.""" + message = _("One or more network segments belonging to network " + "profile %(profile)s is in use.") + + +class NoMoreNetworkSegments(exceptions.NoNetworkAvailable): + """Network segments exhausted for the given network profile.""" + message = _("No more segments available in network segment pool " + "%(network_profile_name)s.") + + +class VMNetworkNotFound(exceptions.NotFound): + """VM Network with the given name cannot be found.""" + message = _("VM Network %(name)s could not be found.") + + +class VxlanIDInUse(exceptions.InUse): + """VXLAN ID is in use.""" + message = _("Unable to create the network. " + "The VXLAN ID %(vxlan_id)s is in use.") + + +class VxlanIDNotFound(exceptions.NotFound): + """VXLAN ID cannot be found.""" + message = _("Vxlan ID %(vxlan_id)s not found.") + + +class VxlanIDOutsidePool(exceptions.NeutronException): + """VXLAN ID cannot be allocated, as it is outside the configured pool.""" + message = _("Unable to complete operation. VXLAN ID exists outside of the " + "configured network segment range.") + + +class VSMConnectionFailed(exceptions.ServiceUnavailable): + """Connection to VSM failed.""" + message = _("Connection to VSM failed: %(reason)s.") + + +class VSMError(exceptions.NeutronException): + """Error has occurred on the VSM.""" + message = _("Internal VSM Error: %(reason)s.") + + +class NetworkBindingNotFound(exceptions.NotFound): + """Network Binding for network cannot be found.""" + message = _("Network Binding for network %(network_id)s could " + "not be found.") + + +class PortBindingNotFound(exceptions.NotFound): + """Port Binding for port cannot be found.""" + message = _("Port Binding for port %(port_id)s could " + "not be found.") + + +class ProfileTenantBindingNotFound(exceptions.NotFound): + """Profile to Tenant binding for given profile ID cannot be found.""" + message = _("Profile-Tenant binding for profile %(profile_id)s could " + "not be found.") + + +class NoClusterFound(exceptions.NotFound): + """No service cluster found to perform multi-segment bridging.""" + message = _("No service cluster found to perform multi-segment bridging.") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_faults.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_faults.py new file mode 100644 index 00000000..9af6e060 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/cisco_faults.py @@ -0,0 +1,136 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. 
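# --- Illustrative note (not part of the patch) ---------------------------
# The exception classes defined above follow the usual NeutronException
# pattern: each subclass carries a "message" format string, and the keyword
# arguments supplied at raise time are interpolated into it. A minimal,
# self-contained sketch of that mechanism; the class below is a stand-in
# written for this note, not the real neutron.common.exceptions import.

class _SketchNeutronException(Exception):
    """Stand-in mimicking the NeutronException message-formatting idea."""
    message = "An unknown exception occurred."

    def __init__(self, **kwargs):
        # Interpolate the keyword arguments into the message template.
        super(_SketchNeutronException, self).__init__(self.message % kwargs)


class _SketchVlanIDNotFound(_SketchNeutronException):
    message = "Vlan ID %(vlan_id)s not found."


try:
    raise _SketchVlanIDNotFound(vlan_id=42)
except _SketchNeutronException as exc:
    print(exc)   # -> "Vlan ID 42 not found."
# --------------------------------------------------------------------------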
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. + +import webob.dec + +from neutron import wsgi + + +class Fault(webob.exc.HTTPException): + """Error codes for API faults.""" + + _fault_names = { + 400: "malformedRequest", + 401: "unauthorized", + 451: "CredentialNotFound", + 452: "QoSNotFound", + 453: "NovatenantNotFound", + 454: "MultiportNotFound", + 470: "serviceUnavailable", + 471: "pluginFault" + } + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """Generate a WSGI response. + + Response is generated based on the exception passed to constructor. + """ + # Replace the body with fault details. + code = self.wrapped_exc.status_int + fault_name = self._fault_names.get(code, "neutronServiceFault") + fault_data = { + fault_name: { + 'code': code, + 'message': self.wrapped_exc.explanation}} + # 'code' is an attribute on the fault tag itself + content_type = req.best_match_content_type() + self.wrapped_exc.body = wsgi.Serializer().serialize( + fault_data, content_type) + self.wrapped_exc.content_type = content_type + return self.wrapped_exc + + +class PortNotFound(webob.exc.HTTPClientError): + """PortNotFound exception. + + subclass of :class:`~HTTPClientError` + + This indicates that the server did not find the port specified + in the HTTP request for a given network + + code: 430, title: Port not Found + """ + code = 430 + title = _('Port not Found') + explanation = _('Unable to find a port with the specified identifier.') + + +class CredentialNotFound(webob.exc.HTTPClientError): + """CredentialNotFound exception. + + subclass of :class:`~HTTPClientError` + + This indicates that the server did not find the Credential specified + in the HTTP request + + code: 451, title: Credential not Found + """ + code = 451 + title = _('Credential Not Found') + explanation = _('Unable to find a Credential with' + ' the specified identifier.') + + +class QosNotFound(webob.exc.HTTPClientError): + """QosNotFound exception. + + subclass of :class:`~HTTPClientError` + + This indicates that the server did not find the QoS specified + in the HTTP request + + code: 452, title: QoS not Found + """ + code = 452 + title = _('QoS Not Found') + explanation = _('Unable to find a QoS with' + ' the specified identifier.') + + +class NovatenantNotFound(webob.exc.HTTPClientError): + """NovatenantNotFound exception. + + subclass of :class:`~HTTPClientError` + + This indicates that the server did not find the Novatenant specified + in the HTTP request + + code: 453, title: Nova tenant not Found + """ + code = 453 + title = _('Nova tenant Not Found') + explanation = _('Unable to find a Novatenant with' + ' the specified identifier.') + + +class RequestedStateInvalid(webob.exc.HTTPClientError): + """RequestedStateInvalid exception. 
+ + subclass of :class:`~HTTPClientError` + + This indicates that the server could not update the port state to + to the request value + + code: 431, title: Requested State Invalid + """ + code = 431 + title = _('Requested State Invalid') + explanation = _('Unable to update port state with specified value.') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/config.py new file mode 100644 index 00000000..7081d04c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/common/config.py @@ -0,0 +1,149 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.common import config + + +cisco_plugins_opts = [ + cfg.StrOpt('vswitch_plugin', + default='neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + help=_("Virtual Switch to use")), + cfg.StrOpt('nexus_plugin', + default='neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.' + 'NexusPlugin', + help=_("Nexus Switch to use")), +] + +cisco_opts = [ + cfg.StrOpt('vlan_name_prefix', default='q-', + help=_("VLAN Name prefix")), + cfg.StrOpt('provider_vlan_name_prefix', default='p-', + help=_("VLAN Name prefix for provider vlans")), + cfg.BoolOpt('provider_vlan_auto_create', default=True, + help=_('Provider VLANs are automatically created as needed ' + 'on the Nexus switch')), + cfg.BoolOpt('provider_vlan_auto_trunk', default=True, + help=_('Provider VLANs are automatically trunked as needed ' + 'on the ports of the Nexus switch')), + cfg.BoolOpt('nexus_l3_enable', default=False, + help=_("Enable L3 support on the Nexus switches")), + cfg.BoolOpt('svi_round_robin', default=False, + help=_("Distribute SVI interfaces over all switches")), + cfg.StrOpt('model_class', + default='neutron.plugins.cisco.models.virt_phy_sw_v2.' + 'VirtualPhysicalSwitchModelV2', + help=_("Model Class")), + cfg.StrOpt('nexus_driver', + default='neutron.plugins.cisco.test.nexus.' 
+ 'fake_nexus_driver.CiscoNEXUSFakeDriver', + help=_("Nexus Driver Name")), +] + +cisco_n1k_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("N1K Integration Bridge")), + cfg.BoolOpt('enable_tunneling', default=True, + help=_("N1K Enable Tunneling")), + cfg.StrOpt('tunnel_bridge', default='br-tun', + help=_("N1K Tunnel Bridge")), + cfg.StrOpt('local_ip', default='10.0.0.3', + help=_("N1K Local IP")), + cfg.StrOpt('tenant_network_type', default='local', + help=_("N1K Tenant Network Type")), + cfg.StrOpt('bridge_mappings', default='', + help=_("N1K Bridge Mappings")), + cfg.StrOpt('vxlan_id_ranges', default='5000:10000', + help=_("N1K VXLAN ID Ranges")), + cfg.StrOpt('network_vlan_ranges', default='vlan:1:4095', + help=_("N1K Network VLAN Ranges")), + cfg.StrOpt('default_network_profile', default='default_network_profile', + help=_("N1K default network profile")), + cfg.StrOpt('default_policy_profile', default='service_profile', + help=_("N1K default policy profile")), + cfg.StrOpt('network_node_policy_profile', default='dhcp_pp', + help=_("N1K policy profile for network node")), + cfg.IntOpt('poll_duration', default=10, + help=_("N1K Policy profile polling duration in seconds")), + cfg.IntOpt('http_pool_size', default=4, + help=_("Number of threads to use to make HTTP requests")), +] + +cfg.CONF.register_opts(cisco_opts, "CISCO") +cfg.CONF.register_opts(cisco_n1k_opts, "CISCO_N1K") +cfg.CONF.register_opts(cisco_plugins_opts, "CISCO_PLUGINS") +config.register_root_helper(cfg.CONF) + +# shortcuts +CONF = cfg.CONF +CISCO = cfg.CONF.CISCO +CISCO_N1K = cfg.CONF.CISCO_N1K +CISCO_PLUGINS = cfg.CONF.CISCO_PLUGINS + +# +# device_dictionary - Contains all external device configuration. +# +# When populated the device dictionary format is: +# {('', '', ''): '', ...} +# +# Example: +# {('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin', +# ('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword', +# ('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1', ...} +# +device_dictionary = {} + +# +# first_device_ip - IP address of first switch discovered in config +# +# Used for SVI placement when round-robin placement is disabled +# +first_device_ip = None + + +class CiscoConfigOptions(): + """Cisco Configuration Options Class.""" + + def __init__(self): + self._create_device_dictionary() + + def _create_device_dictionary(self): + """ + Create the device dictionary from the cisco_plugins.ini + device supported sections. Ex. NEXUS_SWITCH, N1KV. + """ + + global first_device_ip + + multi_parser = cfg.MultiConfigParser() + read_ok = multi_parser.read(CONF.config_file) + + if len(read_ok) != len(CONF.config_file): + raise cfg.Error(_("Some config files were not parsed properly")) + + first_device_ip = None + for parsed_file in multi_parser.parsed: + for parsed_item in parsed_file.keys(): + dev_id, sep, dev_ip = parsed_item.partition(':') + if dev_id.lower() in ['nexus_switch', 'n1kv']: + for dev_key, value in parsed_file[parsed_item].items(): + if dev_ip and not first_device_ip: + first_device_ip = dev_ip + device_dictionary[dev_id, dev_ip, dev_key] = value[0] + + +def get_device_dictionary(): + return device_dictionary diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/__init__.py new file mode 100644 index 00000000..9a446dd8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. 
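# --- Illustrative note (not part of the patch) ---------------------------
# get_device_dictionary() in the config module above returns a flat dict
# keyed by (device ID, device IP, keyword) tuples, as in the NEXUS_SWITCH
# example shown in its comments. A small sketch of how a consumer such as
# the credential Store earlier in this patch walks that structure; the
# dictionary contents below are made-up sample data, not real credentials.

sample_device_dict = {
    ('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin',
    ('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword',
    ('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1',
}

for (dev_id, dev_ip, dev_key) in sample_device_dict:
    if dev_key == 'username':
        username = sample_device_dict[dev_id, dev_ip, 'username']
        password = sample_device_dict[dev_id, dev_ip, 'password']
        # In the real Store.initialize() these values are persisted via
        # network_db_v2.add_credential(dev_ip, username, password, dev_id).
        print(dev_ip, username, password, dev_id)
# --------------------------------------------------------------------------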
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/n1kv_db_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/n1kv_db_v2.py new file mode 100644 index 00000000..b9093da2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/n1kv_db_v2.py @@ -0,0 +1,1619 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Aruna Kushwaha, Cisco Systems Inc. +# @author: Abhishek Raut, Cisco Systems Inc. +# @author: Rudrajit Tapadar, Cisco Systems Inc. +# @author: Sergey Sudakovich, Cisco Systems Inc. + +import netaddr +import re +from sqlalchemy.orm import exc +from sqlalchemy import sql + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_constants as c_const +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.db import n1kv_models_v2 + +LOG = logging.getLogger(__name__) + + +def del_trunk_segment_binding(db_session, trunk_segment_id, segment_pairs): + """ + Delete a trunk network binding. + + :param db_session: database session + :param trunk_segment_id: UUID representing the trunk network + :param segment_pairs: List of segment UUIDs in pair + representing the segments that are trunked + """ + with db_session.begin(subtransactions=True): + for (segment_id, dot1qtag) in segment_pairs: + (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). + filter_by(trunk_segment_id=trunk_segment_id, + segment_id=segment_id, + dot1qtag=dot1qtag).delete()) + alloc = (db_session.query(n1kv_models_v2. + N1kvTrunkSegmentBinding). + filter_by(trunk_segment_id=trunk_segment_id).first()) + if not alloc: + binding = get_network_binding(db_session, trunk_segment_id) + binding.physical_network = None + + +def del_multi_segment_binding(db_session, multi_segment_id, segment_pairs): + """ + Delete a multi-segment network binding. 
+ + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :param segment_pairs: List of segment UUIDs in pair + representing the segments that are bridged + """ + with db_session.begin(subtransactions=True): + for (segment1_id, segment2_id) in segment_pairs: + (db_session.query(n1kv_models_v2. + N1kvMultiSegmentNetworkBinding).filter_by( + multi_segment_id=multi_segment_id, + segment1_id=segment1_id, + segment2_id=segment2_id).delete()) + + +def add_trunk_segment_binding(db_session, trunk_segment_id, segment_pairs): + """ + Create a trunk network binding. + + :param db_session: database session + :param trunk_segment_id: UUID representing the multi-segment network + :param segment_pairs: List of segment UUIDs in pair + representing the segments to be trunked + """ + with db_session.begin(subtransactions=True): + binding = get_network_binding(db_session, trunk_segment_id) + for (segment_id, tag) in segment_pairs: + if not binding.physical_network: + member_seg_binding = get_network_binding(db_session, + segment_id) + binding.physical_network = member_seg_binding.physical_network + trunk_segment_binding = ( + n1kv_models_v2.N1kvTrunkSegmentBinding( + trunk_segment_id=trunk_segment_id, + segment_id=segment_id, dot1qtag=tag)) + db_session.add(trunk_segment_binding) + + +def add_multi_segment_binding(db_session, multi_segment_id, segment_pairs): + """ + Create a multi-segment network binding. + + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :param segment_pairs: List of segment UUIDs in pair + representing the segments to be bridged + """ + with db_session.begin(subtransactions=True): + for (segment1_id, segment2_id) in segment_pairs: + multi_segment_binding = ( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding( + multi_segment_id=multi_segment_id, + segment1_id=segment1_id, + segment2_id=segment2_id)) + db_session.add(multi_segment_binding) + + +def add_multi_segment_encap_profile_name(db_session, multi_segment_id, + segment_pair, profile_name): + """ + Add the encapsulation profile name to the multi-segment network binding. + + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :param segment_pair: set containing the segment UUIDs that are bridged + """ + with db_session.begin(subtransactions=True): + binding = get_multi_segment_network_binding(db_session, + multi_segment_id, + segment_pair) + binding.encap_profile_name = profile_name + + +def get_multi_segment_network_binding(db_session, + multi_segment_id, segment_pair): + """ + Retrieve multi-segment network binding. + + :param db_session: database session + :param multi_segment_id: UUID representing the trunk network whose binding + is to fetch + :param segment_pair: set containing the segment UUIDs that are bridged + :returns: binding object + """ + try: + (segment1_id, segment2_id) = segment_pair + return (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(multi_segment_id=multi_segment_id, + segment1_id=segment1_id, + segment2_id=segment2_id)).one() + except exc.NoResultFound: + raise c_exc.NetworkBindingNotFound(network_id=multi_segment_id) + + +def get_multi_segment_members(db_session, multi_segment_id): + """ + Retrieve all the member segments of a multi-segment network. 
+ + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :returns: a list of tuples representing the mapped segments + """ + with db_session.begin(subtransactions=True): + allocs = (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(multi_segment_id=multi_segment_id)) + return [(a.segment1_id, a.segment2_id) for a in allocs] + + +def get_multi_segment_encap_dict(db_session, multi_segment_id): + """ + Retrieve the encapsulation profiles for every segment pairs bridged. + + :param db_session: database session + :param multi_segment_id: UUID representing the multi-segment network + :returns: a dictionary of lists containing the segment pairs in sets + """ + with db_session.begin(subtransactions=True): + encap_dict = {} + allocs = (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(multi_segment_id=multi_segment_id)) + for alloc in allocs: + if alloc.encap_profile_name not in encap_dict: + encap_dict[alloc.encap_profile_name] = [] + seg_pair = (alloc.segment1_id, alloc.segment2_id) + encap_dict[alloc.encap_profile_name].append(seg_pair) + return encap_dict + + +def get_trunk_network_binding(db_session, trunk_segment_id, segment_pair): + """ + Retrieve trunk network binding. + + :param db_session: database session + :param trunk_segment_id: UUID representing the trunk network whose binding + is to fetch + :param segment_pair: set containing the segment_id and dot1qtag + :returns: binding object + """ + try: + (segment_id, dot1qtag) = segment_pair + return (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). + filter_by(trunk_segment_id=trunk_segment_id, + segment_id=segment_id, + dot1qtag=dot1qtag)).one() + except exc.NoResultFound: + raise c_exc.NetworkBindingNotFound(network_id=trunk_segment_id) + + +def get_trunk_members(db_session, trunk_segment_id): + """ + Retrieve all the member segments of a trunk network. + + :param db_session: database session + :param trunk_segment_id: UUID representing the trunk network + :returns: a list of tuples representing the segment and their + corresponding dot1qtag + """ + with db_session.begin(subtransactions=True): + allocs = (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). + filter_by(trunk_segment_id=trunk_segment_id)) + return [(a.segment_id, a.dot1qtag) for a in allocs] + + +def is_trunk_member(db_session, segment_id): + """ + Checks if a segment is a member of a trunk segment. + + :param db_session: database session + :param segment_id: UUID of the segment to be checked + :returns: boolean + """ + with db_session.begin(subtransactions=True): + ret = (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). + filter_by(segment_id=segment_id).first()) + return bool(ret) + + +def is_multi_segment_member(db_session, segment_id): + """ + Checks if a segment is a member of a multi-segment network. + + :param db_session: database session + :param segment_id: UUID of the segment to be checked + :returns: boolean + """ + with db_session.begin(subtransactions=True): + ret1 = (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(segment1_id=segment_id).first()) + ret2 = (db_session.query( + n1kv_models_v2.N1kvMultiSegmentNetworkBinding). + filter_by(segment2_id=segment_id).first()) + return bool(ret1 or ret2) + + +def get_network_binding(db_session, network_id): + """ + Retrieve network binding. 
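# --- Illustrative note (not part of the patch) ---------------------------
# The trunk-binding helpers above keep one row per (trunk network, member
# segment, dot1q tag), and get_trunk_members() returns the members as
# (segment_id, dot1qtag) tuples. A minimal in-memory sketch of that
# bookkeeping, using a plain dict instead of the N1kvTrunkSegmentBinding
# table.

trunk_bindings = {}    # trunk_segment_id -> list of (segment_id, dot1qtag)

def sketch_add_trunk_segment_binding(trunk_segment_id, segment_pairs):
    trunk_bindings.setdefault(trunk_segment_id, []).extend(segment_pairs)

def sketch_get_trunk_members(trunk_segment_id):
    return list(trunk_bindings.get(trunk_segment_id, []))

sketch_add_trunk_segment_binding('trunk-net-1', [('seg-a', 10), ('seg-b', 20)])
assert sketch_get_trunk_members('trunk-net-1') == [('seg-a', 10), ('seg-b', 20)]
# --------------------------------------------------------------------------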
+ + :param db_session: database session + :param network_id: UUID representing the network whose binding is + to fetch + :returns: binding object + """ + try: + return (db_session.query(n1kv_models_v2.N1kvNetworkBinding). + filter_by(network_id=network_id). + one()) + except exc.NoResultFound: + raise c_exc.NetworkBindingNotFound(network_id=network_id) + + +def add_network_binding(db_session, network_id, network_type, + physical_network, segmentation_id, + multicast_ip, network_profile_id, add_segments): + """ + Create network binding. + + :param db_session: database session + :param network_id: UUID representing the network + :param network_type: string representing type of network (VLAN, OVERLAY, + MULTI_SEGMENT or TRUNK) + :param physical_network: Only applicable for VLAN networks. It + represents a L2 Domain + :param segmentation_id: integer representing VLAN or VXLAN ID + :param multicast_ip: Native VXLAN technology needs a multicast IP to be + associated with every VXLAN ID to deal with broadcast + packets. A single multicast IP can be shared by + multiple VXLAN IDs. + :param network_profile_id: network profile ID based on which this network + is created + :param add_segments: List of segment UUIDs in pairs to be added to either a + multi-segment or trunk network + """ + with db_session.begin(subtransactions=True): + binding = n1kv_models_v2.N1kvNetworkBinding( + network_id=network_id, + network_type=network_type, + physical_network=physical_network, + segmentation_id=segmentation_id, + multicast_ip=multicast_ip, + profile_id=network_profile_id) + db_session.add(binding) + if add_segments is None: + pass + elif network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + add_multi_segment_binding(db_session, network_id, add_segments) + elif network_type == c_const.NETWORK_TYPE_TRUNK: + add_trunk_segment_binding(db_session, network_id, add_segments) + + +def get_segment_range(network_profile): + """ + Get the segment range min and max for a network profile. + + :params network_profile: object of type network profile + :returns: integer values representing minimum and maximum segment + range value + """ + # Sort the range to ensure min, max is in order + seg_min, seg_max = sorted( + int(i) for i in network_profile.segment_range.split('-')) + LOG.debug(_("seg_min %(seg_min)s, seg_max %(seg_max)s"), + {'seg_min': seg_min, 'seg_max': seg_max}) + return seg_min, seg_max + + +def get_multicast_ip(network_profile): + """ + Retrieve a multicast ip from the defined pool. + + :params network_profile: object of type network profile + :returns: string representing multicast IP + """ + # Round robin multicast ip allocation + min_ip, max_ip = _get_multicast_ip_range(network_profile) + addr_list = list((netaddr.iter_iprange(min_ip, max_ip))) + mul_ip_str = str(addr_list[network_profile.multicast_ip_index]) + + network_profile.multicast_ip_index += 1 + if network_profile.multicast_ip_index == len(addr_list): + network_profile.multicast_ip_index = 0 + return mul_ip_str + + +def _get_multicast_ip_range(network_profile): + """ + Helper method to retrieve minimum and maximum multicast ip. + + :params network_profile: object of type network profile + :returns: two strings representing minimum multicast ip and + maximum multicast ip + """ + # Assumption: ip range belongs to the same subnet + # Assumption: ip range is already sorted + return network_profile.multicast_ip_range.split('-') + + +def get_port_binding(db_session, port_id): + """ + Retrieve port binding. 
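# --- Illustrative note (not part of the patch) ---------------------------
# get_multicast_ip() above hands out multicast addresses round-robin: it
# expands the profile's "min-max" range into a list, returns the address at
# multicast_ip_index, then advances the index and wraps it at the end of
# the list. A self-contained sketch of just that index arithmetic, with the
# expanded addresses precomputed as strings instead of netaddr objects.

addr_list = ['224.1.1.1', '224.1.1.2', '224.1.1.3']
multicast_ip_index = 0

def next_multicast_ip():
    global multicast_ip_index
    ip = addr_list[multicast_ip_index]
    multicast_ip_index += 1
    if multicast_ip_index == len(addr_list):
        multicast_ip_index = 0      # wrap around, mirroring the code above
    return ip

assert [next_multicast_ip() for _ in range(4)] == \
    ['224.1.1.1', '224.1.1.2', '224.1.1.3', '224.1.1.1']
# --------------------------------------------------------------------------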
+ + :param db_session: database session + :param port_id: UUID representing the port whose binding is to fetch + :returns: port binding object + """ + try: + return (db_session.query(n1kv_models_v2.N1kvPortBinding). + filter_by(port_id=port_id). + one()) + except exc.NoResultFound: + raise c_exc.PortBindingNotFound(port_id=port_id) + + +def add_port_binding(db_session, port_id, policy_profile_id): + """ + Create port binding. + + Bind the port with policy profile. + :param db_session: database session + :param port_id: UUID of the port + :param policy_profile_id: UUID of the policy profile + """ + with db_session.begin(subtransactions=True): + binding = n1kv_models_v2.N1kvPortBinding(port_id=port_id, + profile_id=policy_profile_id) + db_session.add(binding) + + +def delete_segment_allocations(db_session, net_p): + """ + Delete the segment allocation entry from the table. + + :params db_session: database session + :params net_p: network profile object + """ + with db_session.begin(subtransactions=True): + seg_min, seg_max = get_segment_range(net_p) + if net_p['segment_type'] == c_const.NETWORK_TYPE_VLAN: + db_session.query(n1kv_models_v2.N1kvVlanAllocation).filter( + (n1kv_models_v2.N1kvVlanAllocation.physical_network == + net_p['physical_network']), + (n1kv_models_v2.N1kvVlanAllocation.vlan_id >= seg_min), + (n1kv_models_v2.N1kvVlanAllocation.vlan_id <= + seg_max)).delete() + elif net_p['segment_type'] == c_const.NETWORK_TYPE_OVERLAY: + db_session.query(n1kv_models_v2.N1kvVxlanAllocation).filter( + (n1kv_models_v2.N1kvVxlanAllocation.vxlan_id >= seg_min), + (n1kv_models_v2.N1kvVxlanAllocation.vxlan_id <= + seg_max)).delete() + + +def sync_vlan_allocations(db_session, net_p): + """ + Synchronize vlan_allocations table with configured VLAN ranges. + + Sync the network profile range with the vlan_allocations table for each + physical network. + :param db_session: database session + :param net_p: network profile dictionary + """ + with db_session.begin(subtransactions=True): + seg_min, seg_max = get_segment_range(net_p) + for vlan_id in range(seg_min, seg_max + 1): + try: + get_vlan_allocation(db_session, + net_p['physical_network'], + vlan_id) + except c_exc.VlanIDNotFound: + alloc = n1kv_models_v2.N1kvVlanAllocation( + physical_network=net_p['physical_network'], + vlan_id=vlan_id, + network_profile_id=net_p['id']) + db_session.add(alloc) + + +def get_vlan_allocation(db_session, physical_network, vlan_id): + """ + Retrieve vlan allocation. + + :param db_session: database session + :param physical network: string name for the physical network + :param vlan_id: integer representing the VLAN ID. + :returns: allocation object for given physical network and VLAN ID + """ + try: + return (db_session.query(n1kv_models_v2.N1kvVlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id).one()) + except exc.NoResultFound: + raise c_exc.VlanIDNotFound(vlan_id=vlan_id) + + +def reserve_vlan(db_session, network_profile): + """ + Reserve a VLAN ID within the range of the network profile. + + :param db_session: database session + :param network_profile: network profile object + """ + seg_min, seg_max = get_segment_range(network_profile) + segment_type = c_const.NETWORK_TYPE_VLAN + + with db_session.begin(subtransactions=True): + alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). 
+ filter(sql.and_( + n1kv_models_v2.N1kvVlanAllocation.vlan_id >= seg_min, + n1kv_models_v2.N1kvVlanAllocation.vlan_id <= seg_max, + n1kv_models_v2.N1kvVlanAllocation.physical_network == + network_profile['physical_network'], + n1kv_models_v2.N1kvVlanAllocation.allocated == + sql.false()) + )).first() + if alloc: + segment_id = alloc.vlan_id + physical_network = alloc.physical_network + alloc.allocated = True + return (physical_network, segment_type, segment_id, "0.0.0.0") + raise c_exc.NoMoreNetworkSegments( + network_profile_name=network_profile.name) + + +def reserve_vxlan(db_session, network_profile): + """ + Reserve a VXLAN ID within the range of the network profile. + + :param db_session: database session + :param network_profile: network profile object + """ + seg_min, seg_max = get_segment_range(network_profile) + segment_type = c_const.NETWORK_TYPE_OVERLAY + physical_network = "" + + with db_session.begin(subtransactions=True): + alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). + filter(sql.and_( + n1kv_models_v2.N1kvVxlanAllocation.vxlan_id >= + seg_min, + n1kv_models_v2.N1kvVxlanAllocation.vxlan_id <= + seg_max, + n1kv_models_v2.N1kvVxlanAllocation.allocated == + sql.false()) + ).first()) + if alloc: + segment_id = alloc.vxlan_id + alloc.allocated = True + if network_profile.sub_type == (c_const. + NETWORK_SUBTYPE_NATIVE_VXLAN): + return (physical_network, segment_type, + segment_id, get_multicast_ip(network_profile)) + else: + return (physical_network, segment_type, segment_id, "0.0.0.0") + raise n_exc.NoNetworkAvailable() + + +def alloc_network(db_session, network_profile_id): + """ + Allocate network using first available free segment ID in segment range. + + :param db_session: database session + :param network_profile_id: UUID representing the network profile + """ + with db_session.begin(subtransactions=True): + network_profile = get_network_profile(db_session, + network_profile_id) + if network_profile.segment_type == c_const.NETWORK_TYPE_VLAN: + return reserve_vlan(db_session, network_profile) + if network_profile.segment_type == c_const.NETWORK_TYPE_OVERLAY: + return reserve_vxlan(db_session, network_profile) + return (None, network_profile.segment_type, 0, "0.0.0.0") + + +def reserve_specific_vlan(db_session, physical_network, vlan_id): + """ + Reserve a specific VLAN ID for the network. + + :param db_session: database session + :param physical_network: string representing the name of physical network + :param vlan_id: integer value of the segmentation ID to be reserved + """ + with db_session.begin(subtransactions=True): + try: + alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + one()) + if alloc.allocated: + if vlan_id == c_const.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse(vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan)s on physical " + "network %(network)s from pool"), + {"vlan": vlan_id, "network": physical_network}) + alloc.allocated = True + db_session.add(alloc) + except exc.NoResultFound: + raise c_exc.VlanIDOutsidePool + + +def release_vlan(db_session, physical_network, vlan_id): + """ + Release a given VLAN ID. 
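# --- Illustrative note (not part of the patch) ---------------------------
# The allocation helpers above (sync_vlan_allocations, reserve_vlan,
# reserve_vxlan) all rely on get_segment_range(), which splits the network
# profile's "min-max" string and sorts the two ends. A one-line sketch of
# that parsing on a sample range string:

segment_range = "550-500"                      # deliberately reversed
seg_min, seg_max = sorted(int(i) for i in segment_range.split('-'))
assert (seg_min, seg_max) == (500, 550)
# Every ID in range(seg_min, seg_max + 1) then gets an allocation row, and
# reserve_vlan()/reserve_vxlan() pick the first row with allocated == False.
# --------------------------------------------------------------------------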
+ + :param db_session: database session + :param physical_network: string representing the name of physical network + :param vlan_id: integer value of the segmentation ID to be released + """ + with db_session.begin(subtransactions=True): + try: + alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + one()) + alloc.allocated = False + except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan)s on physical network %(network)s " + "not found"), + {"vlan": vlan_id, "network": physical_network}) + + +def sync_vxlan_allocations(db_session, net_p): + """ + Synchronize vxlan_allocations table with configured vxlan ranges. + + :param db_session: database session + :param net_p: network profile dictionary + """ + seg_min, seg_max = get_segment_range(net_p) + if seg_max + 1 - seg_min > c_const.MAX_VXLAN_RANGE: + msg = (_("Unreasonable vxlan ID range %(vxlan_min)s - %(vxlan_max)s"), + {"vxlan_min": seg_min, "vxlan_max": seg_max}) + raise n_exc.InvalidInput(error_message=msg) + with db_session.begin(subtransactions=True): + for vxlan_id in range(seg_min, seg_max + 1): + try: + get_vxlan_allocation(db_session, vxlan_id) + except c_exc.VxlanIDNotFound: + alloc = n1kv_models_v2.N1kvVxlanAllocation( + network_profile_id=net_p['id'], vxlan_id=vxlan_id) + db_session.add(alloc) + + +def get_vxlan_allocation(db_session, vxlan_id): + """ + Retrieve VXLAN allocation for the given VXLAN ID. + + :param db_session: database session + :param vxlan_id: integer value representing the segmentation ID + :returns: allocation object + """ + try: + return (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). + filter_by(vxlan_id=vxlan_id).one()) + except exc.NoResultFound: + raise c_exc.VxlanIDNotFound(vxlan_id=vxlan_id) + + +def reserve_specific_vxlan(db_session, vxlan_id): + """ + Reserve a specific VXLAN ID. + + :param db_session: database session + :param vxlan_id: integer value representing the segmentation ID + """ + with db_session.begin(subtransactions=True): + try: + alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). + filter_by(vxlan_id=vxlan_id). + one()) + if alloc.allocated: + raise c_exc.VxlanIDInUse(vxlan_id=vxlan_id) + LOG.debug(_("Reserving specific vxlan %s from pool"), vxlan_id) + alloc.allocated = True + db_session.add(alloc) + except exc.NoResultFound: + raise c_exc.VxlanIDOutsidePool + + +def release_vxlan(db_session, vxlan_id): + """ + Release a given VXLAN ID. + + :param db_session: database session + :param vxlan_id: integer value representing the segmentation ID + """ + with db_session.begin(subtransactions=True): + try: + alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). + filter_by(vxlan_id=vxlan_id). + one()) + alloc.allocated = False + except exc.NoResultFound: + LOG.warning(_("vxlan_id %s not found"), vxlan_id) + + +def set_port_status(port_id, status): + """ + Set the status of the port. + + :param port_id: UUID representing the port + :param status: string representing the new status + """ + db_session = db.get_session() + try: + port = db_session.query(models_v2.Port).filter_by(id=port_id).one() + port.status = status + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) + + +def get_vm_network(db_session, policy_profile_id, network_id): + """ + Retrieve a vm_network based on policy profile and network id. 
+ + :param db_session: database session + :param policy_profile_id: UUID representing policy profile + :param network_id: UUID representing network + :returns: VM network object + """ + try: + return (db_session.query(n1kv_models_v2.N1kVmNetwork). + filter_by(profile_id=policy_profile_id, + network_id=network_id).one()) + except exc.NoResultFound: + name = (c_const.VM_NETWORK_NAME_PREFIX + policy_profile_id + + "_" + network_id) + raise c_exc.VMNetworkNotFound(name=name) + + +def add_vm_network(db_session, + name, + policy_profile_id, + network_id, + port_count): + """ + Create a VM network. + + Add a VM network for a unique combination of network and + policy profile. All ports having the same policy profile + on one network will be associated with one VM network. + :param db_session: database session + :param name: string representing the name of the VM network + :param policy_profile_id: UUID representing policy profile + :param network_id: UUID representing a network + :param port_count: integer representing the number of ports on vm network + """ + with db_session.begin(subtransactions=True): + vm_network = n1kv_models_v2.N1kVmNetwork( + name=name, + profile_id=policy_profile_id, + network_id=network_id, + port_count=port_count) + db_session.add(vm_network) + + +def update_vm_network_port_count(db_session, name, port_count): + """ + Update a VM network with new port count. + + :param db_session: database session + :param name: string representing the name of the VM network + :param port_count: integer representing the number of ports on VM network + """ + try: + with db_session.begin(subtransactions=True): + vm_network = (db_session.query(n1kv_models_v2.N1kVmNetwork). + filter_by(name=name).one()) + if port_count is not None: + vm_network.port_count = port_count + return vm_network + except exc.NoResultFound: + raise c_exc.VMNetworkNotFound(name=name) + + +def delete_vm_network(db_session, policy_profile_id, network_id): + """ + Delete a VM network. 
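# --- Illustrative note (not part of the patch) ---------------------------
# add_vm_network() and update_vm_network_port_count() above keep one record
# per (policy profile, network) pair with a running port_count, so every
# port sharing a policy profile on one network maps to the same VM network.
# A minimal in-memory sketch of that bookkeeping, with a made-up name:

vm_networks = {}   # VM network name -> record

def sketch_add_vm_network(name, policy_profile_id, network_id, port_count):
    vm_networks[name] = {"profile_id": policy_profile_id,
                         "network_id": network_id,
                         "port_count": port_count}

def sketch_update_port_count(name, port_count):
    vm_networks[name]["port_count"] = port_count

sketch_add_vm_network("vmn_pp1_net1", "pp1", "net1", 1)
sketch_update_port_count("vmn_pp1_net1", 2)
assert vm_networks["vmn_pp1_net1"]["port_count"] == 2
# --------------------------------------------------------------------------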
+ + :param db_session: database session + :param policy_profile_id: UUID representing a policy profile + :param network_id: UUID representing a network + :returns: deleted VM network object + """ + with db_session.begin(subtransactions=True): + try: + vm_network = get_vm_network(db_session, + policy_profile_id, + network_id) + db_session.delete(vm_network) + db_session.query(n1kv_models_v2.N1kVmNetwork).filter_by( + name=vm_network["name"]).delete() + return vm_network + except exc.NoResultFound: + name = (c_const.VM_NETWORK_NAME_PREFIX + policy_profile_id + + "_" + network_id) + raise c_exc.VMNetworkNotFound(name=name) + + +def create_network_profile(db_session, network_profile): + """Create a network profile.""" + LOG.debug(_("create_network_profile()")) + with db_session.begin(subtransactions=True): + kwargs = {"name": network_profile["name"], + "segment_type": network_profile["segment_type"]} + if network_profile["segment_type"] == c_const.NETWORK_TYPE_VLAN: + kwargs["physical_network"] = network_profile["physical_network"] + kwargs["segment_range"] = network_profile["segment_range"] + elif network_profile["segment_type"] == c_const.NETWORK_TYPE_OVERLAY: + kwargs["multicast_ip_index"] = 0 + kwargs["multicast_ip_range"] = network_profile[ + "multicast_ip_range"] + kwargs["segment_range"] = network_profile["segment_range"] + kwargs["sub_type"] = network_profile["sub_type"] + elif network_profile["segment_type"] == c_const.NETWORK_TYPE_TRUNK: + kwargs["sub_type"] = network_profile["sub_type"] + net_profile = n1kv_models_v2.NetworkProfile(**kwargs) + db_session.add(net_profile) + return net_profile + + +def delete_network_profile(db_session, id): + """Delete Network Profile.""" + LOG.debug(_("delete_network_profile()")) + with db_session.begin(subtransactions=True): + try: + network_profile = get_network_profile(db_session, id) + db_session.delete(network_profile) + (db_session.query(n1kv_models_v2.ProfileBinding). + filter_by(profile_id=id).delete()) + return network_profile + except exc.NoResultFound: + raise c_exc.ProfileTenantBindingNotFound(profile_id=id) + + +def update_network_profile(db_session, id, network_profile): + """Update Network Profile.""" + LOG.debug(_("update_network_profile()")) + with db_session.begin(subtransactions=True): + profile = get_network_profile(db_session, id) + profile.update(network_profile) + return profile + + +def get_network_profile(db_session, id): + """Get Network Profile.""" + LOG.debug(_("get_network_profile()")) + try: + return db_session.query( + n1kv_models_v2.NetworkProfile).filter_by(id=id).one() + except exc.NoResultFound: + raise c_exc.NetworkProfileNotFound(profile=id) + + +def _get_network_profiles(db_session=None, physical_network=None): + """ + Retrieve all network profiles. + + Get Network Profiles on a particular physical network, if physical + network is specified. If no physical network is specified, return + all network profiles. + """ + db_session = db_session or db.get_session() + if physical_network: + return (db_session.query(n1kv_models_v2.NetworkProfile). 
+ filter_by(physical_network=physical_network)) + return db_session.query(n1kv_models_v2.NetworkProfile) + + +def create_policy_profile(policy_profile): + """Create Policy Profile.""" + LOG.debug(_("create_policy_profile()")) + db_session = db.get_session() + with db_session.begin(subtransactions=True): + p_profile = n1kv_models_v2.PolicyProfile(id=policy_profile["id"], + name=policy_profile["name"]) + db_session.add(p_profile) + return p_profile + + +def delete_policy_profile(id): + """Delete Policy Profile.""" + LOG.debug(_("delete_policy_profile()")) + db_session = db.get_session() + with db_session.begin(subtransactions=True): + policy_profile = get_policy_profile(db_session, id) + db_session.delete(policy_profile) + + +def update_policy_profile(db_session, id, policy_profile): + """Update a policy profile.""" + LOG.debug(_("update_policy_profile()")) + with db_session.begin(subtransactions=True): + _profile = get_policy_profile(db_session, id) + _profile.update(policy_profile) + return _profile + + +def get_policy_profile(db_session, id): + """Get Policy Profile.""" + LOG.debug(_("get_policy_profile()")) + try: + return db_session.query( + n1kv_models_v2.PolicyProfile).filter_by(id=id).one() + except exc.NoResultFound: + raise c_exc.PolicyProfileIdNotFound(profile_id=id) + + +def get_policy_profiles(): + """Retrieve all policy profiles.""" + db_session = db.get_session() + with db_session.begin(subtransactions=True): + return db_session.query(n1kv_models_v2.PolicyProfile) + + +def create_profile_binding(db_session, tenant_id, profile_id, profile_type): + """Create Network/Policy Profile association with a tenant.""" + db_session = db_session or db.get_session() + if profile_type not in ["network", "policy"]: + raise n_exc.NeutronException(_("Invalid profile type")) + + if _profile_binding_exists(db_session, + tenant_id, + profile_id, + profile_type): + return get_profile_binding(db_session, tenant_id, profile_id) + + with db_session.begin(subtransactions=True): + binding = n1kv_models_v2.ProfileBinding(profile_type=profile_type, + profile_id=profile_id, + tenant_id=tenant_id) + db_session.add(binding) + return binding + + +def _profile_binding_exists(db_session, tenant_id, profile_id, profile_type): + LOG.debug(_("_profile_binding_exists()")) + return (db_session.query(n1kv_models_v2.ProfileBinding). + filter_by(tenant_id=tenant_id, profile_id=profile_id, + profile_type=profile_type).first()) + + +def get_profile_binding(db_session, tenant_id, profile_id): + """Get Network/Policy Profile - Tenant binding.""" + LOG.debug(_("get_profile_binding()")) + try: + return (db_session.query(n1kv_models_v2.ProfileBinding).filter_by( + tenant_id=tenant_id, profile_id=profile_id).one()) + except exc.NoResultFound: + raise c_exc.ProfileTenantBindingNotFound(profile_id=profile_id) + + +def delete_profile_binding(db_session, tenant_id, profile_id): + """Delete Policy Binding.""" + LOG.debug(_("delete_profile_binding()")) + db_session = db_session or db.get_session() + try: + binding = get_profile_binding(db_session, tenant_id, profile_id) + with db_session.begin(subtransactions=True): + db_session.delete(binding) + except c_exc.ProfileTenantBindingNotFound: + LOG.debug(_("Profile-Tenant binding missing for profile ID " + "%(profile_id)s and tenant ID %(tenant_id)s"), + {"profile_id": profile_id, "tenant_id": tenant_id}) + return + + +def _get_profile_bindings(db_session, profile_type=None): + """ + Retrieve a list of profile bindings. + + Get all profile-tenant bindings based on profile type. 
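# --- Illustrative note (not part of the patch) ---------------------------
# create_profile_binding() above is idempotent: only the "network" and
# "policy" profile types are accepted, and if a (tenant, profile, type)
# binding already exists it is returned instead of being duplicated. A tiny
# dictionary-based sketch of that behaviour, with no database involved:

bindings = {}   # (tenant_id, profile_id, profile_type) -> binding record

def sketch_create_profile_binding(tenant_id, profile_id, profile_type):
    if profile_type not in ("network", "policy"):
        raise ValueError("Invalid profile type")
    key = (tenant_id, profile_id, profile_type)
    # Return the existing binding rather than creating a duplicate.
    return bindings.setdefault(key, {"tenant_id": tenant_id,
                                     "profile_id": profile_id,
                                     "profile_type": profile_type})

first = sketch_create_profile_binding("tenant-1", "profile-1", "network")
second = sketch_create_profile_binding("tenant-1", "profile-1", "network")
assert first is second
# --------------------------------------------------------------------------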
+ If profile type is None, return profile-tenant binding for all + profile types. + """ + LOG.debug(_("_get_profile_bindings()")) + if profile_type: + profile_bindings = (db_session.query(n1kv_models_v2.ProfileBinding). + filter_by(profile_type=profile_type)) + return profile_bindings + return db_session.query(n1kv_models_v2.ProfileBinding) + + +class NetworkProfile_db_mixin(object): + + """Network Profile Mixin.""" + + def _replace_fake_tenant_id_with_real(self, context): + """ + Replace default tenant-id with admin tenant-ids. + + Default tenant-ids are populated in profile bindings when plugin is + initialized. Replace these tenant-ids with admin's tenant-id. + :param context: neutron api request context + """ + if context.is_admin and context.tenant_id: + tenant_id = context.tenant_id + db_session = context.session + with db_session.begin(subtransactions=True): + (db_session.query(n1kv_models_v2.ProfileBinding). + filter_by(tenant_id=c_const.TENANT_ID_NOT_SET). + update({'tenant_id': tenant_id})) + + def _get_network_collection_for_tenant(self, db_session, model, tenant_id): + net_profile_ids = (db_session.query(n1kv_models_v2.ProfileBinding. + profile_id). + filter_by(tenant_id=tenant_id). + filter_by(profile_type=c_const.NETWORK)) + network_profiles = (db_session.query(model).filter(model.id.in_( + pid[0] for pid in net_profile_ids))) + return [self._make_network_profile_dict(p) for p in network_profiles] + + def _make_profile_bindings_dict(self, profile_binding, fields=None): + res = {"profile_id": profile_binding["profile_id"], + "tenant_id": profile_binding["tenant_id"]} + return self._fields(res, fields) + + def _make_network_profile_dict(self, network_profile, fields=None): + res = {"id": network_profile["id"], + "name": network_profile["name"], + "segment_type": network_profile["segment_type"], + "sub_type": network_profile["sub_type"], + "segment_range": network_profile["segment_range"], + "multicast_ip_index": network_profile["multicast_ip_index"], + "multicast_ip_range": network_profile["multicast_ip_range"], + "physical_network": network_profile["physical_network"]} + return self._fields(res, fields) + + def _segment_in_use(self, db_session, network_profile): + """Verify whether a segment is allocated for given network profile.""" + with db_session.begin(subtransactions=True): + return (db_session.query(n1kv_models_v2.N1kvNetworkBinding). + filter_by(profile_id=network_profile['id'])).first() + + def get_network_profile_bindings(self, context, filters=None, fields=None): + """ + Retrieve a list of profile bindings for network profiles. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + profile bindings object. Values in this dictiontary are + an iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a profile + bindings dictionary. Only these fields will be returned + :returns: list of profile bindings + """ + if context.is_admin: + profile_bindings = _get_profile_bindings( + context.session, + profile_type=c_const.NETWORK) + return [self._make_profile_bindings_dict(pb) + for pb in profile_bindings] + + def create_network_profile(self, context, network_profile): + """ + Create a network profile. 
+ + :param context: neutron api request context + :param network_profile: network profile dictionary + :returns: network profile dictionary + """ + self._replace_fake_tenant_id_with_real(context) + p = network_profile["network_profile"] + self._validate_network_profile_args(context, p) + with context.session.begin(subtransactions=True): + net_profile = create_network_profile(context.session, p) + if net_profile.segment_type == c_const.NETWORK_TYPE_VLAN: + sync_vlan_allocations(context.session, net_profile) + elif net_profile.segment_type == c_const.NETWORK_TYPE_OVERLAY: + sync_vxlan_allocations(context.session, net_profile) + create_profile_binding(context.session, + context.tenant_id, + net_profile.id, + c_const.NETWORK) + if p.get("add_tenant"): + self.add_network_profile_tenant(context.session, + net_profile.id, + p["add_tenant"]) + return self._make_network_profile_dict(net_profile) + + def delete_network_profile(self, context, id): + """ + Delete a network profile. + + :param context: neutron api request context + :param id: UUID representing network profile to delete + :returns: deleted network profile dictionary + """ + # Check whether the network profile is in use. + if self._segment_in_use(context.session, + get_network_profile(context.session, id)): + raise c_exc.NetworkProfileInUse(profile=id) + # Delete and return the network profile if it is not in use. + _profile = delete_network_profile(context.session, id) + return self._make_network_profile_dict(_profile) + + def update_network_profile(self, context, id, network_profile): + """ + Update a network profile. + + Add/remove network profile to tenant-id binding for the corresponding + options and if user is admin. + :param context: neutron api request context + :param id: UUID representing network profile to update + :param network_profile: network profile dictionary + :returns: updated network profile dictionary + """ + # Flag to check whether network profile is updated or not. + is_updated = False + p = network_profile["network_profile"] + original_net_p = get_network_profile(context.session, id) + # Update network profile to tenant id binding. + if context.is_admin and "add_tenant" in p: + self.add_network_profile_tenant(context.session, id, + p["add_tenant"]) + is_updated = True + if context.is_admin and "remove_tenant" in p: + delete_profile_binding(context.session, p["remove_tenant"], id) + is_updated = True + if original_net_p.segment_type == c_const.NETWORK_TYPE_TRUNK: + #TODO(abhraut): Remove check when Trunk supports segment range. + if p.get('segment_range'): + msg = _("segment_range not required for TRUNK") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if original_net_p.segment_type in [c_const.NETWORK_TYPE_VLAN, + c_const.NETWORK_TYPE_TRUNK]: + if p.get("multicast_ip_range"): + msg = _("multicast_ip_range not required") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + # Update segment range if network profile is not in use. 
+ if (p.get("segment_range") and + p.get("segment_range") != original_net_p.segment_range): + if not self._segment_in_use(context.session, original_net_p): + delete_segment_allocations(context.session, original_net_p) + updated_net_p = update_network_profile(context.session, id, p) + self._validate_segment_range_uniqueness(context, + updated_net_p, id) + if original_net_p.segment_type == c_const.NETWORK_TYPE_VLAN: + sync_vlan_allocations(context.session, updated_net_p) + if original_net_p.segment_type == c_const.NETWORK_TYPE_OVERLAY: + sync_vxlan_allocations(context.session, updated_net_p) + is_updated = True + else: + raise c_exc.NetworkProfileInUse(profile=id) + if (p.get('multicast_ip_range') and + (p.get("multicast_ip_range") != + original_net_p.get("multicast_ip_range"))): + self._validate_multicast_ip_range(p) + if not self._segment_in_use(context.session, original_net_p): + is_updated = True + else: + raise c_exc.NetworkProfileInUse(profile=id) + # Update network profile if name is updated and the network profile + # is not yet updated. + if "name" in p and not is_updated: + is_updated = True + # Return network profile if it is successfully updated. + if is_updated: + return self._make_network_profile_dict( + update_network_profile(context.session, id, p)) + + def get_network_profile(self, context, id, fields=None): + """ + Retrieve a network profile. + + :param context: neutron api request context + :param id: UUID representing the network profile to retrieve + :params fields: a list of strings that are valid keys in a network + profile dictionary. Only these fields will be returned + :returns: network profile dictionary + """ + profile = get_network_profile(context.session, id) + return self._make_network_profile_dict(profile, fields) + + def get_network_profiles(self, context, filters=None, fields=None): + """ + Retrieve a list of all network profiles. + + Retrieve all network profiles if tenant is admin. For a non-admin + tenant, retrieve all network profiles belonging to this tenant only. + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + network profile object. Values in this dictiontary are + an iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a network + profile dictionary. Only these fields will be returned + :returns: list of all network profiles + """ + if context.is_admin: + return self._get_collection(context, n1kv_models_v2.NetworkProfile, + self._make_network_profile_dict, + filters=filters, fields=fields) + return self._get_network_collection_for_tenant(context.session, + n1kv_models_v2. + NetworkProfile, + context.tenant_id) + + def add_network_profile_tenant(self, + db_session, + network_profile_id, + tenant_id): + """ + Add a tenant to a network profile. + + :param db_session: database session + :param network_profile_id: UUID representing network profile + :param tenant_id: UUID representing the tenant + :returns: profile binding object + """ + return create_profile_binding(db_session, + tenant_id, + network_profile_id, + c_const.NETWORK) + + def network_profile_exists(self, context, id): + """ + Verify whether a network profile for given id exists. 
+ + :param context: neutron api request context + :param id: UUID representing network profile + :returns: true if network profile exist else False + """ + try: + get_network_profile(context.session, id) + return True + except c_exc.NetworkProfileNotFound(profile=id): + return False + + def _get_segment_range(self, data): + return (int(seg) for seg in data.split("-")[:2]) + + def _validate_network_profile_args(self, context, p): + """ + Validate completeness of Nexus1000V network profile arguments. + + :param context: neutron api request context + :param p: network profile object + """ + self._validate_network_profile(p) + segment_type = p['segment_type'].lower() + if segment_type != c_const.NETWORK_TYPE_TRUNK: + self._validate_segment_range_uniqueness(context, p) + + def _validate_segment_range(self, network_profile): + """ + Validate segment range values. + + :param network_profile: network profile object + """ + if not re.match(r"(\d+)\-(\d+)", network_profile["segment_range"]): + msg = _("Invalid segment range. example range: 500-550") + raise n_exc.InvalidInput(error_message=msg) + + def _validate_multicast_ip_range(self, network_profile): + """ + Validate multicast ip range values. + + :param network_profile: network profile object + """ + try: + min_ip, max_ip = (network_profile + ['multicast_ip_range'].split('-', 1)) + except ValueError: + msg = _("Invalid multicast ip address range. " + "example range: 224.1.1.1-224.1.1.10") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + for ip in [min_ip, max_ip]: + try: + if not netaddr.IPAddress(ip).is_multicast(): + msg = _("%s is not a valid multicast ip address") % ip + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if netaddr.IPAddress(ip) <= netaddr.IPAddress('224.0.0.255'): + msg = _("%s is reserved multicast ip address") % ip + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + except netaddr.AddrFormatError: + msg = _("%s is not a valid ip address") % ip + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if netaddr.IPAddress(min_ip) > netaddr.IPAddress(max_ip): + msg = (_("Invalid multicast IP range '%(min_ip)s-%(max_ip)s':" + " Range should be from low address to high address") % + {'min_ip': min_ip, 'max_ip': max_ip}) + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + + def _validate_network_profile(self, net_p): + """ + Validate completeness of a network profile arguments. 
+ + :param net_p: network profile object + """ + if any(net_p[arg] == "" for arg in ["segment_type"]): + msg = _("Arguments segment_type missing" + " for network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + segment_type = net_p["segment_type"].lower() + if segment_type not in [c_const.NETWORK_TYPE_VLAN, + c_const.NETWORK_TYPE_OVERLAY, + c_const.NETWORK_TYPE_TRUNK, + c_const.NETWORK_TYPE_MULTI_SEGMENT]: + msg = _("segment_type should either be vlan, overlay, " + "multi-segment or trunk") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if segment_type == c_const.NETWORK_TYPE_VLAN: + if "physical_network" not in net_p: + msg = _("Argument physical_network missing " + "for network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if segment_type == c_const.NETWORK_TYPE_TRUNK: + if net_p["segment_range"]: + msg = _("segment_range not required for trunk") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if segment_type in [c_const.NETWORK_TYPE_TRUNK, + c_const.NETWORK_TYPE_OVERLAY]: + if not attributes.is_attr_set(net_p.get("sub_type")): + msg = _("Argument sub_type missing " + "for network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + if segment_type in [c_const.NETWORK_TYPE_VLAN, + c_const.NETWORK_TYPE_OVERLAY]: + if "segment_range" not in net_p: + msg = _("Argument segment_range missing " + "for network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + self._validate_segment_range(net_p) + if segment_type == c_const.NETWORK_TYPE_OVERLAY: + if net_p['sub_type'] != c_const.NETWORK_SUBTYPE_NATIVE_VXLAN: + net_p['multicast_ip_range'] = '0.0.0.0' + else: + multicast_ip_range = net_p.get("multicast_ip_range") + if not attributes.is_attr_set(multicast_ip_range): + msg = _("Argument multicast_ip_range missing" + " for VXLAN multicast network profile") + LOG.error(msg) + raise n_exc.InvalidInput(error_message=msg) + self._validate_multicast_ip_range(net_p) + else: + net_p['multicast_ip_range'] = '0.0.0.0' + + def _validate_segment_range_uniqueness(self, context, net_p, id=None): + """ + Validate that segment range doesn't overlap. 
+
+        :param context: neutron api request context
+        :param net_p: network profile dictionary
+        :param id: UUID representing the network profile being updated
+        """
+        segment_type = net_p["segment_type"].lower()
+        seg_min, seg_max = self._get_segment_range(net_p['segment_range'])
+        if segment_type == c_const.NETWORK_TYPE_VLAN:
+            if not ((seg_min <= seg_max) and
+                    ((seg_min in range(constants.MIN_VLAN_TAG,
+                                       c_const.NEXUS_VLAN_RESERVED_MIN) and
+                      seg_max in range(constants.MIN_VLAN_TAG,
+                                       c_const.NEXUS_VLAN_RESERVED_MIN)) or
+                     (seg_min in range(c_const.NEXUS_VLAN_RESERVED_MAX + 1,
+                                       constants.MAX_VLAN_TAG) and
+                      seg_max in range(c_const.NEXUS_VLAN_RESERVED_MAX + 1,
+                                       constants.MAX_VLAN_TAG)))):
+                msg = (_("Segment range is invalid, select from "
+                         "%(min)s-%(nmin)s, %(nmax)s-%(max)s") %
+                       {"min": constants.MIN_VLAN_TAG,
+                        "nmin": c_const.NEXUS_VLAN_RESERVED_MIN - 1,
+                        "nmax": c_const.NEXUS_VLAN_RESERVED_MAX + 1,
+                        "max": constants.MAX_VLAN_TAG - 1})
+                LOG.error(msg)
+                raise n_exc.InvalidInput(error_message=msg)
+            profiles = _get_network_profiles(
+                db_session=context.session,
+                physical_network=net_p["physical_network"]
+            )
+        elif segment_type in [c_const.NETWORK_TYPE_OVERLAY,
+                              c_const.NETWORK_TYPE_MULTI_SEGMENT,
+                              c_const.NETWORK_TYPE_TRUNK]:
+            if (seg_min > seg_max or
+                seg_min < c_const.NEXUS_VXLAN_MIN or
+                seg_max > c_const.NEXUS_VXLAN_MAX):
+                msg = (_("segment range is invalid. Valid range is: "
+                         "%(min)s-%(max)s") %
+                       {"min": c_const.NEXUS_VXLAN_MIN,
+                        "max": c_const.NEXUS_VXLAN_MAX})
+                LOG.error(msg)
+                raise n_exc.InvalidInput(error_message=msg)
+            profiles = _get_network_profiles(db_session=context.session)
+        if profiles:
+            for profile in profiles:
+                if id and profile.id == id:
+                    continue
+                name = profile.name
+                segment_range = profile.segment_range
+                if net_p["name"] == name:
+                    msg = (_("NetworkProfile name %s already exists") %
+                           net_p["name"])
+                    LOG.error(msg)
+                    raise n_exc.InvalidInput(error_message=msg)
+                if (c_const.NETWORK_TYPE_MULTI_SEGMENT in
+                    [profile.segment_type, net_p["segment_type"]] or
+                    c_const.NETWORK_TYPE_TRUNK in
+                    [profile.segment_type, net_p["segment_type"]]):
+                    continue
+                seg_min, seg_max = self._get_segment_range(
+                    net_p["segment_range"])
+                profile_seg_min, profile_seg_max = self._get_segment_range(
+                    segment_range)
+                if ((profile_seg_min <= seg_min <= profile_seg_max) or
+                    (profile_seg_min <= seg_max <= profile_seg_max) or
+                    ((seg_min <= profile_seg_min) and
+                     (seg_max >= profile_seg_max))):
+                    msg = _("Segment range overlaps with another profile")
+                    LOG.error(msg)
+                    raise n_exc.InvalidInput(error_message=msg)
+
+    def _get_network_profile_by_name(self, db_session, name):
+        """
+        Retrieve network profile based on name.
+
+        :param db_session: database session
+        :param name: string representing the name for the network profile
+        :returns: network profile object
+        """
+        with db_session.begin(subtransactions=True):
+            try:
+                return (db_session.query(n1kv_models_v2.NetworkProfile).
+                        filter_by(name=name).one())
+            except exc.NoResultFound:
+                raise c_exc.NetworkProfileNotFound(profile=name)
+
+
+class PolicyProfile_db_mixin(object):
+
+    """Policy Profile Mixin."""
+
+    def _get_policy_collection_for_tenant(self, db_session, model, tenant_id):
+        profile_ids = (db_session.query(n1kv_models_v2.
+                       ProfileBinding.profile_id)
+                       .filter_by(tenant_id=tenant_id).
+ filter_by(profile_type=c_const.POLICY).all()) + profiles = db_session.query(model).filter(model.id.in_( + pid[0] for pid in profile_ids)) + return [self._make_policy_profile_dict(p) for p in profiles] + + def _make_policy_profile_dict(self, policy_profile, fields=None): + res = {"id": policy_profile["id"], "name": policy_profile["name"]} + return self._fields(res, fields) + + def _make_profile_bindings_dict(self, profile_binding, fields=None): + res = {"profile_id": profile_binding["profile_id"], + "tenant_id": profile_binding["tenant_id"]} + return self._fields(res, fields) + + def _policy_profile_exists(self, id): + db_session = db.get_session() + return (db_session.query(n1kv_models_v2.PolicyProfile). + filter_by(id=id).first()) + + def get_policy_profile(self, context, id, fields=None): + """ + Retrieve a policy profile for the given UUID. + + :param context: neutron api request context + :param id: UUID representing policy profile to fetch + :params fields: a list of strings that are valid keys in a policy + profile dictionary. Only these fields will be returned + :returns: policy profile dictionary + """ + profile = get_policy_profile(context.session, id) + return self._make_policy_profile_dict(profile, fields) + + def get_policy_profiles(self, context, filters=None, fields=None): + """ + Retrieve a list of policy profiles. + + Retrieve all policy profiles if tenant is admin. For a non-admin + tenant, retrieve all policy profiles belonging to this tenant only. + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + policy profile object. Values in this dictiontary are + an iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a policy + profile dictionary. Only these fields will be returned + :returns: list of all policy profiles + """ + if context.is_admin: + return self._get_collection(context, n1kv_models_v2.PolicyProfile, + self._make_policy_profile_dict, + filters=filters, fields=fields) + else: + return self._get_policy_collection_for_tenant(context.session, + n1kv_models_v2. + PolicyProfile, + context.tenant_id) + + def get_policy_profile_bindings(self, context, filters=None, fields=None): + """ + Retrieve a list of profile bindings for policy profiles. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + profile bindings object. Values in this dictiontary are + an iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a profile + bindings dictionary. Only these fields will be returned + :returns: list of profile bindings + """ + if context.is_admin: + profile_bindings = _get_profile_bindings( + context.session, + profile_type=c_const.POLICY) + return [self._make_profile_bindings_dict(pb) + for pb in profile_bindings] + + def update_policy_profile(self, context, id, policy_profile): + """ + Update a policy profile. + + Add/remove policy profile to tenant-id binding for the corresponding + option and if user is admin. 
+ :param context: neutron api request context + :param id: UUID representing policy profile to update + :param policy_profile: policy profile dictionary + :returns: updated policy profile dictionary + """ + p = policy_profile["policy_profile"] + if context.is_admin and "add_tenant" in p: + self.add_policy_profile_tenant(context.session, + id, + p["add_tenant"]) + return self._make_policy_profile_dict(get_policy_profile( + context.session, id)) + if context.is_admin and "remove_tenant" in p: + delete_profile_binding(context.session, p["remove_tenant"], id) + return self._make_policy_profile_dict(get_policy_profile( + context.session, id)) + return self._make_policy_profile_dict( + update_policy_profile(context.session, id, p)) + + def add_policy_profile_tenant(self, + db_session, + policy_profile_id, + tenant_id): + """ + Add a tenant to a policy profile binding. + + :param db_session: database session + :param policy_profile_id: UUID representing policy profile + :param tenant_id: UUID representing the tenant + :returns: profile binding object + """ + return create_profile_binding(db_session, + tenant_id, + policy_profile_id, + c_const.POLICY) + + def remove_policy_profile_tenant(self, policy_profile_id, tenant_id): + """ + Remove a tenant to a policy profile binding. + + :param policy_profile_id: UUID representing policy profile + :param tenant_id: UUID representing the tenant + """ + delete_profile_binding(None, tenant_id, policy_profile_id) + + def _delete_policy_profile(self, policy_profile_id): + """Delete policy profile and associated binding.""" + db_session = db.get_session() + with db_session.begin(subtransactions=True): + (db_session.query(n1kv_models_v2.PolicyProfile). + filter_by(id=policy_profile_id).delete()) + + def _get_policy_profile_by_name(self, name): + """ + Retrieve policy profile based on name. + + :param name: string representing the name for the policy profile + :returns: policy profile object + """ + db_session = db.get_session() + with db_session.begin(subtransactions=True): + return (db_session.query(n1kv_models_v2.PolicyProfile). + filter_by(name=name).one()) + + def _remove_all_fake_policy_profiles(self): + """ + Remove all policy profiles associated with fake tenant id. + + This will find all Profile ID where tenant is not set yet - set A + and profiles where tenant was already set - set B + and remove what is in both and no tenant id set + """ + db_session = db.get_session() + with db_session.begin(subtransactions=True): + a_set_q = (db_session.query(n1kv_models_v2.ProfileBinding). + filter_by(tenant_id=c_const.TENANT_ID_NOT_SET, + profile_type=c_const.POLICY)) + a_set = set(i.profile_id for i in a_set_q) + b_set_q = (db_session.query(n1kv_models_v2.ProfileBinding). + filter(sql.and_(n1kv_models_v2.ProfileBinding. + tenant_id != c_const.TENANT_ID_NOT_SET, + n1kv_models_v2.ProfileBinding. + profile_type == c_const.POLICY))) + b_set = set(i.profile_id for i in b_set_q) + (db_session.query(n1kv_models_v2.ProfileBinding). + filter(sql.and_(n1kv_models_v2.ProfileBinding.profile_id. + in_(a_set & b_set), + n1kv_models_v2.ProfileBinding.tenant_id == + c_const.TENANT_ID_NOT_SET)). + delete(synchronize_session="fetch")) + + def _add_policy_profile(self, + policy_profile_name, + policy_profile_id, + tenant_id=None): + """ + Add Policy profile and tenant binding. 
+ + :param policy_profile_name: string representing the name for the + policy profile + :param policy_profile_id: UUID representing the policy profile + :param tenant_id: UUID representing the tenant + """ + policy_profile = {"id": policy_profile_id, "name": policy_profile_name} + tenant_id = tenant_id or c_const.TENANT_ID_NOT_SET + if not self._policy_profile_exists(policy_profile_id): + create_policy_profile(policy_profile) + create_profile_binding(None, + tenant_id, + policy_profile["id"], + c_const.POLICY) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/n1kv_models_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/n1kv_models_v2.py new file mode 100644 index 00000000..b104666a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/n1kv_models_v2.py @@ -0,0 +1,183 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems Inc. +# @author: Rudrajit Tapadar, Cisco Systems Inc. + +import sqlalchemy as sa + +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_constants + + +LOG = logging.getLogger(__name__) + + +class N1kvVlanAllocation(model_base.BASEV2): + + """Represents allocation state of vlan_id on physical network.""" + __tablename__ = 'cisco_n1kv_vlan_allocations' + + physical_network = sa.Column(sa.String(64), + nullable=False, + primary_key=True) + vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False) + network_profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_network_profiles.id', + ondelete="CASCADE"), + nullable=False) + + +class N1kvVxlanAllocation(model_base.BASEV2): + + """Represents allocation state of vxlan_id.""" + __tablename__ = 'cisco_n1kv_vxlan_allocations' + + vxlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False) + network_profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_network_profiles.id', + ondelete="CASCADE"), + nullable=False) + + +class N1kvPortBinding(model_base.BASEV2): + + """Represents binding of ports to policy profile.""" + __tablename__ = 'cisco_n1kv_port_bindings' + + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_policy_profiles.id')) + + +class N1kvNetworkBinding(model_base.BASEV2): + + """Represents binding of virtual network to physical realization.""" + __tablename__ = 'cisco_n1kv_network_bindings' + + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + network_type = sa.Column(sa.String(32), nullable=False) + physical_network = sa.Column(sa.String(64)) + segmentation_id = sa.Column(sa.Integer) + 
multicast_ip = sa.Column(sa.String(32)) + profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_network_profiles.id')) + + +class N1kVmNetwork(model_base.BASEV2): + + """Represents VM Network information.""" + __tablename__ = 'cisco_n1kv_vmnetworks' + + name = sa.Column(sa.String(80), primary_key=True) + profile_id = sa.Column(sa.String(36), + sa.ForeignKey('cisco_policy_profiles.id')) + network_id = sa.Column(sa.String(36)) + port_count = sa.Column(sa.Integer) + + +class NetworkProfile(model_base.BASEV2, models_v2.HasId): + + """ + Nexus1000V Network Profiles + + segment_type - VLAN, OVERLAY, TRUNK, MULTI_SEGMENT + sub_type - TRUNK_VLAN, TRUNK_VXLAN, native_vxlan, enhanced_vxlan + segment_range - '-' + multicast_ip_index - + multicast_ip_range - '-' + physical_network - Name for the physical network + """ + __tablename__ = 'cisco_network_profiles' + + name = sa.Column(sa.String(255)) + segment_type = sa.Column(sa.Enum(cisco_constants.NETWORK_TYPE_VLAN, + cisco_constants.NETWORK_TYPE_OVERLAY, + cisco_constants.NETWORK_TYPE_TRUNK, + cisco_constants. + NETWORK_TYPE_MULTI_SEGMENT, + name='segment_type'), + nullable=False) + sub_type = sa.Column(sa.String(255)) + segment_range = sa.Column(sa.String(255)) + multicast_ip_index = sa.Column(sa.Integer, default=0) + multicast_ip_range = sa.Column(sa.String(255)) + physical_network = sa.Column(sa.String(255)) + + +class PolicyProfile(model_base.BASEV2): + + """ + Nexus1000V Network Profiles + + Both 'id' and 'name' are coming from Nexus1000V switch + """ + __tablename__ = 'cisco_policy_profiles' + + id = sa.Column(sa.String(36), primary_key=True) + name = sa.Column(sa.String(255)) + + +class ProfileBinding(model_base.BASEV2): + + """ + Represents a binding of Network Profile + or Policy Profile to tenant_id + """ + __tablename__ = 'cisco_n1kv_profile_bindings' + + profile_type = sa.Column(sa.Enum(cisco_constants.NETWORK, + cisco_constants.POLICY, + name='profile_type')) + tenant_id = sa.Column(sa.String(36), + primary_key=True, + default=cisco_constants.TENANT_ID_NOT_SET) + profile_id = sa.Column(sa.String(36), primary_key=True) + + +class N1kvTrunkSegmentBinding(model_base.BASEV2): + + """Represents binding of segments in trunk networks.""" + __tablename__ = 'cisco_n1kv_trunk_segments' + + trunk_segment_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', + ondelete="CASCADE"), + primary_key=True) + segment_id = sa.Column(sa.String(36), nullable=False, primary_key=True) + dot1qtag = sa.Column(sa.String(36), nullable=False, primary_key=True) + + +class N1kvMultiSegmentNetworkBinding(model_base.BASEV2): + + """Represents binding of segments in multi-segment networks.""" + __tablename__ = 'cisco_n1kv_multi_segments' + + multi_segment_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', + ondelete="CASCADE"), + primary_key=True) + segment1_id = sa.Column(sa.String(36), nullable=False, primary_key=True) + segment2_id = sa.Column(sa.String(36), nullable=False, primary_key=True) + encap_profile_name = sa.Column(sa.String(36)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/network_db_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/network_db_v2.py new file mode 100644 index 00000000..350950e1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/network_db_v2.py @@ -0,0 +1,288 @@ +# Copyright 2012, Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rohit Agarwalla, Cisco Systems, Inc. + +from sqlalchemy.orm import exc + +from neutron.db import api as db +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.db import network_models_v2 +# Do NOT remove this import. It is required for all the models to be seen +# by db.initialize() when called from VirtualPhysicalSwitchModelV2.__init__. +from neutron.plugins.cisco.db import nexus_models_v2 # noqa +from neutron.plugins.openvswitch import ovs_models_v2 + + +LOG = logging.getLogger(__name__) + + +def get_all_qoss(tenant_id): + """Lists all the qos to tenant associations.""" + LOG.debug(_("get_all_qoss() called")) + session = db.get_session() + return (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id).all()) + + +def get_qos(tenant_id, qos_id): + """Lists the qos given a tenant_id and qos_id.""" + LOG.debug(_("get_qos() called")) + session = db.get_session() + try: + return (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id). + filter_by(qos_id=qos_id).one()) + except exc.NoResultFound: + raise c_exc.QosNotFound(qos_id=qos_id, + tenant_id=tenant_id) + + +def add_qos(tenant_id, qos_name, qos_desc): + """Adds a qos to tenant association.""" + LOG.debug(_("add_qos() called")) + session = db.get_session() + try: + qos = (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id). + filter_by(qos_name=qos_name).one()) + raise c_exc.QosNameAlreadyExists(qos_name=qos_name, + tenant_id=tenant_id) + except exc.NoResultFound: + qos = network_models_v2.QoS(qos_id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + qos_name=qos_name, + qos_desc=qos_desc) + session.add(qos) + session.flush() + return qos + + +def remove_qos(tenant_id, qos_id): + """Removes a qos to tenant association.""" + session = db.get_session() + try: + qos = (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id). + filter_by(qos_id=qos_id).one()) + session.delete(qos) + session.flush() + return qos + except exc.NoResultFound: + pass + + +def update_qos(tenant_id, qos_id, new_qos_name=None): + """Updates a qos to tenant association.""" + session = db.get_session() + try: + qos = (session.query(network_models_v2.QoS). + filter_by(tenant_id=tenant_id). + filter_by(qos_id=qos_id).one()) + if new_qos_name: + qos["qos_name"] = new_qos_name + session.merge(qos) + session.flush() + return qos + except exc.NoResultFound: + raise c_exc.QosNotFound(qos_id=qos_id, + tenant_id=tenant_id) + + +def get_all_credentials(): + """Lists all the creds for a tenant.""" + session = db.get_session() + return (session.query(network_models_v2.Credential).all()) + + +def get_credential(credential_id): + """Lists the creds for given a cred_id.""" + session = db.get_session() + try: + return (session.query(network_models_v2.Credential). 
+ filter_by(credential_id=credential_id).one()) + except exc.NoResultFound: + raise c_exc.CredentialNotFound(credential_id=credential_id) + + +def get_credential_name(credential_name): + """Lists the creds for given a cred_name.""" + session = db.get_session() + try: + return (session.query(network_models_v2.Credential). + filter_by(credential_name=credential_name).one()) + except exc.NoResultFound: + raise c_exc.CredentialNameNotFound(credential_name=credential_name) + + +def add_credential(credential_name, user_name, password, type): + """Create a credential.""" + session = db.get_session() + try: + cred = (session.query(network_models_v2.Credential). + filter_by(credential_name=credential_name).one()) + raise c_exc.CredentialAlreadyExists(credential_name=credential_name) + except exc.NoResultFound: + cred = network_models_v2.Credential( + credential_id=uuidutils.generate_uuid(), + credential_name=credential_name, + user_name=user_name, + password=password, + type=type) + session.add(cred) + session.flush() + return cred + + +def remove_credential(credential_id): + """Removes a credential.""" + session = db.get_session() + try: + cred = (session.query(network_models_v2.Credential). + filter_by(credential_id=credential_id).one()) + session.delete(cred) + session.flush() + return cred + except exc.NoResultFound: + pass + + +def update_credential(credential_id, + new_user_name=None, new_password=None): + """Updates a credential for a tenant.""" + session = db.get_session() + try: + cred = (session.query(network_models_v2.Credential). + filter_by(credential_id=credential_id).one()) + if new_user_name: + cred["user_name"] = new_user_name + if new_password: + cred["password"] = new_password + session.merge(cred) + session.flush() + return cred + except exc.NoResultFound: + raise c_exc.CredentialNotFound(credential_id=credential_id) + + +def get_all_n1kv_credentials(): + session = db.get_session() + return (session.query(network_models_v2.Credential). + filter_by(type='n1kv')) + + +def add_provider_network(network_id, network_type, segmentation_id): + """Add a network to the provider network table.""" + session = db.get_session() + if session.query(network_models_v2.ProviderNetwork).filter_by( + network_id=network_id).first(): + raise c_exc.ProviderNetworkExists(network_id) + pnet = network_models_v2.ProviderNetwork(network_id=network_id, + network_type=network_type, + segmentation_id=segmentation_id) + session.add(pnet) + session.flush() + + +def remove_provider_network(network_id): + """Remove network_id from the provider network table. + + :param network_id: Any network id. If it is not in the table, do nothing. + :return: network_id if it was in the table and successfully removed. + """ + session = db.get_session() + pnet = (session.query(network_models_v2.ProviderNetwork). + filter_by(network_id=network_id).first()) + if pnet: + session.delete(pnet) + session.flush() + return network_id + + +def is_provider_network(network_id): + """Return True if network_id is in the provider network table.""" + session = db.get_session() + if session.query(network_models_v2.ProviderNetwork).filter_by( + network_id=network_id).first(): + return True + + +def is_provider_vlan(vlan_id): + """Check for a for a vlan provider network with the specified vland_id. + + Returns True if the provider network table contains a vlan network + with the specified vlan_id. + """ + session = db.get_session() + if (session.query(network_models_v2.ProviderNetwork). 
+ filter_by(network_type=const.NETWORK_TYPE_VLAN, + segmentation_id=vlan_id).first()): + return True + + +def get_ovs_vlans(): + session = db.get_session() + bindings = (session.query(ovs_models_v2.VlanAllocation.vlan_id). + filter_by(allocated=True)) + return [binding.vlan_id for binding in bindings] + + +class Credential_db_mixin(object): + + """Mixin class for Cisco Credentials as a resource.""" + + def _make_credential_dict(self, credential, fields=None): + res = {'credential_id': credential['credential_id'], + 'credential_name': credential['credential_name'], + 'user_name': credential['user_name'], + 'password': credential['password'], + 'type': credential['type']} + return self._fields(res, fields) + + def create_credential(self, context, credential): + """Create a credential.""" + c = credential['credential'] + cred = add_credential(c['credential_name'], + c['user_name'], + c['password'], + c['type']) + return self._make_credential_dict(cred) + + def get_credentials(self, context, filters=None, fields=None): + """Retrieve a list of credentials.""" + return self._get_collection(context, + network_models_v2.Credential, + self._make_credential_dict, + filters=filters, + fields=fields) + + def get_credential(self, context, id, fields=None): + """Retireve the requested credential based on its id.""" + credential = get_credential(id) + return self._make_credential_dict(credential, fields) + + def update_credential(self, context, id, credential): + """Update a credential based on its id.""" + c = credential['credential'] + cred = update_credential(id, + c['user_name'], + c['password']) + return self._make_credential_dict(cred) + + def delete_credential(self, context, id): + """Delete a credential based on its id.""" + return remove_credential(id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/network_models_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/network_models_v2.py new file mode 100644 index 00000000..bf9145e0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/network_models_v2.py @@ -0,0 +1,54 @@ +# Copyright 2012, Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rohit Agarwalla, Cisco Systems, Inc. 
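+#
+# Usage sketch (illustrative only; `net_id` below is a placeholder, not part
+# of the original Cisco code): rows of the models in this module are created
+# and queried through the module-level helpers defined in
+# neutron/plugins/cisco/db/network_db_v2.py earlier in this patch, e.g.
+#
+#     from neutron.plugins.cisco.db import network_db_v2 as cdb
+#
+#     cred = cdb.add_credential('n1kv-cred', 'admin', 'secret', 'n1kv')
+#     cdb.add_provider_network(net_id, 'vlan', 100)
+#     assert cdb.is_provider_vlan(100)
+#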
+ +import sqlalchemy as sa + +from neutron.db import model_base + + +class QoS(model_base.BASEV2): + """Represents QoS policies for a tenant.""" + + __tablename__ = 'cisco_qos_policies' + + qos_id = sa.Column(sa.String(255)) + tenant_id = sa.Column(sa.String(255), primary_key=True) + qos_name = sa.Column(sa.String(255), primary_key=True) + qos_desc = sa.Column(sa.String(255)) + + +class Credential(model_base.BASEV2): + """Represents credentials for a tenant to control Cisco switches.""" + + __tablename__ = 'cisco_credentials' + + credential_id = sa.Column(sa.String(255)) + credential_name = sa.Column(sa.String(255), primary_key=True) + user_name = sa.Column(sa.String(255)) + password = sa.Column(sa.String(255)) + type = sa.Column(sa.String(255)) + + +class ProviderNetwork(model_base.BASEV2): + """Represents networks that were created as provider networks.""" + + __tablename__ = 'cisco_provider_networks' + + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + network_type = sa.Column(sa.String(255), nullable=False) + segmentation_id = sa.Column(sa.Integer, nullable=False) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/nexus_db_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/nexus_db_v2.py new file mode 100644 index 00000000..78ac51d8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/nexus_db_v2.py @@ -0,0 +1,152 @@ +# Copyright 2012, Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rohit Agarwalla, Cisco Systems, Inc. +# @author: Arvind Somya, Cisco Systems, Inc. 
(asomya@cisco.com) +# + +import sqlalchemy.orm.exc as sa_exc + +import neutron.db.api as db +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.db import nexus_models_v2 + + +LOG = logging.getLogger(__name__) + + +def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Lists a nexusport binding.""" + LOG.debug(_("get_nexusport_binding() called")) + return _lookup_all_nexus_bindings(port_id=port_id, + vlan_id=vlan_id, + switch_ip=switch_ip, + instance_id=instance_id) + + +def get_nexusvlan_binding(vlan_id, switch_ip): + """Lists a vlan and switch binding.""" + LOG.debug(_("get_nexusvlan_binding() called")) + return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip) + + +def add_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Adds a nexusport binding.""" + LOG.debug(_("add_nexusport_binding() called")) + session = db.get_session() + binding = nexus_models_v2.NexusPortBinding(port_id=port_id, + vlan_id=vlan_id, + switch_ip=switch_ip, + instance_id=instance_id) + session.add(binding) + session.flush() + return binding + + +def remove_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Removes a nexusport binding.""" + LOG.debug(_("remove_nexusport_binding() called")) + session = db.get_session() + binding = _lookup_all_nexus_bindings(session=session, + vlan_id=vlan_id, + switch_ip=switch_ip, + port_id=port_id, + instance_id=instance_id) + for bind in binding: + session.delete(bind) + session.flush() + return binding + + +def update_nexusport_binding(port_id, new_vlan_id): + """Updates nexusport binding.""" + if not new_vlan_id: + LOG.warning(_("update_nexusport_binding called with no vlan")) + return + LOG.debug(_("update_nexusport_binding called")) + session = db.get_session() + binding = _lookup_one_nexus_binding(session=session, port_id=port_id) + binding.vlan_id = new_vlan_id + session.merge(binding) + session.flush() + return binding + + +def get_nexusvm_bindings(vlan_id, instance_id): + """Lists nexusvm bindings.""" + LOG.debug(_("get_nexusvm_binding() called")) + + return _lookup_all_nexus_bindings(vlan_id=vlan_id, + instance_id=instance_id) + + +def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip): + """Lists nexusvm bindings.""" + LOG.debug(_("get_port_vlan_switch_binding() called")) + return _lookup_all_nexus_bindings(port_id=port_id, + switch_ip=switch_ip, + vlan_id=vlan_id) + + +def get_port_switch_bindings(port_id, switch_ip): + """List all vm/vlan bindings on a Nexus switch port.""" + LOG.debug(_("get_port_switch_bindings() called, " + "port:'%(port_id)s', switch:'%(switch_ip)s'"), + {'port_id': port_id, 'switch_ip': switch_ip}) + try: + return _lookup_all_nexus_bindings(port_id=port_id, + switch_ip=switch_ip) + except c_exc.NexusPortBindingNotFound: + pass + + +def get_nexussvi_bindings(): + """Lists nexus svi bindings.""" + LOG.debug(_("get_nexussvi_bindings() called")) + return _lookup_all_nexus_bindings(port_id='router') + + +def _lookup_nexus_bindings(query_type, session=None, **bfilter): + """Look up 'query_type' Nexus bindings matching the filter. + + :param query_type: 'all', 'one' or 'first' + :param session: db session + :param bfilter: filter for bindings query + :return: bindings if query gave a result, else + raise NexusPortBindingNotFound. 
+ """ + if session is None: + session = db.get_session() + query_method = getattr(session.query( + nexus_models_v2.NexusPortBinding).filter_by(**bfilter), query_type) + try: + bindings = query_method() + if bindings: + return bindings + except sa_exc.NoResultFound: + pass + raise c_exc.NexusPortBindingNotFound(**bfilter) + + +def _lookup_all_nexus_bindings(session=None, **bfilter): + return _lookup_nexus_bindings('all', session, **bfilter) + + +def _lookup_one_nexus_binding(session=None, **bfilter): + return _lookup_nexus_bindings('one', session, **bfilter) + + +def _lookup_first_nexus_binding(session=None, **bfilter): + return _lookup_nexus_bindings('first', session, **bfilter) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/nexus_models_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/nexus_models_v2.py new file mode 100644 index 00000000..cf0fd7b3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/db/nexus_models_v2.py @@ -0,0 +1,44 @@ +# Copyright 2012, Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Rohit Agarwalla, Cisco Systems, Inc. + +import sqlalchemy as sa + +from neutron.db import model_base + + +class NexusPortBinding(model_base.BASEV2): + """Represents a binding of VM's to nexus ports.""" + + __tablename__ = "cisco_nexusport_bindings" + + id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) + port_id = sa.Column(sa.String(255)) + vlan_id = sa.Column(sa.Integer, nullable=False) + switch_ip = sa.Column(sa.String(255), nullable=False) + instance_id = sa.Column(sa.String(255), nullable=False) + + def __repr__(self): + """Just the binding, without the id key.""" + return ("" % + (self.port_id, self.vlan_id, self.switch_ip, self.instance_id)) + + def __eq__(self, other): + """Compare only the binding, without the id key.""" + return ( + self.port_id == other.port_id and + self.vlan_id == other.vlan_id and + self.switch_ip == other.switch_ip and + self.instance_id == other.instance_id + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/__init__.py new file mode 100644 index 00000000..0318309e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/_credential_view.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/_credential_view.py new file mode 100644 index 00000000..a0f80d45 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/_credential_view.py @@ -0,0 +1,50 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. +# + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + """ViewBuilder for Credential, derived from neutron.views.networks.""" + + def __init__(self, base_url): + """Initialize builder. + + :param base_url: url of the root wsgi application + """ + self.base_url = base_url + + def build(self, credential_data, is_detail=False): + """Generic method used to generate a credential entity.""" + if is_detail: + credential = self._build_detail(credential_data) + else: + credential = self._build_simple(credential_data) + return credential + + def _build_simple(self, credential_data): + """Return a simple description of credential.""" + return dict(credential=dict(id=credential_data['credential_id'])) + + def _build_detail(self, credential_data): + """Return a detailed description of credential.""" + return dict(credential=dict(id=credential_data['credential_id'], + name=credential_data['user_name'], + password=credential_data['password'])) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/_qos_view.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/_qos_view.py new file mode 100644 index 00000000..b748b456 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/_qos_view.py @@ -0,0 +1,50 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. +# + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + """ViewBuilder for QoS, derived from neutron.views.networks.""" + + def __init__(self, base_url): + """Initialize builder. 
+ + :param base_url: url of the root wsgi application + """ + self.base_url = base_url + + def build(self, qos_data, is_detail=False): + """Generic method used to generate a QoS entity.""" + if is_detail: + qos = self._build_detail(qos_data) + else: + qos = self._build_simple(qos_data) + return qos + + def _build_simple(self, qos_data): + """Return a simple description of qos.""" + return dict(qos=dict(id=qos_data['qos_id'])) + + def _build_detail(self, qos_data): + """Return a detailed description of qos.""" + return dict(qos=dict(id=qos_data['qos_id'], + name=qos_data['qos_name'], + description=qos_data['qos_desc'])) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/credential.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/credential.py new file mode 100644 index 00000000..9bd621fb --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/credential.py @@ -0,0 +1,82 @@ +# Copyright 2013 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. +# @author: Abhishek Raut, Cisco Systems, Inc + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron import manager + + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'credentials': { + 'credential_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'credential_name': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'is_visible': False, 'default': ''}, + 'type': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'user_name': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'password': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + }, +} + + +class Credential(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + """Returns Extended Resource Name.""" + return "Cisco Credential" + + @classmethod + def get_alias(cls): + """Returns Extended Resource Alias.""" + return "credential" + + @classmethod + def get_description(cls): + """Returns Extended Resource Description.""" + return "Credential include username and password" + + @classmethod + def get_namespace(cls): + """Returns Extended Resource Namespace.""" + return "http://docs.ciscocloud.com/api/ext/credential/v2.0" + + @classmethod + def get_updated(cls): + """Returns Extended Resource Update Time.""" + return "2011-07-25T13:25:27-06:00" + + @classmethod + def get_resources(cls): + """Returns Extended Resources.""" + resource_name = "credential" + collection_name = resource_name + "s" + plugin = manager.NeutronManager.get_plugin() + params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) + controller = base.create_resource(collection_name, + resource_name, + plugin, params) + return 
[extensions.ResourceExtension(collection_name, + controller)] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/n1kv.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/n1kv.py new file mode 100644 index 00000000..8bf21c65 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/n1kv.py @@ -0,0 +1,104 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Rudrajit Tapadar, Cisco Systems, Inc. +# @author: Aruna Kushwaha, Cisco Systems, Inc. +# @author: Sergey Sudakovich, Cisco Systems, Inc. + +from neutron.api import extensions +from neutron.api.v2 import attributes + + +PROFILE_ID = 'n1kv:profile_id' +MULTICAST_IP = 'n1kv:multicast_ip' +SEGMENT_ADD = 'n1kv:segment_add' +SEGMENT_DEL = 'n1kv:segment_del' +MEMBER_SEGMENTS = 'n1kv:member_segments' + +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': { + PROFILE_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + MULTICAST_IP: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + SEGMENT_ADD: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + SEGMENT_DEL: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + MEMBER_SEGMENTS: {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True}, + }, + 'ports': { + PROFILE_ID: {'allow_post': True, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'is_visible': True} + } +} + + +class N1kv(extensions.ExtensionDescriptor): + + """Extension class supporting N1kv profiles. + + This class is used by neutron's extension framework to make + metadata about the n1kv profile extension available to + clients. No new resources are defined by this extension. Instead, + the existing network resource's request and response messages are + extended with attributes in the n1kv profile namespace. + + To create a network based on n1kv profile using the CLI with admin rights: + + (shell) net-create --tenant_id \ + --n1kv:profile_id + (shell) port-create --tenant_id \ + --n1kv:profile_id + + + With admin rights, network dictionaries returned from CLI commands + will also include n1kv profile attributes. 
+ """ + + @classmethod + def get_name(cls): + return "n1kv" + + @classmethod + def get_alias(cls): + return "n1kv" + + @classmethod + def get_description(cls): + return "Expose network profile" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/n1kv/api/v2.0" + + @classmethod + def get_updated(cls): + return "2012-11-15T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/network_profile.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/network_profile.py new file mode 100644 index 00000000..09e30382 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/network_profile.py @@ -0,0 +1,101 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Sergey Sudakovich, Cisco Systems, Inc. +# @author: Rudrajit Tapadar, Cisco Systems, Inc. + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron import manager + + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'network_profiles': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'segment_type': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': ''}, + 'sub_type': {'allow_post': True, 'allow_put': False, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED}, + 'segment_range': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': ''}, + 'multicast_ip_range': {'allow_post': True, 'allow_put': True, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED}, + 'multicast_ip_index': {'allow_post': False, 'allow_put': False, + 'is_visible': False, 'default': '0'}, + 'physical_network': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': ''}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'is_visible': False, 'default': ''}, + 'add_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None}, + 'remove_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None}, + }, + 'network_profile_bindings': { + 'profile_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'is_visible': True}, + }, +} + + +class Network_profile(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Cisco N1kv Network Profiles" + + @classmethod + def get_alias(cls): + return 'network_profile' + + @classmethod + def get_description(cls): + return ("Profile includes the type of profile for N1kv") + + @classmethod + 
def get_namespace(cls): + return "http://docs.openstack.org/ext/n1kv/network-profile/api/v2.0" + + @classmethod + def get_updated(cls): + return "2012-07-20T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Extended Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + for resource_name in ['network_profile', 'network_profile_binding']: + collection_name = resource_name + "s" + controller = base.create_resource( + collection_name, + resource_name, + plugin, + RESOURCE_ATTRIBUTE_MAP.get(collection_name)) + ex = extensions.ResourceExtension(collection_name, + controller) + exts.append(ex) + return exts diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/policy_profile.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/policy_profile.py new file mode 100644 index 00000000..d85f96a5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/policy_profile.py @@ -0,0 +1,83 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Sergey Sudakovich, Cisco Systems, Inc. + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron import manager + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'policy_profiles': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'name': {'allow_post': False, 'allow_put': False, + 'is_visible': True, 'default': ''}, + 'add_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None}, + 'remove_tenant': {'allow_post': True, 'allow_put': True, + 'is_visible': True, 'default': None}, + }, + 'policy_profile_bindings': { + 'profile_id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:regex': attributes.UUID_PATTERN}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'is_visible': True}, + }, +} + + +class Policy_profile(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Cisco Nexus1000V Policy Profiles" + + @classmethod + def get_alias(cls): + return 'policy_profile' + + @classmethod + def get_description(cls): + return "Profile includes the type of profile for N1kv" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/n1kv/policy-profile/api/v2.0" + + @classmethod + def get_updated(cls): + return "2012-07-20T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Extended Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + for resource_name in ['policy_profile', 'policy_profile_binding']: + collection_name = resource_name + "s" + controller = base.create_resource( + collection_name, + resource_name, + plugin, + RESOURCE_ATTRIBUTE_MAP.get(collection_name)) + ex = extensions.ResourceExtension(collection_name, + controller) + exts.append(ex) + return 
exts diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/qos.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/qos.py new file mode 100644 index 00000000..90e2247d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/extensions/qos.py @@ -0,0 +1,154 @@ +# Copyright 2011 Cisco Systems, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ying Liu, Cisco Systems, Inc. +# + +from webob import exc + +from neutron.api import api_common as common +from neutron.api import extensions +from neutron import manager +from neutron.plugins.cisco.common import cisco_exceptions as exception +from neutron.plugins.cisco.common import cisco_faults as faults +from neutron.plugins.cisco.extensions import _qos_view as qos_view +from neutron import wsgi + + +class Qos(extensions.ExtensionDescriptor): + """Qos extension file.""" + + @classmethod + def get_name(cls): + """Returns Ext Resource Name.""" + return "Cisco qos" + + @classmethod + def get_alias(cls): + """Returns Ext Resource Alias.""" + return "Cisco qos" + + @classmethod + def get_description(cls): + """Returns Ext Resource Description.""" + return "qos includes qos_name and qos_desc" + + @classmethod + def get_namespace(cls): + """Returns Ext Resource Namespace.""" + return "http://docs.ciscocloud.com/api/ext/qos/v1.0" + + @classmethod + def get_updated(cls): + """Returns Ext Resource update.""" + return "2011-07-25T13:25:27-06:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + parent_resource = dict(member_name="tenant", + collection_name="extensions/csco/tenants") + + controller = QosController(manager.NeutronManager.get_plugin()) + return [extensions.ResourceExtension('qoss', controller, + parent=parent_resource)] + + +class QosController(common.NeutronController, wsgi.Controller): + """qos API controller based on NeutronController.""" + + _qos_ops_param_list = [ + {'param-name': 'qos_name', 'required': True}, + {'param-name': 'qos_desc', 'required': True}, + ] + + _serialization_metadata = { + "application/xml": { + "attributes": { + "qos": ["id", "name"], + }, + }, + } + + def __init__(self, plugin): + self._resource_name = 'qos' + self._plugin = plugin + + def index(self, request, tenant_id): + """Returns a list of qos ids.""" + return self._items(request, tenant_id, is_detail=False) + + def _items(self, request, tenant_id, is_detail): + """Returns a list of qoss.""" + qoss = self._plugin.get_all_qoss(tenant_id) + builder = qos_view.get_view_builder(request) + result = [builder.build(qos, is_detail)['qos'] for qos in qoss] + return dict(qoss=result) + + # pylint: disable-msg=E1101 + def show(self, request, tenant_id, id): + """Returns qos details for the given qos id.""" + try: + qos = self._plugin.get_qos_details(tenant_id, id) + builder = qos_view.get_view_builder(request) + #build response with details + result = builder.build(qos, True) + return dict(qoss=result) + except exception.QosNotFound as exp: + 
return faults.Fault(faults.QosNotFound(exp)) + + def create(self, request, tenant_id): + """Creates a new qos for a given tenant.""" + #look for qos name in request + try: + body = self._deserialize(request.body, request.get_content_type()) + req_body = self._prepare_request_body(body, + self._qos_ops_param_list) + req_params = req_body[self._resource_name] + except exc.HTTPError as exp: + return faults.Fault(exp) + qos = self._plugin.create_qos(tenant_id, + req_params['qos_name'], + req_params['qos_desc']) + builder = qos_view.get_view_builder(request) + result = builder.build(qos) + return dict(qoss=result) + + def update(self, request, tenant_id, id): + """Updates the name for the qos with the given id.""" + try: + body = self._deserialize(request.body, request.get_content_type()) + req_body = self._prepare_request_body(body, + self._qos_ops_param_list) + req_params = req_body[self._resource_name] + except exc.HTTPError as exp: + return faults.Fault(exp) + try: + qos = self._plugin.rename_qos(tenant_id, id, + req_params['qos_name']) + + builder = qos_view.get_view_builder(request) + result = builder.build(qos, True) + return dict(qoss=result) + except exception.QosNotFound as exp: + return faults.Fault(faults.QosNotFound(exp)) + + def delete(self, request, tenant_id, id): + """Destroys the qos with the given id.""" + try: + self._plugin.delete_qos(tenant_id, id) + return exc.HTTPOk() + except exception.QosNotFound as exp: + return faults.Fault(faults.QosNotFound(exp)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/l2device_plugin_base.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/l2device_plugin_base.py new file mode 100644 index 00000000..5139b4b5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/l2device_plugin_base.py @@ -0,0 +1,173 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. + +import abc +import inspect +import six + + +@six.add_metaclass(abc.ABCMeta) +class L2DevicePluginBase(object): + """Base class for a device-specific plugin. + + An example of a device-specific plugin is a Nexus switch plugin. + The network model relies on device-category-specific plugins to perform + the configuration on each device. + """ + + @abc.abstractmethod + def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id, + **kwargs): + """Create network. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def delete_network(self, tenant_id, net_id, **kwargs): + """Delete network. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def update_network(self, tenant_id, net_id, name, **kwargs): + """Update network. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs): + """Create port. 
+ + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def delete_port(self, tenant_id, net_id, port_id, **kwargs): + """Delete port. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def update_port(self, tenant_id, net_id, port_id, **kwargs): + """Update port. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id, + **kwargs): + """Plug interface. + + :returns: + :raises: + """ + pass + + @abc.abstractmethod + def unplug_interface(self, tenant_id, net_id, port_id, **kwargs): + """Unplug interface. + + :returns: + :raises: + """ + pass + + def create_subnet(self, tenant_id, net_id, ip_version, + subnet_cidr, **kwargs): + """Create subnet. + + :returns: + :raises: + """ + pass + + def get_subnets(self, tenant_id, net_id, **kwargs): + """Get subnets. + + :returns: + :raises: + """ + pass + + def get_subnet(self, tenant_id, net_id, subnet_id, **kwargs): + """Get subnet. + + :returns: + :raises: + """ + pass + + def update_subnet(self, tenant_id, net_id, subnet_id, **kwargs): + """Update subnet. + + :returns: + :raises: + """ + pass + + def delete_subnet(self, tenant_id, net_id, subnet_id, **kwargs): + """Delete subnet. + + :returns: + :raises: + """ + pass + + @classmethod + def __subclasshook__(cls, klass): + """Check plugin class. + + The __subclasshook__ method is a class method + that will be called every time a class is tested + using issubclass(klass, Plugin). + In that case, it will check that every method + marked with the abstractmethod decorator is + provided by the plugin class. + """ + if cls is L2DevicePluginBase: + for method in cls.__abstractmethods__: + method_ok = False + for base in klass.__mro__: + if method in base.__dict__: + fn_obj = base.__dict__[method] + if inspect.isfunction(fn_obj): + abstract_fn_obj = cls.__dict__[method] + arg_count = fn_obj.func_code.co_argcount + expected_arg_count = \ + abstract_fn_obj.func_code.co_argcount + method_ok = arg_count == expected_arg_count + if method_ok: + continue + return NotImplemented + return True + return NotImplemented diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/models/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/models/__init__.py new file mode 100644 index 00000000..51c00019 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/models/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/models/virt_phy_sw_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/models/virt_phy_sw_v2.py new file mode 100644 index 00000000..4702e0a7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/models/virt_phy_sw_v2.py @@ -0,0 +1,551 @@ +# Copyright 2012 Cisco Systems, Inc. +# All rights reserved. 
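The __subclasshook__ in l2device_plugin_base.py above lets any class that merely defines the abstract methods pass an issubclass()/isinstance() check against L2DevicePluginBase without inheriting from it (the real hook also compares argument counts). A minimal, self-contained sketch of that duck-typed registration pattern, simplified to a presence check; the class names here are illustrative only, and six is already a dependency of this patch:

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class DeviceDriverBase(object):
        """Toy stand-in for L2DevicePluginBase."""

        @abc.abstractmethod
        def create_network(self, tenant_id, net_id):
            pass

        @abc.abstractmethod
        def delete_network(self, tenant_id, net_id):
            pass

        @classmethod
        def __subclasshook__(cls, klass):
            # Accept any class that defines every abstract method somewhere
            # in its MRO; the plugin base additionally checks argument counts.
            if cls is DeviceDriverBase:
                for method in cls.__abstractmethods__:
                    if not any(method in base.__dict__ for base in klass.__mro__):
                        return NotImplemented
                return True
            return NotImplemented


    class ThirdPartyDriver(object):
        """Never inherits from DeviceDriverBase, yet still 'counts' as one."""

        def create_network(self, tenant_id, net_id):
            return ('created', tenant_id, net_id)

        def delete_network(self, tenant_id, net_id):
            return ('deleted', tenant_id, net_id)


    assert issubclass(ThirdPartyDriver, DeviceDriverBase)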
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Rohit Agarwalla, Cisco Systems, Inc. +# + +import inspect +import logging +import sys + +from neutron.api.v2 import attributes +from neutron.db import api as db_api +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import neutron_plugin_base_v2 +from neutron.openstack.common import importutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_credentials_v2 as cred +from neutron.plugins.cisco.common import cisco_exceptions as cexc +from neutron.plugins.cisco.common import config as conf +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.openvswitch import ovs_db_v2 as odb + + +LOG = logging.getLogger(__name__) + + +class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2): + """Virtual Physical Switch Model. + + This implementation works with OVS and Nexus plugin for the + following topology: + One or more servers to a nexus switch. + """ + __native_bulk_support = True + supported_extension_aliases = ["provider", "binding"] + _methods_to_delegate = ['create_network_bulk', + 'get_network', 'get_networks', + 'create_port_bulk', + 'get_port', 'get_ports', + 'create_subnet', 'create_subnet_bulk', + 'delete_subnet', 'update_subnet', + 'get_subnet', 'get_subnets', + 'create_or_update_agent', 'report_state'] + + def __init__(self): + """Initialize the segmentation manager. + + Checks which device plugins are configured, and load the inventories + those device plugins for which the inventory is configured. + """ + conf.CiscoConfigOptions() + + self._plugins = {} + for key in conf.CISCO_PLUGINS.keys(): + plugin_obj = conf.CISCO_PLUGINS[key] + if plugin_obj is not None: + self._plugins[key] = importutils.import_object(plugin_obj) + LOG.debug(_("Loaded device plugin %s"), + conf.CISCO_PLUGINS[key]) + + if ((const.VSWITCH_PLUGIN in self._plugins) and + hasattr(self._plugins[const.VSWITCH_PLUGIN], + "supported_extension_aliases")): + self.supported_extension_aliases.extend( + self._plugins[const.VSWITCH_PLUGIN]. + supported_extension_aliases) + # At this point, all the database models should have been loaded. It's + # possible that configure_db() may have been called by one of the + # plugins loaded in above. Otherwise, this call is to make sure that + # the database is initialized + db_api.configure_db() + + # Initialize credential store after database initialization + cred.Store.initialize() + LOG.debug(_("%(module)s.%(name)s init done"), + {'module': __name__, + 'name': self.__class__.__name__}) + + # Check whether we have a valid Nexus driver loaded + self.is_nexus_plugin = False + nexus_driver = conf.CISCO.nexus_driver + if nexus_driver.endswith('CiscoNEXUSDriver'): + self.is_nexus_plugin = True + + def __getattribute__(self, name): + """Delegate calls to OVS sub-plugin. 
+ + This delegates the calls to the methods implemented only by the OVS + sub-plugin. Note: Currently, bulking is handled by the caller + (PluginV2), and this model class expects to receive only non-bulking + calls. If, however, a bulking call is made, this will method will + delegate the call to the OVS plugin. + """ + super_getattribute = super(VirtualPhysicalSwitchModelV2, + self).__getattribute__ + methods = super_getattribute('_methods_to_delegate') + + if name in methods: + plugin = super_getattribute('_plugins')[const.VSWITCH_PLUGIN] + return getattr(plugin, name) + + try: + return super_getattribute(name) + except AttributeError: + plugin = super_getattribute('_plugins')[const.VSWITCH_PLUGIN] + return getattr(plugin, name) + + def _func_name(self, offset=0): + """Get the name of the calling function.""" + frame_record = inspect.stack()[1 + offset] + func_name = frame_record[3] + return func_name + + def _invoke_plugin_per_device(self, plugin_key, function_name, + args, **kwargs): + """Invoke plugin per device. + + Invokes a device plugin's relevant functions (based on the + plugin implementation) for completing this operation. + """ + if plugin_key not in self._plugins: + LOG.info(_("No %s Plugin loaded"), plugin_key) + LOG.info(_("%(plugin_key)s: %(function_name)s with args %(args)s " + "ignored"), + {'plugin_key': plugin_key, + 'function_name': function_name, + 'args': args}) + else: + func = getattr(self._plugins[plugin_key], function_name) + return func(*args, **kwargs) + + def _get_segmentation_id(self, network_id): + binding_seg_id = odb.get_network_binding(None, network_id) + if not binding_seg_id: + raise cexc.NetworkSegmentIDNotFound(net_id=network_id) + return binding_seg_id.segmentation_id + + def _get_provider_vlan_id(self, network): + if (all(attributes.is_attr_set(network.get(attr)) + for attr in (provider.NETWORK_TYPE, + provider.PHYSICAL_NETWORK, + provider.SEGMENTATION_ID)) + and + network[provider.NETWORK_TYPE] == const.NETWORK_TYPE_VLAN): + return network[provider.SEGMENTATION_ID] + + def create_network(self, context, network): + """Create network. + + Perform this operation in the context of the configured device + plugins. + """ + LOG.debug(_("create_network() called")) + provider_vlan_id = self._get_provider_vlan_id(network[const.NETWORK]) + args = [context, network] + ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + # The vswitch plugin did all the verification. If it's a provider + # vlan network, save it for the nexus plugin to use later. + if provider_vlan_id: + network_id = ovs_output[const.NET_ID] + cdb.add_provider_network(network_id, + const.NETWORK_TYPE_VLAN, + provider_vlan_id) + LOG.debug(_("Provider network added to DB: %(network_id)s, " + "%(vlan_id)s"), + {'network_id': network_id, 'vlan_id': provider_vlan_id}) + return ovs_output + + def update_network(self, context, id, network): + """Update network. + + Perform this operation in the context of the configured device + plugins. + + Note that the Nexus sub-plugin does not need to be notified + (and the Nexus switch does not need to be [re]configured) + for an update network operation because the Nexus sub-plugin + is agnostic of all network-level attributes except the + segmentation ID. Furthermore, updating of the segmentation ID + is not supported by the OVS plugin since it is considered a + provider attribute, so it is not supported by this method. 
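The __getattribute__ override above is how VirtualPhysicalSwitchModelV2 exposes vswitch-plugin methods as its own: whitelisted names are fetched straight from the OVS sub-plugin, and anything the model itself lacks also falls through to it. A stripped-down sketch of the same delegation idea, with made-up plugin names used purely for illustration:

    class FakeVSwitchPlugin(object):
        def get_network(self, net_id):
            return {'id': net_id, 'plugin': 'vswitch'}

        def get_networks(self):
            return []


    class SwitchModel(object):
        """Delegates a whitelist of calls (and unknown attributes) to a sub-plugin."""

        _methods_to_delegate = ['get_network', 'get_networks']

        def __init__(self):
            self._sub_plugin = FakeVSwitchPlugin()

        def create_network(self, net_id):
            # handled locally; the sub-plugin could then be invoked as well
            return {'id': net_id, 'plugin': 'model'}

        def __getattribute__(self, name):
            super_get = super(SwitchModel, self).__getattribute__
            if name in super_get('_methods_to_delegate'):
                return getattr(super_get('_sub_plugin'), name)
            try:
                return super_get(name)
            except AttributeError:
                # anything the model does not define also falls through
                return getattr(super_get('_sub_plugin'), name)


    model = SwitchModel()
    assert model.get_network('net-1')['plugin'] == 'vswitch'
    assert model.create_network('net-2')['plugin'] == 'model'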
+ """ + LOG.debug(_("update_network() called")) + + # We can only support updating of provider attributes if all the + # configured sub-plugins support it. Currently we have no method + # in place for checking whether a sub-plugin supports it, + # so assume not. + provider._raise_if_updates_provider_attributes(network['network']) + + args = [context, id, network] + return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + + def delete_network(self, context, id): + """Delete network. + + Perform this operation in the context of the configured device + plugins. + """ + args = [context, id] + ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + if cdb.remove_provider_network(id): + LOG.debug(_("Provider network removed from DB: %s"), id) + return ovs_output + + def get_network(self, context, id, fields=None): + """Get network. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Get networks. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def _invoke_nexus_for_net_create(self, context, tenant_id, net_id, + instance_id, host_id): + if not self.is_nexus_plugin: + return False + + network = self.get_network(context, net_id) + vlan_id = self._get_segmentation_id(net_id) + vlan_name = conf.CISCO.vlan_name_prefix + str(vlan_id) + network[const.NET_VLAN_ID] = vlan_id + network[const.NET_VLAN_NAME] = vlan_name + attachment = { + const.TENANT_ID: tenant_id, + const.INSTANCE_ID: instance_id, + const.HOST_NAME: host_id, + } + self._invoke_plugin_per_device( + const.NEXUS_PLUGIN, + 'create_network', + [network, attachment]) + + def _check_valid_port_device_owner(self, port): + """Check the port for valid device_owner. + + Don't call the nexus plugin for router and dhcp + port owners. + """ + return port['device_owner'].startswith('compute') + + def _get_port_host_id_from_bindings(self, port): + """Get host_id from portbindings.""" + host_id = None + + if (portbindings.HOST_ID in port and + attributes.is_attr_set(port[portbindings.HOST_ID])): + host_id = port[portbindings.HOST_ID] + + return host_id + + def create_port(self, context, port): + """Create port. + + Perform this operation in the context of the configured device + plugins. + """ + LOG.debug(_("create_port() called")) + args = [context, port] + ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + instance_id = port['port']['device_id'] + + # Only call nexus plugin if there's a valid instance_id, host_id + # and device_owner + try: + host_id = self._get_port_host_id_from_bindings(port['port']) + if (instance_id and host_id and + self._check_valid_port_device_owner(port['port'])): + net_id = port['port']['network_id'] + tenant_id = port['port']['tenant_id'] + self._invoke_nexus_for_net_create( + context, tenant_id, net_id, instance_id, host_id) + except Exception: + # Create network on the Nexus plugin has failed, so we need + # to rollback the port creation on the VSwitch plugin. 
+ exc_info = sys.exc_info() + try: + id = ovs_output['id'] + args = [context, id] + ovs_output = self._invoke_plugin_per_device( + const.VSWITCH_PLUGIN, + 'delete_port', + args) + finally: + # Re-raise the original exception + raise exc_info[0], exc_info[1], exc_info[2] + return ovs_output + + def get_port(self, context, id, fields=None): + """Get port. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def get_ports(self, context, filters=None, fields=None): + """Get ports. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def _check_nexus_net_create_needed(self, new_port, old_port): + """Check if nexus plugin should be invoked for net_create. + + In the following cases, the plugin should be invoked: + -- a port is attached to a VM instance. The old host id is None + -- VM migration. The old host id has a valid value + + When the plugin needs to be invoked, return the old_host_id, + and a list of calling arguments. + Otherwise, return '' for old host id and an empty list + """ + old_device_id = old_port['device_id'] + new_device_id = new_port.get('device_id') + new_host_id = self._get_port_host_id_from_bindings(new_port) + tenant_id = old_port['tenant_id'] + net_id = old_port['network_id'] + old_host_id = self._get_port_host_id_from_bindings(old_port) + + LOG.debug(_("tenant_id: %(tid)s, net_id: %(nid)s, " + "old_device_id: %(odi)s, new_device_id: %(ndi)s, " + "old_host_id: %(ohi)s, new_host_id: %(nhi)s, " + "old_device_owner: %(odo)s, new_device_owner: %(ndo)s"), + {'tid': tenant_id, 'nid': net_id, + 'odi': old_device_id, 'ndi': new_device_id, + 'ohi': old_host_id, 'nhi': new_host_id, + 'odo': old_port.get('device_owner'), + 'ndo': new_port.get('device_owner')}) + + # A port is attached to an instance + if (new_device_id and not old_device_id and new_host_id and + self._check_valid_port_device_owner(new_port)): + return '', [tenant_id, net_id, new_device_id, new_host_id] + + # An instance is being migrated + if (old_device_id and old_host_id and new_host_id != old_host_id and + self._check_valid_port_device_owner(old_port)): + return old_host_id, [tenant_id, net_id, old_device_id, new_host_id] + + # no need to invoke the plugin + return '', [] + + def update_port(self, context, id, port): + """Update port. + + Perform this operation in the context of the configured device + plugins. 
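The create_port error path above captures sys.exc_info(), undoes the vswitch-side port, and then re-raises the original exception with the Python 2 three-argument raise. A compact sketch of the same save/rollback/re-raise pattern; six.reraise (six is already used by this patch) expresses it portably, and the resource names are invented for the example:

    import sys

    import six


    def rollback(resource):
        print('rolling back %s' % resource)


    def create_with_rollback():
        created = 'port-1'
        try:
            raise RuntimeError('nexus side failed')   # stand-in for the nexus call
        except Exception:
            exc_info = sys.exc_info()
            try:
                rollback(created)
            finally:
                # re-raise the *original* exception, traceback included
                six.reraise(*exc_info)


    try:
        create_with_rollback()
    except RuntimeError as err:
        assert 'nexus side failed' in str(err)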
+ """ + LOG.debug(_("update_port() called")) + old_port = self.get_port(context, id) + args = [context, id, port] + ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + args) + try: + # Check if the nexus plugin needs to be invoked + old_host_id, create_args = self._check_nexus_net_create_needed( + port['port'], old_port) + + # In the case of migration, invoke it to remove + # the previous port binding + if old_host_id: + vlan_id = self._get_segmentation_id(old_port['network_id']) + delete_args = [old_port['device_id'], vlan_id] + self._invoke_plugin_per_device(const.NEXUS_PLUGIN, + "delete_port", + delete_args) + + # Invoke the Nexus plugin to create a net and/or new port binding + if create_args: + self._invoke_nexus_for_net_create(context, *create_args) + + return ovs_output + except Exception: + exc_info = sys.exc_info() + LOG.error(_("Unable to update port '%s' on Nexus switch"), + old_port['name'], exc_info=exc_info) + try: + # Roll back vSwitch plugin to original port attributes. + args = [context, id, {'port': old_port}] + self._invoke_plugin_per_device( + const.VSWITCH_PLUGIN, + self._func_name(), + args) + finally: + # Re-raise the original exception + raise exc_info[0], exc_info[1], exc_info[2] + + def delete_port(self, context, id, l3_port_check=True): + """Delete port. + + Perform this operation in the context of the configured device + plugins. + """ + LOG.debug(_("delete_port() called")) + port = self.get_port(context, id) + + host_id = self._get_port_host_id_from_bindings(port) + + if (self.is_nexus_plugin and host_id and + self._check_valid_port_device_owner(port)): + vlan_id = self._get_segmentation_id(port['network_id']) + n_args = [port['device_id'], vlan_id] + self._invoke_plugin_per_device(const.NEXUS_PLUGIN, + self._func_name(), + n_args) + try: + args = [context, id] + ovs_output = self._invoke_plugin_per_device( + const.VSWITCH_PLUGIN, self._func_name(), + args, l3_port_check=l3_port_check) + except Exception: + exc_info = sys.exc_info() + # Roll back the delete port on the Nexus plugin + try: + tenant_id = port['tenant_id'] + net_id = port['network_id'] + instance_id = port['device_id'] + host_id = port[portbindings.HOST_ID] + self._invoke_nexus_for_net_create(context, tenant_id, net_id, + instance_id, host_id) + finally: + # Raise the original exception. + raise exc_info[0], exc_info[1], exc_info[2] + + return ovs_output + + def add_router_interface(self, context, router_id, interface_info): + """Add a router interface on a subnet. 
+ + Only invoke the Nexus plugin to create SVI if L3 support on + the Nexus switches is enabled and a Nexus plugin is loaded, + otherwise send it to the vswitch plugin + """ + if (conf.CISCO.nexus_l3_enable and self.is_nexus_plugin): + LOG.debug(_("L3 enabled on Nexus plugin, create SVI on switch")) + if 'subnet_id' not in interface_info: + raise cexc.SubnetNotSpecified() + if 'port_id' in interface_info: + raise cexc.PortIdForNexusSvi() + subnet = self.get_subnet(context, interface_info['subnet_id']) + gateway_ip = subnet['gateway_ip'] + # Get gateway IP address and netmask + cidr = subnet['cidr'] + netmask = cidr.split('/', 1)[1] + gateway_ip = gateway_ip + '/' + netmask + network_id = subnet['network_id'] + vlan_id = self._get_segmentation_id(network_id) + vlan_name = conf.CISCO.vlan_name_prefix + str(vlan_id) + + n_args = [vlan_name, vlan_id, subnet['id'], gateway_ip, router_id] + return self._invoke_plugin_per_device(const.NEXUS_PLUGIN, + self._func_name(), + n_args) + else: + LOG.debug(_("L3 disabled or not Nexus plugin, send to vswitch")) + n_args = [context, router_id, interface_info] + return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + n_args) + + def remove_router_interface(self, context, router_id, interface_info): + """Remove a router interface. + + Only invoke the Nexus plugin to delete SVI if L3 support on + the Nexus switches is enabled and a Nexus plugin is loaded, + otherwise send it to the vswitch plugin + """ + if (conf.CISCO.nexus_l3_enable and self.is_nexus_plugin): + LOG.debug(_("L3 enabled on Nexus plugin, delete SVI from switch")) + + subnet = self.get_subnet(context, interface_info['subnet_id']) + network_id = subnet['network_id'] + vlan_id = self._get_segmentation_id(network_id) + n_args = [vlan_id, router_id] + + return self._invoke_plugin_per_device(const.NEXUS_PLUGIN, + self._func_name(), + n_args) + else: + LOG.debug(_("L3 disabled or not Nexus plugin, send to vswitch")) + n_args = [context, router_id, interface_info] + return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, + self._func_name(), + n_args) + + def create_subnet(self, context, subnet): + """Create subnet. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def update_subnet(self, context, id, subnet): + """Update subnet. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def get_subnet(self, context, id, fields=None): + """Get subnet. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def delete_subnet(self, context, id, kwargs): + """Delete subnet. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. + """ + pass # pragma no cover + + def get_subnets(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Get subnets. This method is delegated to the vswitch plugin. + + This method is included here to satisfy abstract method requirements. 
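In the SVI branch of add_router_interface above, the value spliced onto the gateway address is the prefix length taken from the subnet CIDR (the local variable is named netmask, but no dotted mask is involved). A small standalone illustration of that string handling, with sample values made up and netaddr (already imported elsewhere in this patch) used only to sanity-check the result:

    import netaddr

    subnet = {'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1'}  # sample data only

    # prefix length, not a dotted netmask, despite the variable name in the plugin
    prefix_len = subnet['cidr'].split('/', 1)[1]
    svi_gateway = subnet['gateway_ip'] + '/' + prefix_len

    assert svi_gateway == '10.0.0.1/24'
    assert netaddr.IPAddress(subnet['gateway_ip']) in netaddr.IPNetwork(subnet['cidr'])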
+ """ + pass # pragma no cover diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/__init__.py new file mode 100644 index 00000000..cf7a1cea --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/n1kv_client.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/n1kv_client.py new file mode 100644 index 00000000..a107f933 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/n1kv_client.py @@ -0,0 +1,539 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Rudrajit Tapadar, Cisco Systems, Inc. + +import base64 +import eventlet +import netaddr +import requests + +from neutron.common import exceptions as n_exc +from neutron.extensions import providernet +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.cisco.common import cisco_constants as c_const +from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred +from neutron.plugins.cisco.common import cisco_exceptions as c_exc +from neutron.plugins.cisco.common import config as c_conf +from neutron.plugins.cisco.db import network_db_v2 +from neutron.plugins.cisco.extensions import n1kv + +LOG = logging.getLogger(__name__) + + +class Client(object): + + """ + Client for the Cisco Nexus1000V Neutron Plugin. + + This client implements functions to communicate with + Cisco Nexus1000V VSM. + + For every Neutron objects, Cisco Nexus1000V Neutron Plugin + creates a corresponding object in the controller (Cisco + Nexus1000V VSM). + + CONCEPTS: + + Following are few concepts used in Nexus1000V VSM: + + port-profiles: + Policy profiles correspond to port profiles on Nexus1000V VSM. + Port profiles are the primary mechanism by which network policy is + defined and applied to switch interfaces in a Nexus 1000V system. + + network-segment: + Each network-segment represents a broadcast domain. + + network-segment-pool: + A network-segment-pool contains one or more network-segments. + + logical-network: + A logical-network contains one or more network-segment-pools. 
+ + bridge-domain: + A bridge-domain is created when the network-segment is of type VXLAN. + Each VXLAN <--> VLAN combination can be thought of as a bridge domain. + + ip-pool: + Each ip-pool represents a subnet on the Nexus1000V VSM. + + vm-network: + vm-network refers to a network-segment and policy-profile. + It maintains a list of ports that uses the network-segment and + policy-profile this vm-network refers to. + + events: + Events correspond to commands that are logged on Nexus1000V VSM. + Events are used to poll for a certain resource on Nexus1000V VSM. + Event type of port_profile: Return all updates/create/deletes + of port profiles from the VSM. + Event type of port_profile_update: Return only updates regarding + policy-profiles. + Event type of port_profile_delete: Return only deleted policy profiles. + + + WORK FLOW: + + For every network profile a corresponding logical-network and + a network-segment-pool, under this logical-network, will be created. + + For every network created from a given network profile, a + network-segment will be added to the network-segment-pool corresponding + to that network profile. + + A port is created on a network and associated with a policy-profile. + Hence for every unique combination of a network and a policy-profile, a + unique vm-network will be created and a reference to the port will be + added. If the same combination of network and policy-profile is used by + another port, the references to that port will be added to the same + vm-network. + + + """ + + # Define paths for the URI where the client connects for HTTP requests. + port_profiles_path = "/virtual-port-profile" + network_segment_path = "/network-segment/%s" + network_segment_pool_path = "/network-segment-pool/%s" + ip_pool_path = "/ip-pool-template/%s" + ports_path = "/kvm/vm-network/%s/ports" + port_path = "/kvm/vm-network/%s/ports/%s" + vm_networks_path = "/kvm/vm-network" + vm_network_path = "/kvm/vm-network/%s" + bridge_domains_path = "/kvm/bridge-domain" + bridge_domain_path = "/kvm/bridge-domain/%s" + logical_network_path = "/logical-network/%s" + events_path = "/kvm/events" + clusters_path = "/cluster" + encap_profiles_path = "/encapsulation-profile" + encap_profile_path = "/encapsulation-profile/%s" + + pool = eventlet.GreenPool(c_conf.CISCO_N1K.http_pool_size) + + def __init__(self, **kwargs): + """Initialize a new client for the plugin.""" + self.format = 'json' + self.hosts = self._get_vsm_hosts() + self.action_prefix = 'http://%s/api/n1k' % self.hosts[0] + self.timeout = c_const.DEFAULT_HTTP_TIMEOUT + + def list_port_profiles(self): + """ + Fetch all policy profiles from the VSM. + + :returns: JSON string + """ + return self._get(self.port_profiles_path) + + def create_bridge_domain(self, network, overlay_subtype): + """ + Create a bridge domain on VSM. + + :param network: network dict + :param overlay_subtype: string representing subtype of overlay network + """ + body = {'name': network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX, + 'segmentId': network[providernet.SEGMENTATION_ID], + 'subType': overlay_subtype, + 'tenantId': network['tenant_id']} + if overlay_subtype == c_const.NETWORK_SUBTYPE_NATIVE_VXLAN: + body['groupIp'] = network[n1kv.MULTICAST_IP] + return self._post(self.bridge_domains_path, + body=body) + + def delete_bridge_domain(self, name): + """ + Delete a bridge domain on VSM. 
+ + :param name: name of the bridge domain to be deleted + """ + return self._delete(self.bridge_domain_path % name) + + def create_network_segment(self, network, network_profile): + """ + Create a network segment on the VSM. + + :param network: network dict + :param network_profile: network profile dict + """ + body = {'publishName': network['id'], + 'description': network['name'], + 'id': network['id'], + 'tenantId': network['tenant_id'], + 'networkSegmentPool': network_profile['id'], } + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN: + body['vlan'] = network[providernet.SEGMENTATION_ID] + elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: + body['bridgeDomain'] = (network['id'] + + c_const.BRIDGE_DOMAIN_SUFFIX) + if network_profile['segment_type'] == c_const.NETWORK_TYPE_TRUNK: + body['mode'] = c_const.NETWORK_TYPE_TRUNK + body['segmentType'] = network_profile['sub_type'] + if network_profile['sub_type'] == c_const.NETWORK_TYPE_VLAN: + body['addSegments'] = network['add_segment_list'] + body['delSegments'] = network['del_segment_list'] + else: + body['encapProfile'] = (network['id'] + + c_const.ENCAPSULATION_PROFILE_SUFFIX) + else: + body['mode'] = 'access' + body['segmentType'] = network_profile['segment_type'] + return self._post(self.network_segment_path % network['id'], + body=body) + + def update_network_segment(self, network_segment_id, body): + """ + Update a network segment on the VSM. + + Network segment on VSM can be updated to associate it with an ip-pool + or update its description and segment id. + + :param network_segment_id: UUID representing the network segment + :param body: dict of arguments to be updated + """ + return self._post(self.network_segment_path % network_segment_id, + body=body) + + def delete_network_segment(self, network_segment_id): + """ + Delete a network segment on the VSM. + + :param network_segment_id: UUID representing the network segment + """ + return self._delete(self.network_segment_path % network_segment_id) + + def create_logical_network(self, network_profile, tenant_id): + """ + Create a logical network on the VSM. + + :param network_profile: network profile dict + :param tenant_id: UUID representing the tenant + """ + LOG.debug(_("Logical network")) + body = {'description': network_profile['name'], + 'tenantId': tenant_id} + logical_network_name = (network_profile['id'] + + c_const.LOGICAL_NETWORK_SUFFIX) + return self._post(self.logical_network_path % logical_network_name, + body=body) + + def delete_logical_network(self, logical_network_name): + """ + Delete a logical network on VSM. + + :param logical_network_name: string representing name of the logical + network + """ + return self._delete( + self.logical_network_path % logical_network_name) + + def create_network_segment_pool(self, network_profile, tenant_id): + """ + Create a network segment pool on the VSM. + + :param network_profile: network profile dict + :param tenant_id: UUID representing the tenant + """ + LOG.debug(_("network_segment_pool")) + logical_network_name = (network_profile['id'] + + c_const.LOGICAL_NETWORK_SUFFIX) + body = {'name': network_profile['name'], + 'description': network_profile['name'], + 'id': network_profile['id'], + 'logicalNetwork': logical_network_name, + 'tenantId': tenant_id} + return self._post( + self.network_segment_pool_path % network_profile['id'], + body=body) + + def update_network_segment_pool(self, network_profile): + """ + Update a network segment pool on the VSM. 
+ + :param network_profile: network profile dict + """ + body = {'name': network_profile['name'], + 'description': network_profile['name']} + return self._post(self.network_segment_pool_path % + network_profile['id'], body=body) + + def delete_network_segment_pool(self, network_segment_pool_id): + """ + Delete a network segment pool on the VSM. + + :param network_segment_pool_id: UUID representing the network + segment pool + """ + return self._delete(self.network_segment_pool_path % + network_segment_pool_id) + + def create_ip_pool(self, subnet): + """ + Create an ip-pool on the VSM. + + :param subnet: subnet dict + """ + if subnet['cidr']: + try: + ip = netaddr.IPNetwork(subnet['cidr']) + netmask = str(ip.netmask) + network_address = str(ip.network) + except (ValueError, netaddr.AddrFormatError): + msg = _("Invalid input for CIDR") + raise n_exc.InvalidInput(error_message=msg) + else: + netmask = network_address = "" + + if subnet['allocation_pools']: + address_range_start = subnet['allocation_pools'][0]['start'] + address_range_end = subnet['allocation_pools'][0]['end'] + else: + address_range_start = None + address_range_end = None + + body = {'addressRangeStart': address_range_start, + 'addressRangeEnd': address_range_end, + 'ipAddressSubnet': netmask, + 'description': subnet['name'], + 'gateway': subnet['gateway_ip'], + 'dhcp': subnet['enable_dhcp'], + 'dnsServersList': subnet['dns_nameservers'], + 'networkAddress': network_address, + 'tenantId': subnet['tenant_id']} + return self._post(self.ip_pool_path % subnet['id'], + body=body) + + def update_ip_pool(self, subnet): + """ + Update an ip-pool on the VSM. + + :param subnet: subnet dictionary + """ + body = {'description': subnet['name'], + 'dhcp': subnet['enable_dhcp'], + 'dnsServersList': subnet['dns_nameservers']} + return self._post(self.ip_pool_path % subnet['id'], + body=body) + + def delete_ip_pool(self, subnet_id): + """ + Delete an ip-pool on the VSM. + + :param subnet_id: UUID representing the subnet + """ + return self._delete(self.ip_pool_path % subnet_id) + + def create_vm_network(self, + port, + vm_network_name, + policy_profile): + """ + Create a VM network on the VSM. + + :param port: port dict + :param vm_network_name: name of the VM network + :param policy_profile: policy profile dict + """ + body = {'name': vm_network_name, + 'networkSegmentId': port['network_id'], + 'networkSegment': port['network_id'], + 'portProfile': policy_profile['name'], + 'portProfileId': policy_profile['id'], + 'tenantId': port['tenant_id'], + 'portId': port['id'], + 'macAddress': port['mac_address'], + } + if port.get('fixed_ips'): + body['ipAddress'] = port['fixed_ips'][0]['ip_address'] + body['subnetId'] = port['fixed_ips'][0]['subnet_id'] + return self._post(self.vm_networks_path, + body=body) + + def delete_vm_network(self, vm_network_name): + """ + Delete a VM network on the VSM. + + :param vm_network_name: name of the VM network + """ + return self._delete(self.vm_network_path % vm_network_name) + + def create_n1kv_port(self, port, vm_network_name): + """ + Create a port on the VSM. 
+ + :param port: port dict + :param vm_network_name: name of the VM network which imports this port + """ + body = {'id': port['id'], + 'macAddress': port['mac_address']} + if port.get('fixed_ips'): + body['ipAddress'] = port['fixed_ips'][0]['ip_address'] + body['subnetId'] = port['fixed_ips'][0]['subnet_id'] + return self._post(self.ports_path % vm_network_name, + body=body) + + def update_n1kv_port(self, vm_network_name, port_id, body): + """ + Update a port on the VSM. + + Update the mac address associated with the port + + :param vm_network_name: name of the VM network which imports this port + :param port_id: UUID of the port + :param body: dict of the arguments to be updated + """ + return self._post(self.port_path % (vm_network_name, port_id), + body=body) + + def delete_n1kv_port(self, vm_network_name, port_id): + """ + Delete a port on the VSM. + + :param vm_network_name: name of the VM network which imports this port + :param port_id: UUID of the port + """ + return self._delete(self.port_path % (vm_network_name, port_id)) + + def _do_request(self, method, action, body=None, + headers=None): + """ + Perform the HTTP request. + + The response is in either JSON format or plain text. A GET method will + invoke a JSON response while a PUT/POST/DELETE returns message from the + VSM in plain text format. + Exception is raised when VSM replies with an INTERNAL SERVER ERROR HTTP + status code (500) i.e. an error has occurred on the VSM or SERVICE + UNAVAILABLE (503) i.e. VSM is not reachable. + + :param method: type of the HTTP request. POST, GET, PUT or DELETE + :param action: path to which the client makes request + :param body: dict for arguments which are sent as part of the request + :param headers: header for the HTTP request + :returns: JSON or plain text in HTTP response + """ + action = self.action_prefix + action + if not headers and self.hosts: + headers = self._get_auth_header(self.hosts[0]) + headers['Content-Type'] = self._set_content_type('json') + headers['Accept'] = self._set_content_type('json') + if body: + body = jsonutils.dumps(body, indent=2) + LOG.debug(_("req: %s"), body) + try: + resp = self.pool.spawn(requests.request, + method, + url=action, + data=body, + headers=headers, + timeout=self.timeout).wait() + except Exception as e: + raise c_exc.VSMConnectionFailed(reason=e) + LOG.debug(_("status_code %s"), resp.status_code) + if resp.status_code == requests.codes.OK: + if 'application/json' in resp.headers['content-type']: + try: + return resp.json() + except ValueError: + return {} + elif 'text/plain' in resp.headers['content-type']: + LOG.debug(_("VSM: %s"), resp.text) + else: + raise c_exc.VSMError(reason=resp.text) + + def _set_content_type(self, format=None): + """ + Set the mime-type to either 'xml' or 'json'. + + :param format: format to be set. + :return: mime-type string + """ + if not format: + format = self.format + return "application/%s" % format + + def _delete(self, action, body=None, headers=None): + return self._do_request("DELETE", action, body=body, + headers=headers) + + def _get(self, action, body=None, headers=None): + return self._do_request("GET", action, body=body, + headers=headers) + + def _post(self, action, body=None, headers=None): + return self._do_request("POST", action, body=body, + headers=headers) + + def _put(self, action, body=None, headers=None): + return self._do_request("PUT", action, body=body, + headers=headers) + + def _get_vsm_hosts(self): + """ + Retrieve a list of VSM ip addresses. 
+ + :return: list of host ip addresses + """ + return [cr[c_const.CREDENTIAL_NAME] for cr in + network_db_v2.get_all_n1kv_credentials()] + + def _get_auth_header(self, host_ip): + """ + Retrieve header with auth info for the VSM. + + :param host_ip: IP address of the VSM + :return: authorization header dict + """ + username = c_cred.Store.get_username(host_ip) + password = c_cred.Store.get_password(host_ip) + auth = base64.encodestring("%s:%s" % (username, password)).rstrip() + header = {"Authorization": "Basic %s" % auth} + return header + + def get_clusters(self): + """Fetches a list of all vxlan gateway clusters.""" + return self._get(self.clusters_path) + + def create_encapsulation_profile(self, encap): + """ + Create an encapsulation profile on VSM. + + :param encap: encapsulation dict + """ + body = {'name': encap['name'], + 'addMappings': encap['add_segment_list'], + 'delMappings': encap['del_segment_list']} + return self._post(self.encap_profiles_path, + body=body) + + def update_encapsulation_profile(self, context, profile_name, body): + """ + Adds a vlan to bridge-domain mapping to an encapsulation profile. + + :param profile_name: Name of the encapsulation profile + :param body: mapping dictionary + """ + return self._post(self.encap_profile_path + % profile_name, body=body) + + def delete_encapsulation_profile(self, name): + """ + Delete an encapsulation profile on VSM. + + :param name: name of the encapsulation profile to be deleted + """ + return self._delete(self.encap_profile_path % name) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py new file mode 100644 index 00000000..431cbc6d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py @@ -0,0 +1,1436 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Aruna Kushwaha, Cisco Systems, Inc. +# @author: Rudrajit Tapadar, Cisco Systems, Inc. +# @author: Abhishek Raut, Cisco Systems, Inc. +# @author: Sergey Sudakovich, Cisco Systems, Inc. 
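Before the plugin file that follows, it may help to see the request plumbing of the n1kv client above in isolation: every call is a JSON request against a path template rooted at http://<vsm>/api/n1k, authenticated with an HTTP Basic header. A minimal sketch under stated assumptions (host and credentials are invented, and the real client additionally runs the call on an eventlet green pool and reads credentials from the Neutron database):

    import base64
    import json

    import requests  # the same HTTP library the n1kv client relies on

    VSM_HOST = '192.0.2.10'                    # assumption: VSM management address
    USERNAME, PASSWORD = 'admin', 'secret'     # assumption: VSM credentials

    BRIDGE_DOMAIN_PATH = '/kvm/bridge-domain/%s'   # path template, as in the client


    def vsm_request(method, path, body=None):
        """Issue one JSON request to the VSM REST API."""
        url = 'http://%s/api/n1k%s' % (VSM_HOST, path)
        token = base64.b64encode(
            ('%s:%s' % (USERNAME, PASSWORD)).encode()).decode()
        headers = {'Authorization': 'Basic %s' % token,
                   'Content-Type': 'application/json',
                   'Accept': 'application/json'}
        data = json.dumps(body) if body else None
        return requests.request(method, url, data=data, headers=headers, timeout=15)


    # e.g. delete the bridge domain created for a VXLAN network segment:
    # resp = vsm_request('DELETE', BRIDGE_DOMAIN_PATH % 'net-1_bd')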
+ +import eventlet + +from oslo.config import cfg as q_conf + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_db +from neutron.extensions import portbindings +from neutron.extensions import providernet +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils as uuidutils +from neutron.plugins.cisco.common import cisco_constants as c_const +from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred +from neutron.plugins.cisco.common import cisco_exceptions +from neutron.plugins.cisco.common import config as c_conf +from neutron.plugins.cisco.db import n1kv_db_v2 +from neutron.plugins.cisco.db import network_db_v2 +from neutron.plugins.cisco.extensions import n1kv +from neutron.plugins.cisco.n1kv import n1kv_client +from neutron.plugins.common import constants as svc_constants + + +LOG = logging.getLogger(__name__) + + +class N1kvRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin): + + """Class to handle agent RPC calls.""" + + # Set RPC API version to 1.1 by default. + RPC_API_VERSION = '1.1' + + +class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + portbindings_db.PortBindingMixin, + n1kv_db_v2.NetworkProfile_db_mixin, + n1kv_db_v2.PolicyProfile_db_mixin, + network_db_v2.Credential_db_mixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin): + + """ + Implement the Neutron abstractions using Cisco Nexus1000V. + + Refer README file for the architecture, new features, and + workflow + + """ + + # This attribute specifies whether the plugin supports or not + # bulk operations. + __native_bulk_support = False + supported_extension_aliases = ["provider", "agent", + "n1kv", "network_profile", + "policy_profile", "external-net", "router", + "binding", "credential", + "l3_agent_scheduler", + "dhcp_agent_scheduler"] + + def __init__(self, configfile=None): + """ + Initialize Nexus1000V Neutron plugin. + + 1. Initialize VIF type to OVS + 2. Initialize Nexus1000v and Credential DB + 3. 
Establish communication with Cisco Nexus1000V + """ + super(N1kvNeutronPluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + c_cred.Store.initialize() + self._setup_vsm() + self._setup_rpc() + self.network_scheduler = importutils.import_object( + q_conf.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + q_conf.CONF.router_scheduler_driver + ) + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.endpoints = [N1kvRpcCallbacks(), agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _setup_vsm(self): + """ + Setup Cisco Nexus 1000V related parameters and pull policy profiles. + + Retrieve all the policy profiles from the VSM when the plugin is + is instantiated for the first time and then continue to poll for + policy profile updates. + """ + LOG.debug(_('_setup_vsm')) + self.agent_vsm = True + # Poll VSM for create/delete of policy profile. + eventlet.spawn(self._poll_policy_profiles) + + def _poll_policy_profiles(self): + """Start a green thread to pull policy profiles from VSM.""" + while True: + self._populate_policy_profiles() + eventlet.sleep(c_conf.CISCO_N1K.poll_duration) + + def _populate_policy_profiles(self): + """ + Populate all the policy profiles from VSM. + + The tenant id is not available when the policy profiles are polled + from the VSM. Hence we associate the policy profiles with fake + tenant-ids. + """ + LOG.debug(_('_populate_policy_profiles')) + try: + n1kvclient = n1kv_client.Client() + policy_profiles = n1kvclient.list_port_profiles() + vsm_profiles = {} + plugin_profiles_set = set() + # Fetch policy profiles from VSM + for profile_name in policy_profiles: + profile_id = (policy_profiles + [profile_name][c_const.PROPERTIES][c_const.ID]) + vsm_profiles[profile_id] = profile_name + # Fetch policy profiles previously populated + for profile in n1kv_db_v2.get_policy_profiles(): + plugin_profiles_set.add(profile.id) + vsm_profiles_set = set(vsm_profiles) + # Update database if the profile sets differ. 
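The policy-profile poller around this point reconciles the VSM and the local database with plain set arithmetic: ids present on the VSM but not in the plugin's tables are added, and ids that disappeared from the VSM are deleted. The same reconciliation reduced to pure Python with toy data (the variable names mirror the code; nothing else is implied):

    # profiles reported by the VSM: id -> name (toy values)
    vsm_profiles = {'p1': 'gold', 'p2': 'silver', 'p4': 'bronze'}
    # profile ids currently stored by the plugin
    plugin_profiles_set = {'p1', 'p2', 'p3'}

    vsm_profiles_set = set(vsm_profiles)

    to_add = vsm_profiles_set - plugin_profiles_set       # new on the VSM
    to_delete = plugin_profiles_set - vsm_profiles_set    # gone from the VSM

    assert to_add == {'p4'} and to_delete == {'p3'}
    # there is work to do exactly when the symmetric difference is non-empty
    assert bool(vsm_profiles_set ^ plugin_profiles_set) == bool(to_add or to_delete)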
+ if vsm_profiles_set ^ plugin_profiles_set: + # Add profiles in database if new profiles were created in VSM + for pid in vsm_profiles_set - plugin_profiles_set: + self._add_policy_profile(vsm_profiles[pid], pid) + + # Delete profiles from database if profiles were deleted in VSM + for pid in plugin_profiles_set - vsm_profiles_set: + self._delete_policy_profile(pid) + self._remove_all_fake_policy_profiles() + except (cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + LOG.warning(_('No policy profile populated from VSM')) + + def _extend_network_dict_provider(self, context, network): + """Add extended network parameters.""" + binding = n1kv_db_v2.get_network_binding(context.session, + network['id']) + network[providernet.NETWORK_TYPE] = binding.network_type + if binding.network_type == c_const.NETWORK_TYPE_OVERLAY: + network[providernet.PHYSICAL_NETWORK] = None + network[providernet.SEGMENTATION_ID] = binding.segmentation_id + network[n1kv.MULTICAST_IP] = binding.multicast_ip + elif binding.network_type == c_const.NETWORK_TYPE_VLAN: + network[providernet.PHYSICAL_NETWORK] = binding.physical_network + network[providernet.SEGMENTATION_ID] = binding.segmentation_id + elif binding.network_type == c_const.NETWORK_TYPE_TRUNK: + network[providernet.PHYSICAL_NETWORK] = binding.physical_network + network[providernet.SEGMENTATION_ID] = None + network[n1kv.MULTICAST_IP] = None + elif binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + network[providernet.PHYSICAL_NETWORK] = None + network[providernet.SEGMENTATION_ID] = None + network[n1kv.MULTICAST_IP] = None + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(providernet.NETWORK_TYPE) + physical_network = attrs.get(providernet.PHYSICAL_NETWORK) + segmentation_id = attrs.get(providernet.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == c_const.NETWORK_TYPE_VLAN: + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if segmentation_id < 1 or segmentation_id > 4094: + msg = _("provider:segmentation_id out of range " + "(1 through 4094)") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == c_const.NETWORK_TYPE_OVERLAY: + if physical_network_set: + msg = _("provider:physical_network specified for Overlay " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if segmentation_id < 5000: + msg = _("provider:segmentation_id out of range " + "(5000+)") + raise n_exc.InvalidInput(error_message=msg) + else: + msg = _("provider:network_type %s not supported"), network_type + raise n_exc.InvalidInput(error_message=msg) + + if network_type == c_const.NETWORK_TYPE_VLAN: + if physical_network_set: + network_profiles = n1kv_db_v2.get_network_profiles() + for network_profile in network_profiles: + if physical_network == network_profile[ + 'physical_network']: + break + else: + msg = (_("Unknown provider:physical_network %s"), + physical_network) + raise 
n_exc.InvalidInput(error_message=msg) + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + + return (network_type, physical_network, segmentation_id) + + def _check_provider_update(self, context, attrs): + """Handle Provider network updates.""" + network_type = attrs.get(providernet.NETWORK_TYPE) + physical_network = attrs.get(providernet.PHYSICAL_NETWORK) + segmentation_id = attrs.get(providernet.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return + + # TBD : Need to handle provider network updates + msg = _("Plugin does not support updating provider attributes") + raise n_exc.InvalidInput(error_message=msg) + + def _get_cluster(self, segment1, segment2, clusters): + """ + Returns a cluster to apply the segment mapping + + :param segment1: UUID of segment to be mapped + :param segment2: UUID of segment to be mapped + :param clusters: List of clusters + """ + for cluster in sorted(clusters, key=lambda k: k['size']): + for mapping in cluster[c_const.MAPPINGS]: + for segment in mapping[c_const.SEGMENTS]: + if segment1 in segment or segment2 in segment: + break + else: + cluster['size'] += 2 + return cluster['encapProfileName'] + break + return + + def _extend_mapping_dict(self, context, mapping_dict, segment): + """ + Extend a mapping dictionary with dot1q tag and bridge-domain name. + + :param context: neutron api request context + :param mapping_dict: dictionary to populate values + :param segment: id of the segment being populated + """ + net = self.get_network(context, segment) + if net[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN: + mapping_dict['dot1q'] = str(net[providernet.SEGMENTATION_ID]) + else: + mapping_dict['bridgeDomain'] = (net['name'] + + c_const.BRIDGE_DOMAIN_SUFFIX) + + def _send_add_multi_segment_request(self, context, net_id, segment_pairs): + """ + Send Add multi-segment network request to VSM. 
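The _get_cluster helper above walks the VXLAN gateway clusters in ascending 'size' order and returns the encapsulation profile of the first cluster that does not already map either segment, bumping its size so later picks stay balanced. A simplified, self-contained rendering of that idea with toy cluster records (the real method reads VSM-formatted dicts and uses more involved for/else control flow):

    def pick_cluster(segment1, segment2, clusters):
        """Return the encap profile of the least-loaded cluster not mapping either segment."""
        for cluster in sorted(clusters, key=lambda c: c['size']):
            already_mapped = any(segment1 in seg or segment2 in seg
                                 for mapping in cluster['mappings']
                                 for seg in mapping['segments'])
            if not already_mapped:
                cluster['size'] += 2   # each new mapping bridges two segments
                return cluster['encapProfileName']
        return None


    clusters = [
        {'encapProfileName': 'profile-a', 'size': 4,
         'mappings': [{'segments': ['seg-1:seg-2']}]},
        {'encapProfileName': 'profile-b', 'size': 0, 'mappings': []},
    ]
    assert pick_cluster('seg-3', 'seg-4', clusters) == 'profile-b'
    assert clusters[1]['size'] == 2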
+ + :param context: neutron api request context + :param net_id: UUID of the multi-segment network + :param segment_pairs: List of segments in UUID pairs + that need to be bridged + """ + + if not segment_pairs: + return + + session = context.session + n1kvclient = n1kv_client.Client() + clusters = n1kvclient.get_clusters() + online_clusters = [] + encap_dict = {} + for cluster in clusters['body'][c_const.SET]: + cluster = cluster[c_const.PROPERTIES] + if cluster[c_const.STATE] == c_const.ONLINE: + cluster['size'] = 0 + for mapping in cluster[c_const.MAPPINGS]: + cluster['size'] += ( + len(mapping[c_const.SEGMENTS])) + online_clusters.append(cluster) + for (segment1, segment2) in segment_pairs: + encap_profile = self._get_cluster(segment1, segment2, + online_clusters) + if encap_profile is not None: + if encap_profile in encap_dict: + profile_dict = encap_dict[encap_profile] + else: + profile_dict = {'name': encap_profile, + 'addMappings': [], + 'delMappings': []} + encap_dict[encap_profile] = profile_dict + mapping_dict = {} + self._extend_mapping_dict(context, + mapping_dict, segment1) + self._extend_mapping_dict(context, + mapping_dict, segment2) + profile_dict['addMappings'].append(mapping_dict) + n1kv_db_v2.add_multi_segment_encap_profile_name(session, + net_id, + (segment1, + segment2), + encap_profile) + else: + raise cisco_exceptions.NoClusterFound + + for profile in encap_dict: + n1kvclient.update_encapsulation_profile(context, profile, + encap_dict[profile]) + + def _send_del_multi_segment_request(self, context, net_id, segment_pairs): + """ + Send Delete multi-segment network request to VSM. + + :param context: neutron api request context + :param net_id: UUID of the multi-segment network + :param segment_pairs: List of segments in UUID pairs + whose bridging needs to be removed + """ + if not segment_pairs: + return + session = context.session + encap_dict = {} + n1kvclient = n1kv_client.Client() + for (segment1, segment2) in segment_pairs: + binding = ( + n1kv_db_v2.get_multi_segment_network_binding(session, net_id, + (segment1, + segment2))) + encap_profile = binding['encap_profile_name'] + if encap_profile in encap_dict: + profile_dict = encap_dict[encap_profile] + else: + profile_dict = {'name': encap_profile, + 'addMappings': [], + 'delMappings': []} + encap_dict[encap_profile] = profile_dict + mapping_dict = {} + self._extend_mapping_dict(context, + mapping_dict, segment1) + self._extend_mapping_dict(context, + mapping_dict, segment2) + profile_dict['delMappings'].append(mapping_dict) + + for profile in encap_dict: + n1kvclient.update_encapsulation_profile(context, profile, + encap_dict[profile]) + + def _get_encap_segments(self, context, segment_pairs): + """ + Get the list of segments in encapsulation profile format. + + :param context: neutron api request context + :param segment_pairs: List of segments that need to be bridged + """ + member_list = [] + for pair in segment_pairs: + (segment, dot1qtag) = pair + member_dict = {} + net = self.get_network(context, segment) + member_dict['bridgeDomain'] = (net['name'] + + c_const.BRIDGE_DOMAIN_SUFFIX) + member_dict['dot1q'] = dot1qtag + member_list.append(member_dict) + return member_list + + def _populate_member_segments(self, context, network, segment_pairs, oper): + """ + Populate trunk network dict with member segments. 
+ + :param context: neutron api request context + :param network: Dictionary containing the trunk network information + :param segment_pairs: List of segments in UUID pairs + that needs to be trunked + :param oper: Operation to be performed + """ + LOG.debug(_('_populate_member_segments %s'), segment_pairs) + trunk_list = [] + for (segment, dot1qtag) in segment_pairs: + net = self.get_network(context, segment) + member_dict = {'segment': net['name'], + 'dot1qtag': dot1qtag} + trunk_list.append(member_dict) + if oper == n1kv.SEGMENT_ADD: + network['add_segment_list'] = trunk_list + elif oper == n1kv.SEGMENT_DEL: + network['del_segment_list'] = trunk_list + + def _parse_multi_segments(self, context, attrs, param): + """ + Parse the multi-segment network attributes. + + :param context: neutron api request context + :param attrs: Attributes of the network + :param param: Additional parameter indicating an add + or del operation + :returns: List of segment UUIDs in set pairs + """ + pair_list = [] + valid_seg_types = [c_const.NETWORK_TYPE_VLAN, + c_const.NETWORK_TYPE_OVERLAY] + segments = attrs.get(param) + if not attributes.is_attr_set(segments): + return pair_list + for pair in segments.split(','): + segment1, sep, segment2 = pair.partition(':') + if (uuidutils.is_uuid_like(segment1) and + uuidutils.is_uuid_like(segment2)): + binding1 = n1kv_db_v2.get_network_binding(context.session, + segment1) + binding2 = n1kv_db_v2.get_network_binding(context.session, + segment2) + if (binding1.network_type not in valid_seg_types or + binding2.network_type not in valid_seg_types or + binding1.network_type == binding2.network_type): + msg = _("Invalid pairing supplied") + raise n_exc.InvalidInput(error_message=msg) + else: + pair_list.append((segment1, segment2)) + else: + LOG.debug(_('Invalid UUID supplied in %s'), pair) + msg = _("Invalid UUID supplied") + raise n_exc.InvalidInput(error_message=msg) + return pair_list + + def _parse_trunk_segments(self, context, attrs, param, physical_network, + sub_type): + """ + Parse the trunk network attributes. 
+ + :param context: neutron api request context + :param attrs: Attributes of the network + :param param: Additional parameter indicating an add + or del operation + :param physical_network: Physical network of the trunk segment + :param sub_type: Sub-type of the trunk segment + :returns: List of segment UUIDs and dot1qtag (for vxlan) in set pairs + """ + pair_list = [] + segments = attrs.get(param) + if not attributes.is_attr_set(segments): + return pair_list + for pair in segments.split(','): + segment, sep, dot1qtag = pair.partition(':') + if sub_type == c_const.NETWORK_TYPE_VLAN: + dot1qtag = '' + if uuidutils.is_uuid_like(segment): + binding = n1kv_db_v2.get_network_binding(context.session, + segment) + if binding.network_type == c_const.NETWORK_TYPE_TRUNK: + msg = _("Cannot add a trunk segment '%s' as a member of " + "another trunk segment") % segment + raise n_exc.InvalidInput(error_message=msg) + elif binding.network_type == c_const.NETWORK_TYPE_VLAN: + if sub_type == c_const.NETWORK_TYPE_OVERLAY: + msg = _("Cannot add vlan segment '%s' as a member of " + "a vxlan trunk segment") % segment + raise n_exc.InvalidInput(error_message=msg) + if not physical_network: + physical_network = binding.physical_network + elif physical_network != binding.physical_network: + msg = _("Network UUID '%s' belongs to a different " + "physical network") % segment + raise n_exc.InvalidInput(error_message=msg) + elif binding.network_type == c_const.NETWORK_TYPE_OVERLAY: + if sub_type == c_const.NETWORK_TYPE_VLAN: + msg = _("Cannot add vxlan segment '%s' as a member of " + "a vlan trunk segment") % segment + raise n_exc.InvalidInput(error_message=msg) + try: + if not utils.is_valid_vlan_tag(int(dot1qtag)): + msg = _("Vlan tag '%s' is out of range") % dot1qtag + raise n_exc.InvalidInput(error_message=msg) + except ValueError: + msg = _("Vlan tag '%s' is not an integer " + "value") % dot1qtag + raise n_exc.InvalidInput(error_message=msg) + pair_list.append((segment, dot1qtag)) + else: + LOG.debug(_('%s is not a valid uuid'), segment) + msg = _("'%s' is not a valid UUID") % segment + raise n_exc.InvalidInput(error_message=msg) + return pair_list + + def _extend_network_dict_member_segments(self, context, network): + """Add the extended parameter member segments to the network.""" + members = [] + binding = n1kv_db_v2.get_network_binding(context.session, + network['id']) + if binding.network_type == c_const.NETWORK_TYPE_TRUNK: + members = n1kv_db_v2.get_trunk_members(context.session, + network['id']) + elif binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + members = n1kv_db_v2.get_multi_segment_members(context.session, + network['id']) + network[n1kv.MEMBER_SEGMENTS] = members + + def _extend_network_dict_profile(self, context, network): + """Add the extended parameter network profile to the network.""" + binding = n1kv_db_v2.get_network_binding(context.session, + network['id']) + network[n1kv.PROFILE_ID] = binding.profile_id + + def _extend_port_dict_profile(self, context, port): + """Add the extended parameter port profile to the port.""" + binding = n1kv_db_v2.get_port_binding(context.session, + port['id']) + port[n1kv.PROFILE_ID] = binding.profile_id + + def _process_network_profile(self, context, network): + """Validate network profile exists.""" + profile_id = network.get(n1kv.PROFILE_ID) + profile_id_set = attributes.is_attr_set(profile_id) + if not profile_id_set: + profile_name = c_conf.CISCO_N1K.default_network_profile + net_p = self._get_network_profile_by_name(context.session, + 
profile_name) + profile_id = net_p['id'] + network['n1kv:profile_id'] = profile_id + return profile_id + + def _process_policy_profile(self, context, attrs): + """Validates whether policy profile exists.""" + profile_id = attrs.get(n1kv.PROFILE_ID) + profile_id_set = attributes.is_attr_set(profile_id) + if not profile_id_set: + msg = _("n1kv:profile_id does not exist") + raise n_exc.InvalidInput(error_message=msg) + if not self._policy_profile_exists(profile_id): + msg = _("n1kv:profile_id does not exist") + raise n_exc.InvalidInput(error_message=msg) + + return profile_id + + def _send_create_logical_network_request(self, network_profile, tenant_id): + """ + Send create logical network request to VSM. + + :param network_profile: network profile dictionary + :param tenant_id: UUID representing the tenant + """ + LOG.debug(_('_send_create_logical_network')) + n1kvclient = n1kv_client.Client() + n1kvclient.create_logical_network(network_profile, tenant_id) + + def _send_delete_logical_network_request(self, network_profile): + """ + Send delete logical network request to VSM. + + :param network_profile: network profile dictionary + """ + LOG.debug('_send_delete_logical_network') + n1kvclient = n1kv_client.Client() + logical_network_name = (network_profile['id'] + + c_const.LOGICAL_NETWORK_SUFFIX) + n1kvclient.delete_logical_network(logical_network_name) + + def _send_create_network_profile_request(self, context, profile): + """ + Send create network profile request to VSM. + + :param context: neutron api request context + :param profile: network profile dictionary + """ + LOG.debug(_('_send_create_network_profile_request: %s'), profile['id']) + n1kvclient = n1kv_client.Client() + n1kvclient.create_network_segment_pool(profile, context.tenant_id) + + def _send_update_network_profile_request(self, profile): + """ + Send update network profile request to VSM. + + :param profile: network profile dictionary + """ + LOG.debug(_('_send_update_network_profile_request: %s'), profile['id']) + n1kvclient = n1kv_client.Client() + n1kvclient.update_network_segment_pool(profile) + + def _send_delete_network_profile_request(self, profile): + """ + Send delete network profile request to VSM. + + :param profile: network profile dictionary + """ + LOG.debug(_('_send_delete_network_profile_request: %s'), + profile['name']) + n1kvclient = n1kv_client.Client() + n1kvclient.delete_network_segment_pool(profile['id']) + + def _send_create_network_request(self, context, network, segment_pairs): + """ + Send create network request to VSM. + + Create a bridge domain for network of type Overlay. 
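+ For trunk networks, the member segments are populated first; if the
+ trunk sub-type is overlay, an encapsulation profile is created as
+ well before the network segment is created on the VSM.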
+ :param context: neutron api request context + :param network: network dictionary + :param segment_pairs: List of segments in UUID pairs + that need to be bridged + """ + LOG.debug(_('_send_create_network_request: %s'), network['id']) + profile = self.get_network_profile(context, + network[n1kv.PROFILE_ID]) + n1kvclient = n1kv_client.Client() + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: + n1kvclient.create_bridge_domain(network, profile['sub_type']) + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: + self._populate_member_segments(context, network, segment_pairs, + n1kv.SEGMENT_ADD) + network['del_segment_list'] = [] + if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: + encap_dict = {'name': (network['name'] + + c_const.ENCAPSULATION_PROFILE_SUFFIX), + 'add_segment_list': ( + self._get_encap_segments(context, + segment_pairs)), + 'del_segment_list': []} + n1kvclient.create_encapsulation_profile(encap_dict) + n1kvclient.create_network_segment(network, profile) + + def _send_update_network_request(self, context, network, add_segments, + del_segments): + """ + Send update network request to VSM. + + :param context: neutron api request context + :param network: network dictionary + :param add_segments: List of segments bindings + that need to be deleted + :param del_segments: List of segments bindings + that need to be deleted + """ + LOG.debug(_('_send_update_network_request: %s'), network['id']) + db_session = context.session + profile = n1kv_db_v2.get_network_profile( + db_session, network[n1kv.PROFILE_ID]) + n1kvclient = n1kv_client.Client() + body = {'description': network['name'], + 'id': network['id'], + 'networkSegmentPool': profile['id'], + 'vlan': network[providernet.SEGMENTATION_ID], + 'mode': 'access', + 'segmentType': profile['segment_type'], + 'addSegments': [], + 'delSegments': []} + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: + self._populate_member_segments(context, network, add_segments, + n1kv.SEGMENT_ADD) + self._populate_member_segments(context, network, del_segments, + n1kv.SEGMENT_DEL) + body['mode'] = c_const.NETWORK_TYPE_TRUNK + body['segmentType'] = profile['sub_type'] + body['addSegments'] = network['add_segment_list'] + body['delSegments'] = network['del_segment_list'] + LOG.debug(_('add_segments=%s'), body['addSegments']) + LOG.debug(_('del_segments=%s'), body['delSegments']) + if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: + encap_profile = (network['id'] + + c_const.ENCAPSULATION_PROFILE_SUFFIX) + encap_dict = {'name': encap_profile, + 'addMappings': ( + self._get_encap_segments(context, + add_segments)), + 'delMappings': ( + self._get_encap_segments(context, + del_segments))} + n1kvclient.update_encapsulation_profile(context, encap_profile, + encap_dict) + n1kvclient.update_network_segment(network['id'], body) + + def _send_delete_network_request(self, context, network): + """ + Send delete network request to VSM. + + Delete bridge domain if network is of type Overlay. + Delete encapsulation profile if network is of type OVERLAY Trunk. 
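+ For multi-segment networks, the stored segment mappings are removed
+ from their encapsulation profiles before the network segment itself
+ is deleted.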
+ :param context: neutron api request context + :param network: network dictionary + """ + LOG.debug(_('_send_delete_network_request: %s'), network['id']) + n1kvclient = n1kv_client.Client() + session = context.session + if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: + name = network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX + n1kvclient.delete_bridge_domain(name) + elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: + profile = self.get_network_profile( + context, network[n1kv.PROFILE_ID]) + if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: + profile_name = (network['id'] + + c_const.ENCAPSULATION_PROFILE_SUFFIX) + n1kvclient.delete_encapsulation_profile(profile_name) + elif (network[providernet.NETWORK_TYPE] == + c_const.NETWORK_TYPE_MULTI_SEGMENT): + encap_dict = n1kv_db_v2.get_multi_segment_encap_dict(session, + network['id']) + for profile in encap_dict: + profile_dict = {'name': profile, + 'addSegments': [], + 'delSegments': []} + for segment_pair in encap_dict[profile]: + mapping_dict = {} + (segment1, segment2) = segment_pair + self._extend_mapping_dict(context, + mapping_dict, segment1) + self._extend_mapping_dict(context, + mapping_dict, segment2) + profile_dict['delSegments'].append(mapping_dict) + n1kvclient.update_encapsulation_profile(context, profile, + profile_dict) + n1kvclient.delete_network_segment(network['id']) + + def _send_create_subnet_request(self, context, subnet): + """ + Send create subnet request to VSM. + + :param context: neutron api request context + :param subnet: subnet dictionary + """ + LOG.debug(_('_send_create_subnet_request: %s'), subnet['id']) + n1kvclient = n1kv_client.Client() + n1kvclient.create_ip_pool(subnet) + + def _send_update_subnet_request(self, subnet): + """ + Send update subnet request to VSM. + + :param subnet: subnet dictionary + """ + LOG.debug(_('_send_update_subnet_request: %s'), subnet['name']) + n1kvclient = n1kv_client.Client() + n1kvclient.update_ip_pool(subnet) + + def _send_delete_subnet_request(self, context, subnet): + """ + Send delete subnet request to VSM. + + :param context: neutron api request context + :param subnet: subnet dictionary + """ + LOG.debug(_('_send_delete_subnet_request: %s'), subnet['name']) + body = {'ipPool': subnet['id'], 'deleteSubnet': True} + n1kvclient = n1kv_client.Client() + n1kvclient.update_network_segment(subnet['network_id'], body=body) + n1kvclient.delete_ip_pool(subnet['id']) + + def _send_create_port_request(self, + context, + port, + port_count, + policy_profile, + vm_network_name): + """ + Send create port request to VSM. + + Create a VM network for a network and policy profile combination. + If the VM network already exists, bind this port to the existing + VM network on the VSM. + :param context: neutron api request context + :param port: port dictionary + :param port_count: integer representing the number of ports in one + VM Network + :param policy_profile: object of type policy profile + :param vm_network_name: string representing the name of the VM + network + """ + LOG.debug(_('_send_create_port_request: %s'), port) + n1kvclient = n1kv_client.Client() + if port_count == 1: + n1kvclient.create_vm_network(port, + vm_network_name, + policy_profile) + else: + n1kvclient.create_n1kv_port(port, vm_network_name) + + def _send_update_port_request(self, port_id, mac_address, vm_network_name): + """ + Send update port request to VSM. 
+ + :param port_id: UUID representing port to update + :param mac_address: string representing the mac address + :param vm_network_name: VM network name to which the port is bound + """ + LOG.debug(_('_send_update_port_request: %s'), port_id) + body = {'portId': port_id, + 'macAddress': mac_address} + n1kvclient = n1kv_client.Client() + n1kvclient.update_n1kv_port(vm_network_name, port_id, body) + + def _send_delete_port_request(self, context, port, vm_network): + """ + Send delete port request to VSM. + + Delete the port on the VSM. If it is the last port on the VM Network, + delete the VM Network. + :param context: neutron api request context + :param port: port object which is to be deleted + :param vm_network: VM network object with which the port is associated + """ + LOG.debug(_('_send_delete_port_request: %s'), port['id']) + n1kvclient = n1kv_client.Client() + n1kvclient.delete_n1kv_port(vm_network['name'], port['id']) + if vm_network['port_count'] == 0: + n1kvclient.delete_vm_network(vm_network['name']) + + def _get_segmentation_id(self, context, id): + """ + Retrieve segmentation ID for a given network. + + :param context: neutron api request context + :param id: UUID of the network + :returns: segmentation ID for the network + """ + session = context.session + binding = n1kv_db_v2.get_network_binding(session, id) + return binding.segmentation_id + + def create_network(self, context, network): + """ + Create network based on network profile. + + :param context: neutron api request context + :param network: network dictionary + :returns: network object + """ + (network_type, physical_network, + segmentation_id) = self._process_provider_create(context, + network['network']) + profile_id = self._process_network_profile(context, network['network']) + segment_pairs = None + LOG.debug(_('Create network: profile_id=%s'), profile_id) + session = context.session + with session.begin(subtransactions=True): + if not network_type: + # tenant network + (physical_network, network_type, segmentation_id, + multicast_ip) = n1kv_db_v2.alloc_network(session, + profile_id) + LOG.debug(_('Physical_network %(phy_net)s, ' + 'seg_type %(net_type)s, ' + 'seg_id %(seg_id)s, ' + 'multicast_ip %(multicast_ip)s'), + {'phy_net': physical_network, + 'net_type': network_type, + 'seg_id': segmentation_id, + 'multicast_ip': multicast_ip}) + if network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + segment_pairs = ( + self._parse_multi_segments(context, network['network'], + n1kv.SEGMENT_ADD)) + LOG.debug(_('Seg list %s '), segment_pairs) + elif network_type == c_const.NETWORK_TYPE_TRUNK: + network_profile = self.get_network_profile(context, + profile_id) + segment_pairs = ( + self._parse_trunk_segments(context, network['network'], + n1kv.SEGMENT_ADD, + physical_network, + network_profile['sub_type'] + )) + LOG.debug(_('Seg list %s '), segment_pairs) + else: + if not segmentation_id: + raise n_exc.TenantNetworksDisabled() + else: + # provider network + if network_type == c_const.NETWORK_TYPE_VLAN: + network_profile = self.get_network_profile(context, + profile_id) + seg_min, seg_max = self._get_segment_range( + network_profile['segment_range']) + if not seg_min <= segmentation_id <= seg_max: + raise cisco_exceptions.VlanIDOutsidePool + n1kv_db_v2.reserve_specific_vlan(session, + physical_network, + segmentation_id) + multicast_ip = "0.0.0.0" + net = super(N1kvNeutronPluginV2, self).create_network(context, + network) + n1kv_db_v2.add_network_binding(session, + net['id'], + network_type, + physical_network, + 
segmentation_id, + multicast_ip, + profile_id, + segment_pairs) + self._process_l3_create(context, net, network['network']) + self._extend_network_dict_provider(context, net) + self._extend_network_dict_profile(context, net) + try: + if network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + self._send_add_multi_segment_request(context, net['id'], + segment_pairs) + else: + self._send_create_network_request(context, net, segment_pairs) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + super(N1kvNeutronPluginV2, self).delete_network(context, net['id']) + else: + LOG.debug(_("Created network: %s"), net['id']) + return net + + def update_network(self, context, id, network): + """ + Update network parameters. + + :param context: neutron api request context + :param id: UUID representing the network to update + :returns: updated network object + """ + self._check_provider_update(context, network['network']) + add_segments = [] + del_segments = [] + + session = context.session + with session.begin(subtransactions=True): + net = super(N1kvNeutronPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + binding = n1kv_db_v2.get_network_binding(session, id) + if binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: + add_segments = ( + self._parse_multi_segments(context, network['network'], + n1kv.SEGMENT_ADD)) + n1kv_db_v2.add_multi_segment_binding(session, + net['id'], add_segments) + del_segments = ( + self._parse_multi_segments(context, network['network'], + n1kv.SEGMENT_DEL)) + self._send_add_multi_segment_request(context, net['id'], + add_segments) + self._send_del_multi_segment_request(context, net['id'], + del_segments) + n1kv_db_v2.del_multi_segment_binding(session, + net['id'], del_segments) + elif binding.network_type == c_const.NETWORK_TYPE_TRUNK: + network_profile = self.get_network_profile(context, + binding.profile_id) + add_segments = ( + self._parse_trunk_segments(context, network['network'], + n1kv.SEGMENT_ADD, + binding.physical_network, + network_profile['sub_type'])) + n1kv_db_v2.add_trunk_segment_binding(session, + net['id'], add_segments) + del_segments = ( + self._parse_trunk_segments(context, network['network'], + n1kv.SEGMENT_DEL, + binding.physical_network, + network_profile['sub_type'])) + n1kv_db_v2.del_trunk_segment_binding(session, + net['id'], del_segments) + self._extend_network_dict_provider(context, net) + self._extend_network_dict_profile(context, net) + if binding.network_type != c_const.NETWORK_TYPE_MULTI_SEGMENT: + self._send_update_network_request(context, net, add_segments, + del_segments) + LOG.debug(_("Updated network: %s"), net['id']) + return net + + def delete_network(self, context, id): + """ + Delete a network. 
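+
+ Deletion is refused while the network is still a member of a trunk
+ or multi-segment network. The VLAN/VXLAN segment is released back to
+ its pool, the VSM is asked to delete the corresponding segment, and
+ the network binding row is removed by cascade.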
+ + :param context: neutron api request context + :param id: UUID representing the network to delete + """ + session = context.session + with session.begin(subtransactions=True): + binding = n1kv_db_v2.get_network_binding(session, id) + network = self.get_network(context, id) + if n1kv_db_v2.is_trunk_member(session, id): + msg = _("Cannot delete network '%s' " + "that is member of a trunk segment") % network['name'] + raise n_exc.InvalidInput(error_message=msg) + if n1kv_db_v2.is_multi_segment_member(session, id): + msg = _("Cannot delete network '%s' that is a member of a " + "multi-segment network") % network['name'] + raise n_exc.InvalidInput(error_message=msg) + if binding.network_type == c_const.NETWORK_TYPE_OVERLAY: + n1kv_db_v2.release_vxlan(session, binding.segmentation_id) + elif binding.network_type == c_const.NETWORK_TYPE_VLAN: + n1kv_db_v2.release_vlan(session, binding.physical_network, + binding.segmentation_id) + self._process_l3_delete(context, id) + super(N1kvNeutronPluginV2, self).delete_network(context, id) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is not necessary + self._send_delete_network_request(context, network) + LOG.debug(_("Deleted network: %s"), id) + + def get_network(self, context, id, fields=None): + """ + Retrieve a Network. + + :param context: neutron api request context + :param id: UUID representing the network to fetch + :returns: requested network dictionary + """ + LOG.debug(_("Get network: %s"), id) + net = super(N1kvNeutronPluginV2, self).get_network(context, id, None) + self._extend_network_dict_provider(context, net) + self._extend_network_dict_profile(context, net) + self._extend_network_dict_member_segments(context, net) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None): + """ + Retrieve a list of networks. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + network object. Values in this dictiontary are an + iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a network + dictionary. Only these fields will be returned. + :returns: list of network dictionaries. + """ + LOG.debug(_("Get networks")) + nets = super(N1kvNeutronPluginV2, self).get_networks(context, filters, + None) + for net in nets: + self._extend_network_dict_provider(context, net) + self._extend_network_dict_profile(context, net) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + """ + Create neutron port. + + Create a port. Use a default policy profile for ports created for dhcp + and router interface. Default policy profile name is configured in the + /etc/neutron/cisco_plugins.ini file. 
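+ Ports that share a network and policy profile are grouped into one
+ VM network on the VSM: the VM network is created along with the
+ first such port and its port count is updated for every port added
+ afterwards.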
+ + :param context: neutron api request context + :param port: port dictionary + :returns: port object + """ + p_profile = None + port_count = None + vm_network_name = None + profile_id_set = False + + # Set the network policy profile id for auto generated L3/DHCP ports + if ('device_id' in port['port'] and port['port']['device_owner'] in + [constants.DEVICE_OWNER_DHCP, constants.DEVICE_OWNER_ROUTER_INTF, + constants.DEVICE_OWNER_ROUTER_GW, + constants.DEVICE_OWNER_FLOATINGIP]): + p_profile_name = c_conf.CISCO_N1K.network_node_policy_profile + p_profile = self._get_policy_profile_by_name(p_profile_name) + if p_profile: + port['port']['n1kv:profile_id'] = p_profile['id'] + + if n1kv.PROFILE_ID in port['port']: + profile_id = port['port'].get(n1kv.PROFILE_ID) + profile_id_set = attributes.is_attr_set(profile_id) + + # Set the default policy profile id for ports if no id is set + if not profile_id_set: + p_profile_name = c_conf.CISCO_N1K.default_policy_profile + p_profile = self._get_policy_profile_by_name(p_profile_name) + if p_profile: + port['port']['n1kv:profile_id'] = p_profile['id'] + profile_id_set = True + + profile_id = self._process_policy_profile(context, + port['port']) + LOG.debug(_('Create port: profile_id=%s'), profile_id) + session = context.session + with session.begin(subtransactions=True): + pt = super(N1kvNeutronPluginV2, self).create_port(context, + port) + n1kv_db_v2.add_port_binding(session, pt['id'], profile_id) + self._extend_port_dict_profile(context, pt) + try: + vm_network = n1kv_db_v2.get_vm_network( + context.session, + profile_id, + pt['network_id']) + except cisco_exceptions.VMNetworkNotFound: + # Create a VM Network if no VM network exists. + vm_network_name = "%s%s_%s" % (c_const.VM_NETWORK_NAME_PREFIX, + profile_id, + pt['network_id']) + port_count = 1 + n1kv_db_v2.add_vm_network(context.session, + vm_network_name, + profile_id, + pt['network_id'], + port_count) + else: + # Update port count of the VM network. + vm_network_name = vm_network['name'] + port_count = vm_network['port_count'] + 1 + n1kv_db_v2.update_vm_network_port_count(context.session, + vm_network_name, + port_count) + self._process_portbindings_create_and_update(context, + port['port'], + pt) + # Extract policy profile for VM network create in VSM. + if not p_profile: + p_profile = n1kv_db_v2.get_policy_profile(session, profile_id) + try: + self._send_create_port_request(context, + pt, + port_count, + p_profile, + vm_network_name) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + super(N1kvNeutronPluginV2, self).delete_port(context, pt['id']) + else: + LOG.debug(_("Created port: %s"), pt) + return pt + + def update_port(self, context, id, port): + """ + Update port parameters. + + :param context: neutron api request context + :param id: UUID representing the port to update + :returns: updated port object + """ + LOG.debug(_("Update port: %s"), id) + with context.session.begin(subtransactions=True): + updated_port = super(N1kvNeutronPluginV2, + self).update_port(context, id, port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + self._extend_port_dict_profile(context, updated_port) + return updated_port + + def delete_port(self, context, id, l3_port_check=True): + """ + Delete a port. + + :param context: neutron api request context + :param id: UUID representing the port to delete + """ + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. 
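+ # Deleting a port also updates the VM network bookkeeping: the VM
+ # network's port count is decremented, its record is removed once no
+ # ports remain, and the VSM is notified of the deletion last.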
+ if l3_port_check: + self.prevent_l3_port_deletion(context, id) + with context.session.begin(subtransactions=True): + port = self.get_port(context, id) + vm_network = n1kv_db_v2.get_vm_network(context.session, + port[n1kv.PROFILE_ID], + port['network_id']) + vm_network['port_count'] -= 1 + n1kv_db_v2.update_vm_network_port_count(context.session, + vm_network['name'], + vm_network['port_count']) + if vm_network['port_count'] == 0: + n1kv_db_v2.delete_vm_network(context.session, + port[n1kv.PROFILE_ID], + port['network_id']) + self.disassociate_floatingips(context, id) + super(N1kvNeutronPluginV2, self).delete_port(context, id) + self._send_delete_port_request(context, port, vm_network) + + def get_port(self, context, id, fields=None): + """ + Retrieve a port. + :param context: neutron api request context + :param id: UUID representing the port to retrieve + :param fields: a list of strings that are valid keys in a port + dictionary. Only these fields will be returned. + :returns: port dictionary + """ + LOG.debug(_("Get port: %s"), id) + port = super(N1kvNeutronPluginV2, self).get_port(context, id, None) + self._extend_port_dict_profile(context, port) + return self._fields(port, fields) + + def get_ports(self, context, filters=None, fields=None): + """ + Retrieve a list of ports. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + port object. Values in this dictiontary are an + iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a port + dictionary. Only these fields will be returned. + :returns: list of port dictionaries + """ + LOG.debug(_("Get ports")) + ports = super(N1kvNeutronPluginV2, self).get_ports(context, filters, + None) + for port in ports: + self._extend_port_dict_profile(context, port) + + return [self._fields(port, fields) for port in ports] + + def create_subnet(self, context, subnet): + """ + Create subnet for a given network. + + :param context: neutron api request context + :param subnet: subnet dictionary + :returns: subnet object + """ + LOG.debug(_('Create subnet')) + sub = super(N1kvNeutronPluginV2, self).create_subnet(context, subnet) + try: + self._send_create_subnet_request(context, sub) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + super(N1kvNeutronPluginV2, self).delete_subnet(context, sub['id']) + else: + LOG.debug(_("Created subnet: %s"), sub['id']) + return sub + + def update_subnet(self, context, id, subnet): + """ + Update a subnet. + + :param context: neutron api request context + :param id: UUID representing subnet to update + :returns: updated subnet object + """ + LOG.debug(_('Update subnet')) + sub = super(N1kvNeutronPluginV2, self).update_subnet(context, + id, + subnet) + self._send_update_subnet_request(sub) + return sub + + def delete_subnet(self, context, id): + """ + Delete a subnet. + + :param context: neutron api request context + :param id: UUID representing subnet to delete + :returns: deleted subnet object + """ + LOG.debug(_('Delete subnet: %s'), id) + subnet = self.get_subnet(context, id) + self._send_delete_subnet_request(context, subnet) + return super(N1kvNeutronPluginV2, self).delete_subnet(context, id) + + def get_subnet(self, context, id, fields=None): + """ + Retrieve a subnet. 
+ + :param context: neutron api request context + :param id: UUID representing subnet to retrieve + :params fields: a list of strings that are valid keys in a subnet + dictionary. Only these fields will be returned. + :returns: subnet object + """ + LOG.debug(_("Get subnet: %s"), id) + subnet = super(N1kvNeutronPluginV2, self).get_subnet(context, id, + None) + return self._fields(subnet, fields) + + def get_subnets(self, context, filters=None, fields=None): + """ + Retrieve a list of subnets. + + :param context: neutron api request context + :param filters: a dictionary with keys that are valid keys for a + subnet object. Values in this dictiontary are an + iterable containing values that will be used for an + exact match comparison for that value. Each result + returned by this function will have matched one of the + values for each key in filters + :params fields: a list of strings that are valid keys in a subnet + dictionary. Only these fields will be returned. + :returns: list of dictionaries of subnets + """ + LOG.debug(_("Get subnets")) + subnets = super(N1kvNeutronPluginV2, self).get_subnets(context, + filters, + None) + return [self._fields(subnet, fields) for subnet in subnets] + + def create_network_profile(self, context, network_profile): + """ + Create a network profile. + + Create a network profile, which represents a pool of networks + belonging to one type (VLAN or Overlay). On creation of network + profile, we retrieve the admin tenant-id which we use to replace + the previously stored fake tenant-id in tenant-profile bindings. + :param context: neutron api request context + :param network_profile: network profile dictionary + :returns: network profile object + """ + self._replace_fake_tenant_id_with_real(context) + with context.session.begin(subtransactions=True): + net_p = super(N1kvNeutronPluginV2, + self).create_network_profile(context, + network_profile) + try: + self._send_create_logical_network_request(net_p, + context.tenant_id) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + n1kv_db_v2.delete_profile_binding(context.session, + context.tenant_id, + net_p['id']) + try: + self._send_create_network_profile_request(context, net_p) + except(cisco_exceptions.VSMError, + cisco_exceptions.VSMConnectionFailed): + n1kv_db_v2.delete_profile_binding(context.session, + context.tenant_id, + net_p['id']) + self._send_delete_logical_network_request(net_p) + return net_p + + def delete_network_profile(self, context, id): + """ + Delete a network profile. + + :param context: neutron api request context + :param id: UUID of the network profile to delete + :returns: deleted network profile object + """ + with context.session.begin(subtransactions=True): + net_p = super(N1kvNeutronPluginV2, + self).delete_network_profile(context, id) + self._send_delete_network_profile_request(net_p) + self._send_delete_logical_network_request(net_p) + + def update_network_profile(self, context, net_profile_id, network_profile): + """ + Update a network profile. + + :param context: neutron api request context + :param net_profile_id: UUID of the network profile to update + :param network_profile: dictionary containing network profile object + """ + session = context.session + with session.begin(subtransactions=True): + net_p = (super(N1kvNeutronPluginV2, self). 
+ update_network_profile(context, + net_profile_id, + network_profile)) + self._send_update_network_profile_request(net_p) + return net_p + + def create_router(self, context, router): + """ + Handle creation of router. + + Schedule router to L3 agent as part of the create handling. + :param context: neutron api request context + :param router: router dictionary + :returns: router object + """ + session = context.session + with session.begin(subtransactions=True): + rtr = (super(N1kvNeutronPluginV2, self). + create_router(context, router)) + LOG.debug(_("Scheduling router %s"), rtr['id']) + self.schedule_router(context, rtr['id']) + return rtr diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/network_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/network_plugin.py new file mode 100644 index 00000000..4c613407 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/network_plugin.py @@ -0,0 +1,174 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. + +import logging + +import webob.exc as wexc + +from neutron.api import extensions as neutron_extensions +from neutron.api.v2 import base +from neutron.db import db_base_plugin_v2 +from neutron.openstack.common import importutils +from neutron.plugins.cisco.common import cisco_exceptions as cexc +from neutron.plugins.cisco.common import config +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.cisco import extensions + +LOG = logging.getLogger(__name__) + + +class PluginV2(db_base_plugin_v2.NeutronDbPluginV2): + """Meta-Plugin with v2 API support for multiple sub-plugins.""" + _supported_extension_aliases = ["credential", "Cisco qos"] + _methods_to_delegate = ['create_network', + 'delete_network', 'update_network', 'get_network', + 'get_networks', + 'create_port', 'delete_port', + 'update_port', 'get_port', 'get_ports', + 'create_subnet', + 'delete_subnet', 'update_subnet', + 'get_subnet', 'get_subnets', ] + + CISCO_FAULT_MAP = { + cexc.CredentialAlreadyExists: wexc.HTTPBadRequest, + cexc.CredentialNameNotFound: wexc.HTTPNotFound, + cexc.CredentialNotFound: wexc.HTTPNotFound, + cexc.NetworkSegmentIDNotFound: wexc.HTTPNotFound, + cexc.NetworkVlanBindingAlreadyExists: wexc.HTTPBadRequest, + cexc.NexusComputeHostNotConfigured: wexc.HTTPNotFound, + cexc.NexusConfigFailed: wexc.HTTPBadRequest, + cexc.NexusConnectFailed: wexc.HTTPServiceUnavailable, + cexc.NexusPortBindingNotFound: wexc.HTTPNotFound, + cexc.NoMoreNics: wexc.HTTPBadRequest, + cexc.PortIdForNexusSvi: wexc.HTTPBadRequest, + cexc.PortVnicBindingAlreadyExists: wexc.HTTPBadRequest, + cexc.PortVnicNotFound: wexc.HTTPNotFound, + cexc.QosNameAlreadyExists: wexc.HTTPBadRequest, + cexc.QosNotFound: wexc.HTTPNotFound, + cexc.SubnetNotSpecified: wexc.HTTPBadRequest, + cexc.VlanIDNotAvailable: wexc.HTTPNotFound, + cexc.VlanIDNotFound: wexc.HTTPNotFound, + } + + @property + def 
supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + if hasattr(self._model, "supported_extension_aliases"): + aliases.extend(self._model.supported_extension_aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + """Load the model class.""" + self._model_name = config.CISCO.model_class + self._model = importutils.import_object(self._model_name) + native_bulk_attr_name = ("_%s__native_bulk_support" + % self._model.__class__.__name__) + self.__native_bulk_support = getattr(self._model, + native_bulk_attr_name, False) + + neutron_extensions.append_api_extensions_path(extensions.__path__) + + # Extend the fault map + self._extend_fault_map() + + LOG.debug(_("Plugin initialization complete")) + + def __getattribute__(self, name): + """Delegate core API calls to the model class. + + Core API calls are delegated directly to the configured model class. + Note: Bulking calls will be handled by this class, and turned into + non-bulking calls to be considered for delegation. + """ + methods = object.__getattribute__(self, "_methods_to_delegate") + if name in methods: + return getattr(object.__getattribute__(self, "_model"), + name) + else: + return object.__getattribute__(self, name) + + def __getattr__(self, name): + """Delegate calls to the extensions. + + This delegates the calls to the extensions explicitly implemented by + the model. + """ + if hasattr(self._model, name): + return getattr(self._model, name) + else: + # Must make sure we re-raise the error that led us here, since + # otherwise getattr() and even hasattr() doesn't work correctly. + raise AttributeError( + _("'%(model)s' object has no attribute '%(name)s'") % + {'model': self._model_name, 'name': name}) + + def _extend_fault_map(self): + """Extend the Neutron Fault Map for Cisco exceptions. + + Map exceptions which are specific to the Cisco Plugin + to standard HTTP exceptions. 
+ + """ + base.FAULT_MAP.update(self.CISCO_FAULT_MAP) + + """ + Extension API implementation + """ + def get_all_qoss(self, tenant_id): + """Get all QoS levels.""" + LOG.debug(_("get_all_qoss() called")) + qoslist = cdb.get_all_qoss(tenant_id) + return qoslist + + def get_qos_details(self, tenant_id, qos_id): + """Get QoS Details.""" + LOG.debug(_("get_qos_details() called")) + return cdb.get_qos(tenant_id, qos_id) + + def create_qos(self, tenant_id, qos_name, qos_desc): + """Create a QoS level.""" + LOG.debug(_("create_qos() called")) + qos = cdb.add_qos(tenant_id, qos_name, str(qos_desc)) + return qos + + def delete_qos(self, tenant_id, qos_id): + """Delete a QoS level.""" + LOG.debug(_("delete_qos() called")) + return cdb.remove_qos(tenant_id, qos_id) + + def rename_qos(self, tenant_id, qos_id, new_name): + """Rename QoS level.""" + LOG.debug(_("rename_qos() called")) + return cdb.update_qos(tenant_id, qos_id, new_name) + + def get_all_credentials(self): + """Get all credentials.""" + LOG.debug(_("get_all_credentials() called")) + credential_list = cdb.get_all_credentials() + return credential_list + + def get_credential_details(self, credential_id): + """Get a particular credential.""" + LOG.debug(_("get_credential_details() called")) + return cdb.get_credential(credential_id) + + def rename_credential(self, credential_id, new_name, new_password): + """Rename the particular credential resource.""" + LOG.debug(_("rename_credential() called")) + return cdb.update_credential(credential_id, new_name, + new_password=new_password) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/__init__.py new file mode 100644 index 00000000..b66a37fc --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Edgar Magana, Cisco Systems, Inc. +""" +Init module for Nexus Driver +""" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py new file mode 100644 index 00000000..f55a8001 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py @@ -0,0 +1,194 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Debojyoti Dutta, Cisco Systems, Inc. +# @author: Edgar Magana, Cisco Systems Inc. +# +""" +Implements a Nexus-OS NETCONF over SSHv2 API Client +""" + +import logging + +from ncclient import manager + +from neutron.openstack.common import excutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_credentials_v2 as cred +from neutron.plugins.cisco.common import cisco_exceptions as cexc +from neutron.plugins.cisco.common import config as conf +from neutron.plugins.cisco.db import nexus_db_v2 +from neutron.plugins.cisco.nexus import cisco_nexus_snippets as snipp + +LOG = logging.getLogger(__name__) + + +class CiscoNEXUSDriver(): + """Nexus Driver Main Class.""" + def __init__(self): + cisco_switches = conf.get_device_dictionary() + self.nexus_switches = dict(((key[1], key[2]), val) + for key, val in cisco_switches.items() + if key[0] == 'NEXUS_SWITCH') + self.credentials = {} + self.connections = {} + + def _edit_config(self, nexus_host, target='running', config='', + allowed_exc_strs=None): + """Modify switch config for a target config type. + + :param nexus_host: IP address of switch to configure + :param target: Target config type + :param config: Configuration string in XML format + :param allowed_exc_strs: Exceptions which have any of these strings + as a subset of their exception message + (str(exception)) can be ignored + + :raises: NexusConfigFailed + + """ + if not allowed_exc_strs: + allowed_exc_strs = [] + mgr = self.nxos_connect(nexus_host) + try: + mgr.edit_config(target, config=config) + except Exception as e: + for exc_str in allowed_exc_strs: + if exc_str in str(e): + break + else: + # Raise a Neutron exception. Include a description of + # the original ncclient exception. No need to preserve T/B + raise cexc.NexusConfigFailed(config=config, exc=e) + + def get_credential(self, nexus_ip): + if nexus_ip not in self.credentials: + nexus_username = cred.Store.get_username(nexus_ip) + nexus_password = cred.Store.get_password(nexus_ip) + self.credentials[nexus_ip] = { + const.USERNAME: nexus_username, + const.PASSWORD: nexus_password + } + return self.credentials[nexus_ip] + + def nxos_connect(self, nexus_host): + """Make SSH connection to the Nexus Switch.""" + if getattr(self.connections.get(nexus_host), 'connected', None): + return self.connections[nexus_host] + + nexus_ssh_port = int(self.nexus_switches[nexus_host, 'ssh_port']) + nexus_creds = self.get_credential(nexus_host) + nexus_user = nexus_creds[const.USERNAME] + nexus_password = nexus_creds[const.PASSWORD] + try: + man = manager.connect(host=nexus_host, + port=nexus_ssh_port, + username=nexus_user, + password=nexus_password) + self.connections[nexus_host] = man + except Exception as e: + # Raise a Neutron exception. Include a description of + # the original ncclient exception. No need to preserve T/B. + raise cexc.NexusConnectFailed(nexus_host=nexus_host, exc=e) + + return self.connections[nexus_host] + + def create_xml_snippet(self, cutomized_config): + """Create XML snippet. + + Creates the Proper XML structure for the Nexus Switch Configuration. 
+ """ + conf_xml_snippet = snipp.EXEC_CONF_SNIPPET % (cutomized_config) + return conf_xml_snippet + + def create_vlan(self, nexus_host, vlanid, vlanname): + """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" + confstr = self.create_xml_snippet( + snipp.CMD_VLAN_CONF_SNIPPET % (vlanid, vlanname)) + self._edit_config(nexus_host, target='running', config=confstr) + + # Enable VLAN active and no-shutdown states. Some versions of + # Nexus switch do not allow state changes for the extended VLAN + # range (1006-4094), but these errors can be ignored (default + # values are appropriate). + state_config = [snipp.CMD_VLAN_ACTIVE_SNIPPET, + snipp.CMD_VLAN_NO_SHUTDOWN_SNIPPET] + for snippet in state_config: + try: + confstr = self.create_xml_snippet(snippet % vlanid) + self._edit_config( + nexus_host, + target='running', + config=confstr, + allowed_exc_strs=["Can't modify state for extended", + "Command is only allowed on VLAN"]) + except cexc.NexusConfigFailed: + with excutils.save_and_reraise_exception(): + self.delete_vlan(nexus_host, vlanid) + + def delete_vlan(self, nexus_host, vlanid): + """Delete a VLAN on Nexus Switch given the VLAN ID.""" + confstr = snipp.CMD_NO_VLAN_CONF_SNIPPET % vlanid + confstr = self.create_xml_snippet(confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def enable_vlan_on_trunk_int(self, nexus_host, vlanid, etype, interface): + """Enable a VLAN on a trunk interface.""" + # If one or more VLANs are already configured on this interface, + # include the 'add' keyword. + if nexus_db_v2.get_port_switch_bindings('%s:%s' % (etype, interface), + nexus_host): + snippet = snipp.CMD_INT_VLAN_ADD_SNIPPET + else: + snippet = snipp.CMD_INT_VLAN_SNIPPET + confstr = snippet % (etype, interface, vlanid, etype) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def disable_vlan_on_trunk_int(self, nexus_host, vlanid, etype, interface): + """Disable a VLAN on a trunk interface.""" + confstr = snipp.CMD_NO_VLAN_INT_SNIPPET % (etype, interface, + vlanid, etype) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def create_and_trunk_vlan(self, nexus_host, vlan_id, vlan_name, + etype, nexus_port): + """Create VLAN and trunk it on the specified ports.""" + self.create_vlan(nexus_host, vlan_id, vlan_name) + LOG.debug(_("NexusDriver created VLAN: %s"), vlan_id) + if nexus_port: + self.enable_vlan_on_trunk_int(nexus_host, vlan_id, + etype, nexus_port) + + def delete_and_untrunk_vlan(self, nexus_host, vlan_id, etype, nexus_port): + """Delete VLAN and untrunk it from the specified ports.""" + self.delete_vlan(nexus_host, vlan_id) + if nexus_port: + self.disable_vlan_on_trunk_int(nexus_host, vlan_id, + etype, nexus_port) + + def create_vlan_svi(self, nexus_host, vlan_id, gateway_ip): + confstr = snipp.CMD_VLAN_SVI_SNIPPET % (vlan_id, gateway_ip) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def delete_vlan_svi(self, nexus_host, vlan_id): + confstr = snipp.CMD_NO_VLAN_SVI_SNIPPET % vlan_id + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py 
b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py new file mode 100644 index 00000000..012c8560 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py @@ -0,0 +1,345 @@ +# Copyright 2012 Cisco Systems, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Edgar Magana, Cisco Systems, Inc. +# @author: Arvind Somya, Cisco Systems, Inc. (asomya@cisco.com) +# + +""" +PlugIn for Nexus OS driver +""" + +import logging + +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.plugins.cisco.common import cisco_constants as const +from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc +from neutron.plugins.cisco.common import config as conf +from neutron.plugins.cisco.db import network_db_v2 as cdb +from neutron.plugins.cisco.db import nexus_db_v2 as nxos_db +from neutron.plugins.cisco import l2device_plugin_base + + +LOG = logging.getLogger(__name__) + + +class NexusPlugin(l2device_plugin_base.L2DevicePluginBase): + """Nexus PlugIn Main Class.""" + _networks = {} + + def __init__(self): + """Extract configuration parameters from the configuration file.""" + self._client = importutils.import_object(conf.CISCO.nexus_driver) + LOG.debug(_("Loaded driver %s"), conf.CISCO.nexus_driver) + self._nexus_switches = conf.get_device_dictionary() + + def create_network(self, network, attachment): + """Create or update a network when an attachment is changed. + + This method is not invoked at the usual plugin create_network() time. + Instead, it is invoked on create/update port. + + :param network: Network on which the port operation is happening + :param attachment: Details about the owner of the port + + Create a VLAN in the appropriate switch/port, and configure the + appropriate interfaces for this VLAN. + """ + LOG.debug(_("NexusPlugin:create_network() called")) + # Grab the switch IPs and ports for this host + host_connections = [] + host = attachment['host_name'] + for switch_type, switch_ip, attr in self._nexus_switches: + if str(attr) == str(host): + port = self._nexus_switches[switch_type, switch_ip, attr] + # Get ether type for port, assume an ethernet type + # if none specified. 
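+ # Configured port values are either '<etype>:<port_id>' or a bare
+ # port id, in which case the ether type defaults to 'ethernet'.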
+ if ':' in port: + etype, port_id = port.split(':') + else: + etype, port_id = 'ethernet', port + host_connections.append((switch_ip, etype, port_id)) + if not host_connections: + raise cisco_exc.NexusComputeHostNotConfigured(host=host) + + vlan_id = network[const.NET_VLAN_ID] + vlan_name = network[const.NET_VLAN_NAME] + auto_create = True + auto_trunk = True + if cdb.is_provider_vlan(vlan_id): + vlan_name = ''.join([conf.CISCO.provider_vlan_name_prefix, + str(vlan_id)]) + auto_create = conf.CISCO.provider_vlan_auto_create + auto_trunk = conf.CISCO.provider_vlan_auto_trunk + + # Check if this network is already in the DB + for switch_ip, etype, port_id in host_connections: + vlan_created = False + vlan_trunked = False + eport_id = '%s:%s' % (etype, port_id) + # Check for switch vlan bindings + try: + # This vlan has already been created on this switch + # via another operation, like SVI bindings. + nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) + vlan_created = True + auto_create = False + except cisco_exc.NexusPortBindingNotFound: + # No changes, proceed as normal + pass + + try: + nxos_db.get_port_vlan_switch_binding(eport_id, vlan_id, + switch_ip) + except cisco_exc.NexusPortBindingNotFound: + if auto_create and auto_trunk: + # Create vlan and trunk vlan on the port + LOG.debug(_("Nexus: create & trunk vlan %s"), vlan_name) + self._client.create_and_trunk_vlan( + switch_ip, vlan_id, vlan_name, etype, port_id) + vlan_created = True + vlan_trunked = True + elif auto_create: + # Create vlan but do not trunk it on the port + LOG.debug(_("Nexus: create vlan %s"), vlan_name) + self._client.create_vlan(switch_ip, vlan_id, vlan_name) + vlan_created = True + elif auto_trunk: + # Only trunk vlan on the port + LOG.debug(_("Nexus: trunk vlan %s"), vlan_name) + self._client.enable_vlan_on_trunk_int( + switch_ip, vlan_id, etype, port_id) + vlan_trunked = True + + try: + instance = attachment[const.INSTANCE_ID] + nxos_db.add_nexusport_binding(eport_id, str(vlan_id), + switch_ip, instance) + except Exception: + with excutils.save_and_reraise_exception(): + # Add binding failed, roll back any vlan creation/enabling + if vlan_created and vlan_trunked: + LOG.debug(_("Nexus: delete & untrunk vlan %s"), + vlan_name) + self._client.delete_and_untrunk_vlan(switch_ip, + vlan_id, + etype, port_id) + elif vlan_created: + LOG.debug(_("Nexus: delete vlan %s"), vlan_name) + self._client.delete_vlan(switch_ip, vlan_id) + elif vlan_trunked: + LOG.debug(_("Nexus: untrunk vlan %s"), vlan_name) + self._client.disable_vlan_on_trunk_int(switch_ip, + vlan_id, + etype, + port_id) + + net_id = network[const.NET_ID] + new_net_dict = {const.NET_ID: net_id, + const.NET_NAME: network[const.NET_NAME], + const.NET_PORTS: {}, + const.NET_VLAN_NAME: vlan_name, + const.NET_VLAN_ID: vlan_id} + self._networks[net_id] = new_net_dict + return new_net_dict + + def add_router_interface(self, vlan_name, vlan_id, subnet_id, + gateway_ip, router_id): + """Create VLAN SVI on the Nexus switch.""" + # Find a switch to create the SVI on + switch_ip = self._find_switch_for_svi() + if not switch_ip: + raise cisco_exc.NoNexusSviSwitch() + + # Check if this vlan exists on the switch already + try: + nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) + except cisco_exc.NexusPortBindingNotFound: + # Create vlan and trunk vlan on the port + self._client.create_and_trunk_vlan( + switch_ip, vlan_id, vlan_name, etype=None, nexus_port=None) + # Check if a router interface has already been created + try: + nxos_db.get_nexusvm_bindings(vlan_id, router_id) 
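+ # If the lookup above succeeds, a binding (and hence an SVI) already
+ # exists for this vlan/router pair; only NexusPortBindingNotFound
+ # falls through to the SVI creation below.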
+ raise cisco_exc.SubnetInterfacePresent(subnet_id=subnet_id, + router_id=router_id) + except cisco_exc.NexusPortBindingNotFound: + self._client.create_vlan_svi(switch_ip, vlan_id, gateway_ip) + nxos_db.add_nexusport_binding('router', str(vlan_id), + switch_ip, router_id) + + return True + + def remove_router_interface(self, vlan_id, router_id): + """Remove VLAN SVI from the Nexus Switch.""" + # Grab switch_ip from database + switch_ip = nxos_db.get_nexusvm_bindings(vlan_id, + router_id)[0].switch_ip + + # Delete the SVI interface from the switch + self._client.delete_vlan_svi(switch_ip, vlan_id) + + # Invoke delete_port to delete this row + # And delete vlan if required + return self.delete_port(router_id, vlan_id) + + def _find_switch_for_svi(self): + """Get a switch to create the SVI on.""" + LOG.debug(_("Grabbing a switch to create SVI")) + nexus_switches = self._client.nexus_switches + if conf.CISCO.svi_round_robin: + LOG.debug(_("Using round robin to create SVI")) + switch_dict = dict( + (switch_ip, 0) for switch_ip, _ in nexus_switches) + try: + bindings = nxos_db.get_nexussvi_bindings() + # Build a switch dictionary with weights + for binding in bindings: + switch_ip = binding.switch_ip + if switch_ip not in switch_dict: + switch_dict[switch_ip] = 1 + else: + switch_dict[switch_ip] += 1 + # Search for the lowest value in the dict + if switch_dict: + switch_ip = min(switch_dict, key=switch_dict.get) + return switch_ip + except cisco_exc.NexusPortBindingNotFound: + pass + + LOG.debug(_("No round robin or zero weights, using first switch")) + # Return the first switch in the config + return conf.first_device_ip + + def delete_network(self, tenant_id, net_id, **kwargs): + """Delete network. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:delete_network() called")) # pragma no cover + + def update_network(self, tenant_id, net_id, **kwargs): + """Update the properties of a particular Virtual Network. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:update_network() called")) # pragma no cover + + def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs): + """Create port. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:create_port() called")) # pragma no cover + + def delete_port(self, device_id, vlan_id): + """Delete port. + + Delete port bindings from the database and scan whether the network + is still required on the interfaces trunked. 
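+ When the last binding for a vlan on an interface is removed, the
+ vlan is untrunked from that interface and, if it is no longer used
+ anywhere on the switch, deleted as well (both subject to the
+ provider-vlan auto settings); on failure the removed binding is
+ restored.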
+ """ + LOG.debug(_("NexusPlugin:delete_port() called")) + # Delete DB row(s) for this port + try: + rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id) + except cisco_exc.NexusPortBindingNotFound: + return + + auto_delete = True + auto_untrunk = True + if cdb.is_provider_vlan(vlan_id): + auto_delete = conf.CISCO.provider_vlan_auto_create + auto_untrunk = conf.CISCO.provider_vlan_auto_trunk + LOG.debug(_("delete_network(): provider vlan %s"), vlan_id) + + instance_id = False + for row in rows: + instance_id = row['instance_id'] + switch_ip = row.switch_ip + etype, nexus_port = '', '' + if row['port_id'] == 'router': + etype, nexus_port = 'vlan', row['port_id'] + auto_untrunk = False + else: + etype, nexus_port = row['port_id'].split(':') + + nxos_db.remove_nexusport_binding(row.port_id, row.vlan_id, + row.switch_ip, + row.instance_id) + # Check whether there are any remaining instances using this + # vlan on this Nexus port. + try: + nxos_db.get_port_vlan_switch_binding(row.port_id, + row.vlan_id, + row.switch_ip) + except cisco_exc.NexusPortBindingNotFound: + try: + if nexus_port and auto_untrunk: + # Untrunk the vlan from this Nexus interface + self._client.disable_vlan_on_trunk_int( + switch_ip, row.vlan_id, etype, nexus_port) + + # Check whether there are any remaining instances + # using this vlan on the Nexus switch. + if auto_delete: + try: + nxos_db.get_nexusvlan_binding(row.vlan_id, + row.switch_ip) + except cisco_exc.NexusPortBindingNotFound: + # Delete this vlan from this switch + self._client.delete_vlan(switch_ip, row.vlan_id) + except Exception: + # The delete vlan operation on the Nexus failed, + # so this delete_port request has failed. For + # consistency, roll back the Nexus database to what + # it was before this request. + with excutils.save_and_reraise_exception(): + nxos_db.add_nexusport_binding(row.port_id, + row.vlan_id, + row.switch_ip, + row.instance_id) + + return instance_id + + def update_port(self, tenant_id, net_id, port_id, port_state, **kwargs): + """Update port. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:update_port() called")) # pragma no cover + + def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id, + **kwargs): + """Plug interfaces. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:plug_interface() called")) # pragma no cover + + def unplug_interface(self, tenant_id, net_id, port_id, **kwargs): + """Unplug interface. + + Not applicable to Nexus plugin. Defined here to satisfy abstract + method requirements. + """ + LOG.debug(_("NexusPlugin:unplug_interface() called") + ) # pragma no cover diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py new file mode 100644 index 00000000..81a93f53 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py @@ -0,0 +1,178 @@ +# Copyright 2011 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, Cisco Systems, Inc. +# @author: Arvind Somya (asomya@cisco.com) Cisco Systems, Inc. + +""" +Nexus-OS XML-based configuration snippets +""" + +import logging + + +LOG = logging.getLogger(__name__) + + +# The following are standard strings, messages used to communicate with Nexus, +EXEC_CONF_SNIPPET = """ + + + <__XML__MODE__exec_configure>%s + + + +""" + +CMD_VLAN_CONF_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + %s + + + + +""" + +CMD_VLAN_ACTIVE_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + active + + + + +""" + +CMD_VLAN_NO_SHUTDOWN_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + + + + + +""" + +CMD_NO_VLAN_CONF_SNIPPET = """ + + + + <__XML__PARAM_value>%s + + + +""" + +CMD_INT_VLAN_HEADER = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + """ + +CMD_VLAN_ID = """ + %s""" + +CMD_VLAN_ADD_ID = """ + %s + """ % CMD_VLAN_ID + +CMD_INT_VLAN_TRAILER = """ + + + + + + + +""" + +CMD_INT_VLAN_SNIPPET = (CMD_INT_VLAN_HEADER + + CMD_VLAN_ID + + CMD_INT_VLAN_TRAILER) + +CMD_INT_VLAN_ADD_SNIPPET = (CMD_INT_VLAN_HEADER + + CMD_VLAN_ADD_ID + + CMD_INT_VLAN_TRAILER) + +CMD_NO_VLAN_INT_SNIPPET = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + + + + %s + + + + + + + + +""" + +FILTER_SHOW_VLAN_BRIEF_SNIPPET = """ + + + + + +""" + +CMD_VLAN_SVI_SNIPPET = """ + + + %s + <__XML__MODE_vlan> + + + + +
+
%s
+
+
+ +
+
+""" + +CMD_NO_VLAN_SVI_SNIPPET = """ + + + + %s + + + +""" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/test/nexus/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/test/nexus/__init__.py new file mode 100644 index 00000000..4ee6bc9c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/test/nexus/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import __builtin__ +setattr(__builtin__, '_', lambda x: x) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py new file mode 100644 index 00000000..d9ca848a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py @@ -0,0 +1,99 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Rohit Agarwalla, Cisco Systems, Inc. + + +class CiscoNEXUSFakeDriver(): + """Nexus Driver Fake Class.""" + + def __init__(self): + pass + + def nxos_connect(self, nexus_host, nexus_ssh_port, nexus_user, + nexus_password): + """Make the fake connection to the Nexus Switch.""" + pass + + def create_xml_snippet(self, cutomized_config): + """Create XML snippet. + + Creates the Proper XML structure for the Nexus Switch + Configuration. + """ + pass + + def enable_vlan(self, mgr, vlanid, vlanname): + """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" + pass + + def disable_vlan(self, mgr, vlanid): + """Delete a VLAN on Nexus Switch given the VLAN ID.""" + pass + + def disable_switch_port(self, mgr, interface): + """Disable trunk mode an interface on Nexus Switch.""" + pass + + def enable_vlan_on_trunk_int(self, mgr, etype, interface, vlanid): + """Enable vlan on trunk interface. + + Enable trunk mode vlan access an interface on Nexus Switch given + VLANID. + """ + pass + + def disable_vlan_on_trunk_int(self, mgr, interface, vlanid): + """Disables vlan in trunk interface. + + Enables trunk mode vlan access an interface on Nexus Switch given + VLANID. + """ + pass + + def create_vlan(self, vlan_name, vlan_id, nexus_host, nexus_user, + nexus_password, nexus_ports, nexus_ssh_port, vlan_ids): + """Create VLAN and enable it on interface. 
+ + Creates a VLAN and Enable on trunk mode an interface on Nexus Switch + given the VLAN ID and Name and Interface Number. + """ + pass + + def delete_vlan(self, vlan_id, nexus_host, nexus_user, nexus_password, + nexus_ports, nexus_ssh_port): + """Delete VLAN. + + Delete a VLAN and Disables trunk mode an interface on Nexus Switch + given the VLAN ID and Interface Number. + """ + pass + + def build_vlans_cmd(self): + """Build a string with all the VLANs on the same Switch.""" + pass + + def add_vlan_int(self, vlan_id, nexus_host, nexus_user, nexus_password, + nexus_ports, nexus_ssh_port, vlan_ids=None): + """Add a vlan from interfaces on the Nexus switch given the VLAN ID.""" + pass + + def remove_vlan_int(self, vlan_id, nexus_host, nexus_user, nexus_password, + nexus_ports, nexus_ssh_port): + """Remove vlan from interfaces. + + Removes a vlan from interfaces on the Nexus switch given the VLAN ID. + """ + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/__init__.py new file mode 100644 index 00000000..b706747c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/constants.py new file mode 100644 index 00000000..4cd14409 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/constants.py @@ -0,0 +1,80 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# service type constants: +CORE = "CORE" +DUMMY = "DUMMY" +LOADBALANCER = "LOADBALANCER" +FIREWALL = "FIREWALL" +VPN = "VPN" +METERING = "METERING" +L3_ROUTER_NAT = "L3_ROUTER_NAT" + + +#maps extension alias to service type +EXT_TO_SERVICE_MAPPING = { + 'dummy': DUMMY, + 'lbaas': LOADBALANCER, + 'fwaas': FIREWALL, + 'vpnaas': VPN, + 'metering': METERING, + 'router': L3_ROUTER_NAT +} + +# TODO(salvatore-orlando): Move these (or derive them) from conf file +ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING, + L3_ROUTER_NAT] + +COMMON_PREFIXES = { + CORE: "", + DUMMY: "/dummy_svc", + LOADBALANCER: "/lb", + FIREWALL: "/fw", + VPN: "/vpn", + METERING: "/metering", + L3_ROUTER_NAT: "", +} + +# Service operation status constants +ACTIVE = "ACTIVE" +DOWN = "DOWN" +PENDING_CREATE = "PENDING_CREATE" +PENDING_UPDATE = "PENDING_UPDATE" +PENDING_DELETE = "PENDING_DELETE" +INACTIVE = "INACTIVE" +ERROR = "ERROR" + +ACTIVE_PENDING_STATUSES = ( + ACTIVE, + PENDING_CREATE, + PENDING_UPDATE +) + +# FWaaS firewall rule action +FWAAS_ALLOW = "allow" +FWAAS_DENY = "deny" + +# L3 Protocol name constants +TCP = "tcp" +UDP = "udp" +ICMP = "icmp" + +# Network Type constants +TYPE_FLAT = 'flat' +TYPE_GRE = 'gre' +TYPE_LOCAL = 'local' +TYPE_VXLAN = 'vxlan' +TYPE_VLAN = 'vlan' +TYPE_NONE = 'none' diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/utils.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/utils.py new file mode 100644 index 00000000..fbc6f69e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/common/utils.py @@ -0,0 +1,67 @@ +# Copyright 2013 Cisco Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common utilities and helper functions for Openstack Networking Plugins. 
+""" + +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron.plugins.common import constants + + +def verify_vlan_range(vlan_range): + """Raise an exception for invalid tags or malformed range.""" + for vlan_tag in vlan_range: + if not utils.is_valid_vlan_tag(vlan_tag): + raise n_exc.NetworkVlanRangeError( + vlan_range=vlan_range, + error=_("%s is not a valid VLAN tag") % vlan_tag) + if vlan_range[1] < vlan_range[0]: + raise n_exc.NetworkVlanRangeError( + vlan_range=vlan_range, + error=_("End of VLAN range is less than start of VLAN range")) + + +def parse_network_vlan_range(network_vlan_range): + """Interpret a string as network[:vlan_begin:vlan_end].""" + entry = network_vlan_range.strip() + if ':' in entry: + try: + network, vlan_min, vlan_max = entry.split(':') + vlan_range = (int(vlan_min), int(vlan_max)) + except ValueError as ex: + raise n_exc.NetworkVlanRangeError(vlan_range=entry, error=ex) + verify_vlan_range(vlan_range) + return network, vlan_range + else: + return entry, None + + +def parse_network_vlan_ranges(network_vlan_ranges_cfg_entries): + """Interpret a list of strings as network[:vlan_begin:vlan_end] entries.""" + networks = {} + for entry in network_vlan_ranges_cfg_entries: + network, vlan_range = parse_network_vlan_range(entry) + if vlan_range: + networks.setdefault(network, []).append(vlan_range) + else: + networks.setdefault(network, []) + return networks + + +def in_pending_status(status): + return status in (constants.PENDING_CREATE, + constants.PENDING_UPDATE, + constants.PENDING_DELETE) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/__init__.py new file mode 100644 index 00000000..25dc46b0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/__init__.py new file mode 100644 index 00000000..25dc46b0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/dispatcher.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/dispatcher.py new file mode 100644 index 00000000..dcea84f7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/dispatcher.py @@ -0,0 +1,132 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from eventlet import greenthread +from eventlet import queue +from heleosapi import constants as h_con +from heleosapi import exceptions as h_exc + +from neutron.openstack.common import log as logging +from neutron.plugins.embrane.agent.operations import router_operations +from neutron.plugins.embrane.common import constants as p_con +from neutron.plugins.embrane.common import contexts as ctx + +LOG = logging.getLogger(__name__) + + +class Dispatcher(object): + + def __init__(self, plugin, async=True): + self._async = async + self._plugin = plugin + self.sync_items = dict() + + def dispatch_l3(self, d_context, args=(), kwargs={}): + item = d_context.item + event = d_context.event + n_context = d_context.n_context + chain = d_context.chain + + item_id = item["id"] + handlers = router_operations.handlers + if event in handlers: + for f in handlers[event]: + first_run = False + if item_id not in self.sync_items: + self.sync_items[item_id] = (queue.Queue(),) + first_run = True + self.sync_items[item_id][0].put( + ctx.OperationContext(event, n_context, item, chain, f, + args, kwargs)) + t = None + if first_run: + t = greenthread.spawn(self._consume_l3, + item_id, + self.sync_items[item_id][0], + self._plugin, + self._async) + self.sync_items[item_id] += (t,) + if not self._async: + t = self.sync_items[item_id][1] + t.wait() + + def _consume_l3(self, sync_item, sync_queue, plugin, a_sync): + current_state = None + while True: + try: + # If the DVA is deleted, the thread (and the associated queue) + # can die as well + if current_state == p_con.Status.DELETED: + del self.sync_items[sync_item] + return + try: + # If synchronous op, empty the queue as fast as possible + operation_context = sync_queue.get( + block=a_sync, + timeout=p_con.QUEUE_TIMEOUT) + except queue.Empty: + del self.sync_items[sync_item] + return + # Execute the preliminary operations + (operation_context.chain and + operation_context.chain.execute_all()) + # Execute the main operation, a transient state is maintained + # so that the consumer can decide if it has + # to be burned to the DB + transient_state = None + try: + dva_state = operation_context.function( + plugin._esm_api, + operation_context.n_context.tenant_id, + operation_context.item, + *operation_context.args, + **operation_context.kwargs) + if dva_state == p_con.Status.DELETED: + transient_state = dva_state + else: + if not dva_state: + transient_state = p_con.Status.ERROR + elif dva_state == h_con.DvaState.POWER_ON: + transient_state = p_con.Status.ACTIVE + else: + transient_state = 
p_con.Status.READY + + except (h_exc.PendingDva, h_exc.DvaNotFound, + h_exc.BrokenInterface, h_exc.DvaCreationFailed, + h_exc.DvaCreationPending, h_exc.BrokenDva, + h_exc.ConfigurationFailed) as ex: + LOG.warning(p_con.error_map[type(ex)] % ex.message) + transient_state = p_con.Status.ERROR + except h_exc.DvaDeleteFailed as ex: + LOG.warning(p_con.error_map[type(ex)] % ex.message) + transient_state = p_con.Status.DELETED + finally: + # if the returned transient state is None, no operations + # are required on the DVA status + if transient_state: + if transient_state == p_con.Status.DELETED: + current_state = plugin._delete_router( + operation_context.n_context, + operation_context.item["id"]) + # Error state cannot be reverted + elif transient_state != p_con.Status.ERROR: + current_state = plugin._update_neutron_state( + operation_context.n_context, + operation_context.item, + transient_state) + except Exception: + LOG.exception(_("Unhandled exception occurred")) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/operations/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/operations/__init__.py new file mode 100644 index 00000000..25dc46b0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/operations/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/operations/router_operations.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/operations/router_operations.py new file mode 100644 index 00000000..a81c6aab --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/agent/operations/router_operations.py @@ -0,0 +1,154 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
+ +import functools + +from heleosapi import exceptions as h_exc + +from neutron.openstack.common import log as logging +from neutron.plugins.embrane.common import constants as p_con + +LOG = logging.getLogger(__name__) +handlers = dict() + + +def handler(event, handler): + def wrap(f): + if event not in handler.keys(): + new_func_list = [f] + handler[event] = new_func_list + else: + handler[event].append(f) + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + return f(*args, **kwargs) + return wrapped_f + return wrap + + +@handler(p_con.Events.CREATE_ROUTER, handlers) +def _create_dva_and_assign_address(api, tenant_id, neutron_router, + flavor, utif_info=None, + ip_allocation_info=None): + """Creates a new router, and assign the gateway interface if any.""" + + dva = api.create_router(tenant_id=tenant_id, + router_id=neutron_router["id"], + name=neutron_router["name"], + flavor=flavor, + up=neutron_router["admin_state_up"]) + try: + if utif_info: + api.grow_interface(utif_info, neutron_router["admin_state_up"], + tenant_id, neutron_router["id"]) + if ip_allocation_info: + dva = api.allocate_address(neutron_router["id"], + neutron_router["admin_state_up"], + ip_allocation_info) + except h_exc.PreliminaryOperationsFailed as ex: + raise h_exc.BrokenInterface(err_msg=ex.message) + + state = api.extract_dva_state(dva) + return state + + +@handler(p_con.Events.UPDATE_ROUTER, handlers) +def _update_dva_and_assign_address(api, tenant_id, neutron_router, + utif_info=None, ip_allocation_info=None, + routes_info=[]): + name = neutron_router["name"] + up = neutron_router["admin_state_up"] + r_id = neutron_router["id"] + if ip_allocation_info or routes_info: + up = True + dva = api.update_dva(tenant_id=tenant_id, router_id=r_id, name=name, + up=up, utif_info=utif_info) + if ip_allocation_info: + api.allocate_address(r_id, up, ip_allocation_info) + + if routes_info: + api.delete_extra_routes(r_id, up) + api.set_extra_routes(r_id, neutron_router["admin_state_up"], + routes_info) + + return api.extract_dva_state(dva) + + +@handler(p_con.Events.DELETE_ROUTER, handlers) +def _delete_dva(api, tenant_id, neutron_router): + try: + api.delete_dva(tenant_id, neutron_router["id"]) + except h_exc.DvaNotFound: + LOG.warning(_("The router %s had no physical representation," + "likely already deleted"), neutron_router["id"]) + return p_con.Status.DELETED + + +@handler(p_con.Events.GROW_ROUTER_IF, handlers) +def _grow_dva_iface_and_assign_address(api, tenant_id, neutron_router, + utif_info=None, + ip_allocation_info=None): + try: + dva = api.grow_interface(utif_info, neutron_router["admin_state_up"], + tenant_id, neutron_router["id"]) + if ip_allocation_info: + dva = api.allocate_address(neutron_router["id"], + neutron_router["admin_state_up"], + ip_allocation_info) + except h_exc.PreliminaryOperationsFailed as ex: + raise h_exc.BrokenInterface(err_msg=ex.message) + + state = api.extract_dva_state(dva) + return state + + +@handler(p_con.Events.SHRINK_ROUTER_IF, handlers) +def _shrink_dva_iface(api, tenant_id, neutron_router, port_id): + try: + dva = api.shrink_interface(tenant_id, neutron_router["id"], + neutron_router["admin_state_up"], port_id) + except h_exc.InterfaceNotFound: + LOG.warning(_("Interface %s not found in the heleos back-end," + "likely already deleted"), port_id) + return (p_con.Status.ACTIVE if neutron_router["admin_state_up"] else + p_con.Status.READY) + except h_exc.PreliminaryOperationsFailed as ex: + raise h_exc.BrokenInterface(err_msg=ex.message) + state = api.extract_dva_state(dva) + 
return state + + +@handler(p_con.Events.SET_NAT_RULE, handlers) +def _create_nat_rule(api, tenant_id, neutron_router, nat_info=None): + + dva = api.create_nat_entry(neutron_router["id"], + neutron_router["admin_state_up"], nat_info) + + state = api.extract_dva_state(dva) + return state + + +@handler(p_con.Events.RESET_NAT_RULE, handlers) +def _delete_nat_rule(api, tenant_id, neutron_router, floating_ip_id): + + dva = api.remove_nat_entry(neutron_router["id"], + neutron_router["admin_state_up"], + floating_ip_id) + + state = api.extract_dva_state(dva) + return state diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/base_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/base_plugin.py new file mode 100644 index 00000000..3434a96d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/base_plugin.py @@ -0,0 +1,373 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from heleosapi import backend_operations as h_op +from heleosapi import constants as h_con +from heleosapi import exceptions as h_exc +from oslo.config import cfg +from sqlalchemy.orm import exc + +from neutron.common import constants as l3_constants +from neutron.common import exceptions as neutron_exc +from neutron.db import extraroute_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.extensions import l3 +from neutron.openstack.common import log as logging +from neutron.plugins.embrane.agent import dispatcher +from neutron.plugins.embrane.common import config # noqa +from neutron.plugins.embrane.common import constants as p_con +from neutron.plugins.embrane.common import contexts as embrane_ctx +from neutron.plugins.embrane.common import operation +from neutron.plugins.embrane.common import utils + +LOG = logging.getLogger(__name__) +conf = cfg.CONF.heleos + + +class EmbranePlugin(object): + """Embrane Neutron plugin. + + uses the heleos(c) platform and a support L2 plugin to leverage networking + in cloud environments. 
+ + """ + _l3super = extraroute_db.ExtraRoute_db_mixin + + def __init__(self): + pass + + def _run_embrane_config(self): + # read configurations + config_esm_mgmt = conf.esm_mgmt + config_admin_username = conf.admin_username + config_admin_password = conf.admin_password + config_router_image_id = conf.router_image + config_security_zones = {h_con.SzType.IB: conf.inband_id, + h_con.SzType.OOB: conf.oob_id, + h_con.SzType.MGMT: conf.mgmt_id, + h_con.SzType.DUMMY: conf.dummy_utif_id} + config_resource_pool = conf.resource_pool_id + self._embrane_async = conf.async_requests + self._esm_api = h_op.BackendOperations( + esm_mgmt=config_esm_mgmt, + admin_username=config_admin_username, + admin_password=config_admin_password, + router_image_id=config_router_image_id, + security_zones=config_security_zones, + resource_pool=config_resource_pool) + self._dispatcher = dispatcher.Dispatcher(self, self._embrane_async) + + def _make_router_dict(self, *args, **kwargs): + return self._l3super._make_router_dict(self, *args, **kwargs) + + def _delete_router(self, context, router_id): + self._l3super.delete_router(self, context, router_id) + + def _update_db_router_state(self, context, neutron_router, dva_state): + if not dva_state: + new_state = p_con.Status.ERROR + elif dva_state == h_con.DvaState.POWER_ON: + new_state = p_con.Status.ACTIVE + else: + new_state = p_con.Status.READY + self._set_db_router_state(context, neutron_router, new_state) + return new_state + + def _set_db_router_state(self, context, neutron_router, new_state): + return utils.set_db_item_state(context, neutron_router, new_state) + + def _update_db_interfaces_state(self, context, neutron_router): + router_ports = self.get_ports(context, + {"device_id": [neutron_router["id"]]}) + self._esm_api.update_ports_status(neutron_router["id"], router_ports) + for port in router_ports: + db_port = self._get_port(context, port["id"]) + db_port["status"] = port["status"] + context.session.merge(db_port) + + def _update_neutron_state(self, context, neutron_router, state): + try: + self._update_db_interfaces_state(context, neutron_router) + except Exception: + LOG.exception(_("Unhandled exception occurred")) + return self._set_db_router_state(context, neutron_router, state) + + def _retrieve_prefix_from_port(self, context, neutron_port): + subnet_id = neutron_port["fixed_ips"][0]["subnet_id"] + subnet = utils.retrieve_subnet(context, subnet_id) + prefix = subnet["cidr"].split("/")[1] + return prefix + + # L3 extension + def create_router(self, context, router): + r = router["router"] + self._get_tenant_id_for_create(context, r) + db_router = self._l3super.create_router(self, context, router) + neutron_router = self._get_router(context, db_router['id']) + gw_port = neutron_router.gw_port + # For now, only small flavor is used + utif_info = (self._plugin_support.retrieve_utif_info(context, + gw_port) + if gw_port else None) + ip_allocation_info = (utils.retrieve_ip_allocation_info(context, + gw_port) + if gw_port else None) + neutron_router = self._l3super._get_router(self, context, + neutron_router["id"]) + neutron_router["status"] = p_con.Status.CREATING + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.CREATE_ROUTER, neutron_router, context, None), + args=(h_con.Flavor.SMALL, utif_info, ip_allocation_info)) + return self._make_router_dict(neutron_router) + + def update_router(self, context, id, router): + db_router = self._l3super.update_router(self, context, id, router) + neutron_router = 
self._get_router(context, db_router['id']) + gw_port = neutron_router.gw_port + utif_info = (self._plugin_support.retrieve_utif_info(context, + gw_port) + if gw_port else None) + ip_allocation_info = (utils.retrieve_ip_allocation_info(context, + gw_port) + if gw_port else None) + + routes_info = router["router"].get("routes") + + neutron_router = self._l3super._get_router(self, context, id) + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.UPDATE_ROUTER, neutron_router, context, + state_change), + args=(utif_info, ip_allocation_info, routes_info)) + return self._make_router_dict(neutron_router) + + def get_router(self, context, id, fields=None): + """Ensures that id does exist in the ESM.""" + neutron_router = self._get_router(context, id) + + try: + if neutron_router["status"] != p_con.Status.CREATING: + self._esm_api.get_dva(id) + except h_exc.DvaNotFound: + + LOG.error(_("The following routers have not physical match: %s"), + id) + self._set_db_router_state(context, neutron_router, + p_con.Status.ERROR) + + LOG.debug(_("Requested router: %s"), neutron_router) + return self._make_router_dict(neutron_router, fields) + + def get_routers(self, context, filters=None, fields=None, sorts=None, + limit=None, marker=None, page_reverse=False): + """Retrieves the router list defined by the incoming filters.""" + router_query = self._apply_filters_to_query( + self._model_query(context, l3_db.Router), + l3_db.Router, filters) + id_list = [x["id"] for x in router_query + if x["status"] != p_con.Status.CREATING] + try: + self._esm_api.get_dvas(id_list) + except h_exc.DvaNotFound: + LOG.error(_("The following routers have not physical match: %s"), + repr(id_list)) + error_routers = [] + for id in id_list: + try: + error_routers.append(self._get_router(context, id)) + except l3.RouterNotFound: + pass + for error_router in error_routers: + self._set_db_router_state(context, error_router, + p_con.Status.ERROR) + return [self._make_router_dict(router, fields) + for router in router_query] + + def delete_router(self, context, id): + """Deletes the DVA with the specific router id.""" + # Copy of the parent validation code, shouldn't the base modules + # provide functions for validating operations? 
+ device_owner_router_intf = l3_constants.DEVICE_OWNER_ROUTER_INTF + fips = self.get_floatingips_count(context.elevated(), + filters={"router_id": [id]}) + if fips: + raise l3.RouterInUse(router_id=id) + + device_filter = {"device_id": [id], + "device_owner": [device_owner_router_intf]} + ports = self.get_ports_count(context.elevated(), + filters=device_filter) + if ports: + raise l3.RouterInUse(router_id=id) + neutron_router = self._get_router(context, id) + state_change = operation.Operation(self._set_db_router_state, + args=(context, neutron_router, + p_con.Status.DELETING)) + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.DELETE_ROUTER, neutron_router, context, + state_change), args=()) + LOG.debug(_("Deleting router=%s"), neutron_router) + return neutron_router + + def add_router_interface(self, context, router_id, interface_info): + """Grows DVA interface in the specified subnet.""" + neutron_router = self._get_router(context, router_id) + rport_qry = context.session.query(models_v2.Port) + ports = rport_qry.filter_by( + device_id=router_id).all() + if len(ports) >= p_con.UTIF_LIMIT: + raise neutron_exc.BadRequest( + resource=router_id, + msg=("this router doesn't support more than " + + str(p_con.UTIF_LIMIT) + " interfaces")) + neutron_router_iface = self._l3super.add_router_interface( + self, context, router_id, interface_info) + port = self._get_port(context, neutron_router_iface["port_id"]) + utif_info = self._plugin_support.retrieve_utif_info(context, port) + ip_allocation_info = utils.retrieve_ip_allocation_info(context, + port) + state_change = operation.Operation(self._set_db_router_state, + args=(context, neutron_router, + p_con.Status.UPDATING)) + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.GROW_ROUTER_IF, neutron_router, context, + state_change), + args=(utif_info, ip_allocation_info)) + return neutron_router_iface + + def remove_router_interface(self, context, router_id, interface_info): + port_id = None + if "port_id" in interface_info: + port_id = interface_info["port_id"] + elif "subnet_id" in interface_info: + subnet_id = interface_info["subnet_id"] + subnet = utils.retrieve_subnet(context, subnet_id) + rport_qry = context.session.query(models_v2.Port) + ports = rport_qry.filter_by( + device_id=router_id, + device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF, + network_id=subnet["network_id"]) + for p in ports: + if p["fixed_ips"][0]["subnet_id"] == subnet_id: + port_id = p["id"] + break + neutron_router = self._get_router(context, router_id) + self._l3super.remove_router_interface(self, context, router_id, + interface_info) + state_change = operation.Operation(self._set_db_router_state, + args=(context, neutron_router, + p_con.Status.UPDATING)) + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.SHRINK_ROUTER_IF, neutron_router, context, + state_change), + args=(port_id,)) + + def create_floatingip(self, context, floatingip): + result = self._l3super.create_floatingip( + self, context, floatingip) + + if result["port_id"]: + neutron_router = self._get_router(context, result["router_id"]) + db_fixed_port = self._get_port(context, result["port_id"]) + fixed_prefix = self._retrieve_prefix_from_port(context, + db_fixed_port) + db_floating_port = neutron_router["gw_port"] + floating_prefix = self._retrieve_prefix_from_port( + context, db_floating_port) + nat_info = utils.retrieve_nat_info(context, result, + fixed_prefix, + floating_prefix, + 
neutron_router) + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.SET_NAT_RULE, neutron_router, context, + state_change), + args=(nat_info,)) + return result + + def update_floatingip(self, context, id, floatingip): + db_fip = self._l3super.get_floatingip(self, context, id) + result = self._l3super.update_floatingip(self, context, id, + floatingip) + + if db_fip["port_id"] and db_fip["port_id"] != result["port_id"]: + neutron_router = self._get_router(context, db_fip["router_id"]) + fip_id = db_fip["id"] + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.RESET_NAT_RULE, neutron_router, context, + state_change), + args=(fip_id,)) + if result["port_id"]: + neutron_router = self._get_router(context, result["router_id"]) + db_fixed_port = self._get_port(context, result["port_id"]) + fixed_prefix = self._retrieve_prefix_from_port(context, + db_fixed_port) + db_floating_port = neutron_router["gw_port"] + floating_prefix = self._retrieve_prefix_from_port( + context, db_floating_port) + nat_info = utils.retrieve_nat_info(context, result, + fixed_prefix, + floating_prefix, + neutron_router) + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.SET_NAT_RULE, neutron_router, context, + state_change), + args=(nat_info,)) + return result + + def disassociate_floatingips(self, context, port_id): + try: + fip_qry = context.session.query(l3_db.FloatingIP) + floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one() + router_id = floating_ip["router_id"] + except exc.NoResultFound: + return + self._l3super.disassociate_floatingips(self, context, port_id) + if router_id: + neutron_router = self._get_router(context, router_id) + fip_id = floating_ip["id"] + state_change = operation.Operation( + self._set_db_router_state, + args=(context, neutron_router, p_con.Status.UPDATING)) + + self._dispatcher.dispatch_l3( + d_context=embrane_ctx.DispatcherContext( + p_con.Events.RESET_NAT_RULE, neutron_router, context, + state_change), + args=(fip_id,)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/__init__.py new file mode 100644 index 00000000..25dc46b0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
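The L3 methods in base_plugin.py above all follow the same pattern: wrap the DB status change in an Operation, attach it as the chain of a DispatcherContext, and let the Dispatcher run it before the backend call. A condensed sketch of that flow is shown below; it uses toy stand-ins rather than the real heleosapi/neutron types, so it is illustrative only:

# Toy illustration of the "state change as preliminary operation" pattern.
class Operation(object):
    def __init__(self, procedure, args=()):
        self._procedure = procedure
        self.args = args

    def execute_all(self):
        # The real class walks a linked list of operations; one step suffices here.
        self._procedure(*self.args)


def set_router_status(router, status):
    router["status"] = status


router = {"id": "r1", "status": "ACTIVE"}
state_change = Operation(set_router_status, args=(router, "PENDING_UPDATE"))

# Dispatcher side: run the preliminary chain first, then call the event handler
# and persist whatever state the handler reports back.
state_change.execute_all()
assert router["status"] == "PENDING_UPDATE"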
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/config.py new file mode 100644 index 00000000..2c13a08f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/config.py @@ -0,0 +1,47 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from oslo.config import cfg + + +heleos_opts = [ + cfg.StrOpt('esm_mgmt', + help=_('ESM management root address')), + cfg.StrOpt('admin_username', default='admin', + help=_('ESM admin username.')), + cfg.StrOpt('admin_password', + secret=True, + help=_('ESM admin password.')), + cfg.StrOpt('router_image', + help=_('Router image id (Embrane FW/VPN)')), + cfg.StrOpt('inband_id', + help=_('In band Security Zone id')), + cfg.StrOpt('oob_id', + help=_('Out of band Security Zone id')), + cfg.StrOpt('mgmt_id', + help=_('Management Security Zone id')), + cfg.StrOpt('dummy_utif_id', + help=_('Dummy user traffic Security Zone id')), + cfg.StrOpt('resource_pool_id', default='default', + help=_('Shared resource pool id')), + cfg.BoolOpt('async_requests', default=True, + help=_('Define if the requests have ' + 'run asynchronously or not')), +] + + +cfg.CONF.register_opts(heleos_opts, "heleos") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/constants.py new file mode 100644 index 00000000..d842013b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/constants.py @@ -0,0 +1,70 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
+ +from heleosapi import exceptions as h_exc + +from neutron.plugins.common import constants + + +# Router specific constants +UTIF_LIMIT = 7 +QUEUE_TIMEOUT = 300 + + +class Status: + # Transient + CREATING = constants.PENDING_CREATE + UPDATING = constants.PENDING_UPDATE + DELETING = constants.PENDING_DELETE + # Final + ACTIVE = constants.ACTIVE + ERROR = constants.ERROR + READY = constants.INACTIVE + DELETED = "DELETED" # not visible + + +class Events: + CREATE_ROUTER = "create_router" + UPDATE_ROUTER = "update_router" + DELETE_ROUTER = "delete_router" + GROW_ROUTER_IF = "grow_router_if" + SHRINK_ROUTER_IF = "shrink_router_if" + SET_NAT_RULE = "set_nat_rule" + RESET_NAT_RULE = "reset_nat_rule" + +_DVA_PENDING_ERROR_MSG = _("Dva is pending for the following reason: %s") +_DVA_NOT_FOUNT_ERROR_MSG = _("Dva can't be found to execute the operation, " + "probably was cancelled through the heleos UI") +_DVA_BROKEN_ERROR_MSG = _("Dva seems to be broken for reason %s") +_DVA_BROKEN_INTERFACE_ERROR_MSG = _("Dva interface seems to be broken " + "for reason %s") +_DVA_CREATION_FAILED_ERROR_MSG = _("Dva creation failed reason %s") +_DVA_CREATION_PENDING_ERROR_MSG = _("Dva creation is in pending state " + "for reason %s") +_CFG_FAILED_ERROR_MSG = _("Dva configuration failed for reason %s") +_DVA_DEL_FAILED_ERROR_MSG = _("Failed to delete the backend " + "router for reason %s. Please remove " + "it manually through the heleos UI") + +error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG, + h_exc.DvaNotFound: _DVA_NOT_FOUNT_ERROR_MSG, + h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG, + h_exc.BrokenInterface: _DVA_BROKEN_INTERFACE_ERROR_MSG, + h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG, + h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG, + h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG, + h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/contexts.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/contexts.py new file mode 100644 index 00000000..d1182fcf --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/contexts.py @@ -0,0 +1,38 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + + +class DispatcherContext(object): + + def __init__(self, event, item, neutron_context, chain=None): + self.event = event + self.item = item + self.n_context = neutron_context + self.chain = chain + + +class OperationContext(DispatcherContext): + """Operational context. 
+ + contains all the parameters needed to execute a status aware operation + + """ + def __init__(self, event, context, item, chain, function, args, kwargs): + super(OperationContext, self).__init__(event, item, context, chain) + self.function = function + self.args = args + self.kwargs = kwargs diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/exceptions.py new file mode 100644 index 00000000..a023717b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/exceptions.py @@ -0,0 +1,26 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from neutron.common import exceptions as neutron_exec + + +class EmbranePluginException(neutron_exec.NeutronException): + message = _("An unexpected error occurred:%(err_msg)s") + + +class UnsupportedException(EmbranePluginException): + message = _("%(err_msg)s") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/operation.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/operation.py new file mode 100644 index 00000000..bf779ef0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/operation.py @@ -0,0 +1,49 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + + +class Operation(object): + """Defines a series of operations which shall be executed in order. 
+ + the operations expected are procedures, return values are discarded + + """ + + def __init__(self, procedure, args=(), kwargs={}, nextop=None): + self._procedure = procedure + self.args = args[:] + self.kwargs = dict(kwargs) + self.nextop = nextop + + def execute(self): + args = self.args + self._procedure(*args, **self.kwargs) + return self.nextop + + def execute_all(self): + nextop = self.execute() + while nextop: + nextop = self.execute_all() + + def has_next(self): + return self.nextop is not None + + def add_bottom_operation(self, operation): + op = self + while op.has_next(): + op = op.nextop + op.nextop = operation diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/utils.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/utils.py new file mode 100644 index 00000000..cd50fa62 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/common/utils.py @@ -0,0 +1,71 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from heleosapi import info as h_info + +from neutron.common import constants +from neutron.db import models_v2 +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +def set_db_item_state(context, neutron_item, new_state): + with context.session.begin(subtransactions=True): + if neutron_item["status"] != new_state: + neutron_item["status"] = new_state + context.session.merge(neutron_item) + + +def retrieve_subnet(context, subnet_id): + return (context.session.query( + models_v2.Subnet).filter(models_v2.Subnet.id == subnet_id).one()) + + +def retrieve_ip_allocation_info(context, neutron_port): + """Retrieves ip allocation info for a specific port if any.""" + + try: + subnet_id = neutron_port["fixed_ips"][0]["subnet_id"] + except (KeyError, IndexError): + LOG.info(_("No ip allocation set")) + return + subnet = retrieve_subnet(context, subnet_id) + allocated_ip = neutron_port["fixed_ips"][0]["ip_address"] + is_gw_port = (neutron_port["device_owner"] == + constants.DEVICE_OWNER_ROUTER_GW) + gateway_ip = subnet["gateway_ip"] + + ip_allocation_info = h_info.IpAllocationInfo( + is_gw=is_gw_port, + ip_version=subnet["ip_version"], + prefix=subnet["cidr"].split("/")[1], + ip_address=allocated_ip, + port_id=neutron_port["id"], + gateway_ip=gateway_ip) + + return ip_allocation_info + + +def retrieve_nat_info(context, fip, fixed_prefix, floating_prefix, router): + nat_info = h_info.NatInfo(source_address=fip["floating_ip_address"], + source_prefix=floating_prefix, + destination_address=fip["fixed_ip_address"], + destination_prefix=fixed_prefix, + floating_ip_id=fip["id"], + fixed_port_id=fip["port_id"]) + return nat_info diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/__init__.py new file mode 100644 index 00000000..25dc46b0 --- /dev/null +++ 
b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/__init__.py new file mode 100644 index 00000000..25dc46b0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py new file mode 100644 index 00000000..cb888528 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py @@ -0,0 +1,22 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from neutron.db import db_base_plugin_v2 + + +class FakeL2Plugin(db_base_plugin_v2.NeutronDbPluginV2): + supported_extension_aliases = [] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py new file mode 100644 index 00000000..b63c76f6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py @@ -0,0 +1,43 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from heleosapi import info as h_info + +from neutron.common import constants +from neutron import manager +from neutron.plugins.embrane.l2base import support_base as base + + +class FakePluginSupport(base.SupportBase): + + def __init__(self): + super(FakePluginSupport, self).__init__() + + def retrieve_utif_info(self, context, neutron_port): + plugin = manager.NeutronManager.get_plugin() + network_id = neutron_port["network_id"] + network = plugin._get_network(context, network_id) + is_gw = (neutron_port["device_owner"] == + constants.DEVICE_OWNER_ROUTER_GW) + result = h_info.UtifInfo(vlan=0, + network_name=network["name"], + network_id=network["id"], + is_gw=is_gw, + owner_tenant=network["tenant_id"], + port_id=neutron_port["id"], + mac_address=neutron_port["mac_address"]) + return result diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/openvswitch/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/openvswitch/__init__.py new file mode 100644 index 00000000..25dc46b0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/openvswitch/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py new file mode 100644 index 00000000..b5635a86 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py @@ -0,0 +1,56 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
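# --- Illustrative sketch, not part of the patch: the kind of data
# FakePluginSupport above works with. The dict values are hypothetical; the
# keys are exactly the ones retrieve_utif_info() reads before building a
# heleosapi UtifInfo for the ESM.
sample_port = {
    "id": "port-uuid",                         # hypothetical identifier
    "network_id": "network-uuid",              # hypothetical identifier
    "device_owner": "network:router_gateway",  # value of DEVICE_OWNER_ROUTER_GW
    "mac_address": "fa:16:3e:00:00:01",
}
# With a running Neutron plugin, the support class would be exercised as:
#   utif = FakePluginSupport().retrieve_utif_info(admin_context, sample_port)
# and utif would then carry vlan, network_name, network_id, is_gw,
# owner_tenant, port_id and mac_address for building the user traffic
# security zone.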
+ +from heleosapi import info as h_info + +from neutron.common import constants +from neutron import manager +from neutron.plugins.embrane.l2base import support_base as base +from neutron.plugins.embrane.l2base import support_exceptions as exc +from neutron.plugins.openvswitch import ovs_db_v2 + + +class OpenvswitchSupport(base.SupportBase): + """OpenVSwitch plugin support. + + Obtains the informations needed to build the user security zones + + """ + + def __init__(self): + super(OpenvswitchSupport, self).__init__() + + def retrieve_utif_info(self, context, neutron_port): + plugin = manager.NeutronManager.get_plugin() + session = context.session + network_id = neutron_port["network_id"] + network_binding = ovs_db_v2.get_network_binding(session, network_id) + if not network_binding["segmentation_id"]: + raise exc.UtifInfoError( + err_msg=_("No segmentation_id found for the network, " + "please be sure that tenant_network_type is vlan")) + network = plugin._get_network(context, network_id) + is_gw = (neutron_port["device_owner"] == + constants.DEVICE_OWNER_ROUTER_GW) + result = h_info.UtifInfo(vlan=network_binding["segmentation_id"], + network_name=network["name"], + network_id=network["id"], + is_gw=is_gw, + owner_tenant=network["tenant_id"], + port_id=neutron_port["id"], + mac_address=neutron_port["mac_address"]) + return result diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/support_base.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/support_base.py new file mode 100644 index 00000000..8104775a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/support_base.py @@ -0,0 +1,48 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class SupportBase(object): + """abstract support class. + + Defines the methods a plugin support should implement to be used as + the L2 base for Embrane plugin. + + """ + + @abc.abstractmethod + def __init__(self): + pass + + @abc.abstractmethod + def retrieve_utif_info(self, context, neutron_port=None, network=None): + """Retrieve specific network info. + + each plugin support, querying its own DB, can collect all the + information needed by the ESM in order to create the + user traffic security zone. + + :param interface_info: the foo parameter + :param context: neutron request context + :returns: heleosapi.info.UtifInfo -- specific network info + :raises: UtifInfoError + """ diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/support_exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/support_exceptions.py new file mode 100644 index 00000000..ac935840 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/l2base/support_exceptions.py @@ -0,0 +1,23 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from neutron.plugins.embrane.common import exceptions as embrane_exc + + +class UtifInfoError(embrane_exc.EmbranePluginException): + message = _("Cannot retrieve utif info for the following reason: " + "%(err_msg)s") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/__init__.py new file mode 100644 index 00000000..25dc46b0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/embrane_fake_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/embrane_fake_plugin.py new file mode 100644 index 00000000..bcfab70f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/embrane_fake_plugin.py @@ -0,0 +1,32 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. 
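# --- Illustrative sketch, not part of the patch: assuming
# EmbranePluginException follows the usual NeutronException pattern, the
# keyword passed when raising UtifInfoError fills the %(err_msg)s placeholder
# of the message template defined in support_exceptions above. The helper
# name below is made up; the raise mirrors the one in OpenvswitchSupport when
# no VLAN binding is found.
from neutron.plugins.embrane.l2base import support_exceptions as exc


def abort_without_vlan_binding(network_id):
    # str() of the exception would read:
    # "Cannot retrieve utif info for the following reason: ..."
    raise exc.UtifInfoError(err_msg="no segmentation_id bound to network %s"
                            % network_id)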
+ +from neutron.db import extraroute_db +from neutron.plugins.embrane import base_plugin as base +from neutron.plugins.embrane.l2base.fake import fake_l2_plugin as l2 +from neutron.plugins.embrane.l2base.fake import fakeplugin_support as sup + + +class EmbraneFakePlugin(base.EmbranePlugin, extraroute_db.ExtraRoute_db_mixin, + l2.FakeL2Plugin): + _plugin_support = sup.FakePluginSupport() + + def __init__(self): + '''First run plugin specific initialization, then Embrane's.''' + self.supported_extension_aliases += ["extraroute", "router"] + l2.FakeL2Plugin.__init__(self) + self._run_embrane_config() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/embrane_ovs_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/embrane_ovs_plugin.py new file mode 100644 index 00000000..3318885e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/embrane/plugins/embrane_ovs_plugin.py @@ -0,0 +1,36 @@ +# Copyright 2013 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. + +from neutron.plugins.embrane import base_plugin as base +from neutron.plugins.embrane.l2base.openvswitch import openvswitch_support +from neutron.plugins.openvswitch import ovs_neutron_plugin as l2 + + +class EmbraneOvsPlugin(base.EmbranePlugin, l2.OVSNeutronPluginV2): + '''EmbraneOvsPlugin. + + This plugin uses OpenVSwitch specific L2 plugin for providing L2 networks + and the base EmbranePlugin for L3. + + ''' + _plugin_support = openvswitch_support.OpenvswitchSupport() + + def __init__(self): + '''First run plugin specific initialization, then Embrane's.''' + self._supported_extension_aliases.remove("l3_agent_scheduler") + l2.OVSNeutronPluginV2.__init__(self) + self._run_embrane_config() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/__init__.py new file mode 100644 index 00000000..0089853d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
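The EmbraneFakePlugin and EmbraneOvsPlugin classes added above each compose a complete Neutron core plugin by mixing the Embrane L3 base into an existing L2 plugin. As a minimal sketch (assuming the standard core_plugin option and the module path introduced by this patch), a deployment using the Open vSwitch variant would point neutron.conf at the new class:

    [DEFAULT]
    core_plugin = neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin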
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/__init__.py new file mode 100644 index 00000000..0089853d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py new file mode 100644 index 00000000..0f45cc3f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py @@ -0,0 +1,473 @@ +#Copyright 2013 Cloudbase Solutions SRL +#Copyright 2013 Pedro Navarro Perez +#All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Pedro Navarro Perez +# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +import platform +import re +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import constants as n_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.hyperv.agent import utils +from neutron.plugins.hyperv.agent import utilsfactory +from neutron.plugins.hyperv.common import constants + +LOG = logging.getLogger(__name__) + +agent_opts = [ + cfg.ListOpt( + 'physical_network_vswitch_mappings', + default=[], + help=_('List of <physical_network>:<vswitch> ' + 'where the physical networks can be expressed with ' + 'wildcards, e.g.: ."*:external"')), + cfg.StrOpt( + 'local_network_vswitch', + default='private', + help=_('Private vswitch name used for local networks')), + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), + cfg.BoolOpt('enable_metrics_collection', + default=False, + help=_('Enables metrics collections for switch ports by using ' + 'Hyper-V\'s metric APIs. 
Collected data can be ' + 'retrieved by other apps and services, e.g.: ' + 'Ceilometer. Requires Hyper-V / Windows Server 2012 ' + 'and above')), + cfg.IntOpt('metrics_max_retries', + default=100, + help=_('Specifies the maximum number of retries to enable ' + 'Hyper-V\'s port metrics collection. The agent will try ' + 'to enable the feature once every polling_interval ' + 'period for at most metrics_max_retries or until it ' + 'succeeds.')) +] + + +CONF = cfg.CONF +CONF.register_opts(agent_opts, "AGENT") +config.register_agent_state_opts_helper(cfg.CONF) + + +class HyperVSecurityAgent(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcMixin): + # Set RPC API version to 1.1 by default. + RPC_API_VERSION = '1.1' + + def __init__(self, context, plugin_rpc): + super(HyperVSecurityAgent, self).__init__() + self.context = context + self.plugin_rpc = plugin_rpc + + if sg_rpc.is_firewall_enabled(): + self.init_firewall() + self._setup_rpc() + + def _setup_rpc(self): + self.topic = topics.AGENT + self.endpoints = [HyperVSecurityCallbackMixin(self)] + consumers = [[topics.SECURITY_GROUP, topics.UPDATE]] + + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + +class HyperVSecurityCallbackMixin(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + # Set RPC API version to 1.1 by default. + RPC_API_VERSION = '1.1' + + def __init__(self, sg_agent): + super(HyperVSecurityCallbackMixin, self).__init__() + self.sg_agent = sg_agent + + +class HyperVPluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class HyperVNeutronAgent(n_rpc.RpcCallback): + # Set RPC API version to 1.0 by default. + RPC_API_VERSION = '1.0' + + def __init__(self): + super(HyperVNeutronAgent, self).__init__() + self._utils = utilsfactory.get_hypervutils() + self._polling_interval = CONF.AGENT.polling_interval + self._load_physical_network_mappings() + self._network_vswitch_map = {} + self._port_metric_retries = {} + self._set_agent_state() + self._setup_rpc() + + def _set_agent_state(self): + self.agent_state = { + 'binary': 'neutron-hyperv-agent', + 'host': cfg.CONF.host, + 'topic': n_const.L2_AGENT_TOPIC, + 'configurations': {'vswitch_mappings': + self._physical_network_mappings}, + 'agent_type': n_const.AGENT_TYPE_HYPERV, + 'start_flag': True} + + def _report_state(self): + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception as ex: + LOG.exception(_("Failed reporting state! 
%s"), ex) + + def _setup_rpc(self): + self.agent_id = 'hyperv_%s' % platform.node() + self.topic = topics.AGENT + self.plugin_rpc = HyperVPluginApi(topics.PLUGIN) + + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [self] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [topics.PORT, topics.DELETE], + [constants.TUNNEL, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + self.sec_groups_agent = HyperVSecurityAgent( + self.context, self.plugin_rpc) + report_interval = CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def _load_physical_network_mappings(self): + self._physical_network_mappings = {} + for mapping in CONF.AGENT.physical_network_vswitch_mappings: + parts = mapping.split(':') + if len(parts) != 2: + LOG.debug(_('Invalid physical network mapping: %s'), mapping) + else: + pattern = re.escape(parts[0].strip()).replace('\\*', '.*') + vswitch = parts[1].strip() + self._physical_network_mappings[pattern] = vswitch + + def _get_vswitch_for_physical_network(self, phys_network_name): + for pattern in self._physical_network_mappings: + if phys_network_name is None: + phys_network_name = '' + if re.match(pattern, phys_network_name): + return self._physical_network_mappings[pattern] + # Not found in the mappings, the vswitch has the same name + return phys_network_name + + def _get_network_vswitch_map_by_port_id(self, port_id): + for network_id, map in self._network_vswitch_map.iteritems(): + if port_id in map['ports']: + return (network_id, map) + + def network_delete(self, context, network_id=None): + LOG.debug(_("network_delete received. 
" + "Deleting network %s"), network_id) + # The network may not be defined on this agent + if network_id in self._network_vswitch_map: + self._reclaim_local_network(network_id) + else: + LOG.debug(_("Network %s not defined on agent."), network_id) + + def port_delete(self, context, port_id=None): + LOG.debug(_("port_delete received")) + self._port_unbound(port_id) + + def port_update(self, context, port=None, network_type=None, + segmentation_id=None, physical_network=None): + LOG.debug(_("port_update received")) + if CONF.SECURITYGROUP.enable_security_group: + if 'security_groups' in port: + self.sec_groups_agent.refresh_firewall() + + self._treat_vif_port( + port['id'], port['network_id'], + network_type, physical_network, + segmentation_id, port['admin_state_up']) + + def _get_vswitch_name(self, network_type, physical_network): + if network_type != p_const.TYPE_LOCAL: + vswitch_name = self._get_vswitch_for_physical_network( + physical_network) + else: + vswitch_name = CONF.AGENT.local_network_vswitch + return vswitch_name + + def _provision_network(self, port_id, + net_uuid, network_type, + physical_network, + segmentation_id): + LOG.info(_("Provisioning network %s"), net_uuid) + + vswitch_name = self._get_vswitch_name(network_type, physical_network) + + if network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT]: + #Nothing to do + pass + elif network_type == p_const.TYPE_LOCAL: + #TODO(alexpilotti): Check that the switch type is private + #or create it if not existing + pass + else: + raise utils.HyperVException( + msg=(_("Cannot provision unknown network type %(network_type)s" + " for network %(net_uuid)s") % + dict(network_type=network_type, net_uuid=net_uuid))) + + map = { + 'network_type': network_type, + 'vswitch_name': vswitch_name, + 'ports': [], + 'vlan_id': segmentation_id} + self._network_vswitch_map[net_uuid] = map + + def _reclaim_local_network(self, net_uuid): + LOG.info(_("Reclaiming local network %s"), net_uuid) + del self._network_vswitch_map[net_uuid] + + def _port_bound(self, port_id, + net_uuid, + network_type, + physical_network, + segmentation_id): + LOG.debug(_("Binding port %s"), port_id) + + if net_uuid not in self._network_vswitch_map: + self._provision_network( + port_id, net_uuid, network_type, + physical_network, segmentation_id) + + map = self._network_vswitch_map[net_uuid] + map['ports'].append(port_id) + + self._utils.connect_vnic_to_vswitch(map['vswitch_name'], port_id) + + if network_type == p_const.TYPE_VLAN: + LOG.info(_('Binding VLAN ID %(segmentation_id)s ' + 'to switch port %(port_id)s'), + dict(segmentation_id=segmentation_id, port_id=port_id)) + self._utils.set_vswitch_port_vlan_id( + segmentation_id, + port_id) + elif network_type == p_const.TYPE_FLAT: + #Nothing to do + pass + elif network_type == p_const.TYPE_LOCAL: + #Nothing to do + pass + else: + LOG.error(_('Unsupported network type %s'), network_type) + + if CONF.AGENT.enable_metrics_collection: + self._utils.enable_port_metrics_collection(port_id) + self._port_metric_retries[port_id] = CONF.AGENT.metrics_max_retries + + def _port_unbound(self, port_id): + (net_uuid, map) = self._get_network_vswitch_map_by_port_id(port_id) + if net_uuid not in self._network_vswitch_map: + LOG.info(_('Network %s is not avalailable on this agent'), + net_uuid) + return + + LOG.debug(_("Unbinding port %s"), port_id) + self._utils.disconnect_switch_port(map['vswitch_name'], port_id, True) + + if not map['ports']: + self._reclaim_local_network(net_uuid) + + def _port_enable_control_metrics(self): + if not 
CONF.AGENT.enable_metrics_collection: + return + + for port_id in self._port_metric_retries.keys(): + if self._utils.can_enable_control_metrics(port_id): + self._utils.enable_control_metrics(port_id) + LOG.info(_('Port metrics enabled for port: %s'), port_id) + del self._port_metric_retries[port_id] + elif self._port_metric_retries[port_id] < 1: + self._utils.enable_control_metrics(port_id) + LOG.error(_('Port metrics raw enabling for port: %s'), port_id) + del self._port_metric_retries[port_id] + else: + self._port_metric_retries[port_id] -= 1 + + def _update_ports(self, registered_ports): + ports = self._utils.get_vnic_ids() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def _treat_vif_port(self, port_id, network_id, network_type, + physical_network, segmentation_id, + admin_state_up): + if self._utils.vnic_port_exists(port_id): + if admin_state_up: + self._port_bound(port_id, network_id, network_type, + physical_network, segmentation_id) + else: + self._port_unbound(port_id) + else: + LOG.debug(_("No port %s defined on agent."), port_id) + + def _treat_devices_added(self, devices): + try: + devices_details_list = self.plugin_rpc.get_devices_details_list( + self.context, + devices, + self.agent_id) + except Exception as e: + LOG.debug("Unable to get ports details for " + "devices %(devices)s: %(e)s", + {'devices': devices, 'e': e}) + # resync is needed + return True + + for device_details in devices_details_list: + device = device_details['device'] + LOG.info(_("Adding port %s"), device) + if 'port_id' in device_details: + LOG.info( + _("Port %(device)s updated. Details: %(device_details)s"), + {'device': device, 'device_details': device_details}) + self._treat_vif_port( + device_details['port_id'], + device_details['network_id'], + device_details['network_type'], + device_details['physical_network'], + device_details['segmentation_id'], + device_details['admin_state_up']) + + # check if security groups is enabled. 
+ # if not, teardown the security group rules + if CONF.SECURITYGROUP.enable_security_group: + self.sec_groups_agent.prepare_devices_filter([device]) + else: + self._utils.remove_all_security_rules( + device_details['port_id']) + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + return False + + def _treat_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Removing port %s"), device) + try: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug( + _("Removing port failed for device %(device)s: %(e)s"), + dict(device=device, e=e)) + resync = True + continue + self._port_unbound(device) + return resync + + def _process_network_ports(self, port_info): + resync_a = False + resync_b = False + if 'added' in port_info: + resync_a = self._treat_devices_added(port_info['added']) + if 'removed' in port_info: + resync_b = self._treat_devices_removed(port_info['removed']) + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def daemon_loop(self): + sync = True + ports = set() + + while True: + try: + start = time.time() + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + sync = False + + port_info = self._update_ports(ports) + + # notify plugin about port deltas + if port_info: + LOG.debug(_("Agent loop has new devices!")) + # If treat devices fails - must resync with plugin + sync = self._process_network_ports(port_info) + ports = port_info['current'] + + self._port_enable_control_metrics() + except Exception as e: + LOG.exception(_("Error in agent event loop: %s"), e) + sync = True + + # sleep till end of polling interval + elapsed = (time.time() - start) + if (elapsed < self._polling_interval): + time.sleep(self._polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)"), + {'polling_interval': self._polling_interval, + 'elapsed': elapsed}) + + +def main(): + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + + plugin = HyperVNeutronAgent() + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + plugin.daemon_loop() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/security_groups_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/security_groups_driver.py new file mode 100644 index 00000000..755ab527 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/security_groups_driver.py @@ -0,0 +1,146 @@ +#Copyright 2014 Cloudbase Solutions SRL +#All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Claudiu Belu, Cloudbase Solutions Srl + +from neutron.agent import firewall +from neutron.openstack.common import log as logging +from neutron.plugins.hyperv.agent import utilsfactory +from neutron.plugins.hyperv.agent import utilsv2 + +LOG = logging.getLogger(__name__) + + +class HyperVSecurityGroupsDriver(firewall.FirewallDriver): + """Security Groups Driver. + + Security Groups implementation for Hyper-V VMs. + """ + + _ACL_PROP_MAP = { + 'direction': {'ingress': utilsv2.HyperVUtilsV2._ACL_DIR_IN, + 'egress': utilsv2.HyperVUtilsV2._ACL_DIR_OUT}, + 'ethertype': {'IPv4': utilsv2.HyperVUtilsV2._ACL_TYPE_IPV4, + 'IPv6': utilsv2.HyperVUtilsV2._ACL_TYPE_IPV6}, + 'protocol': {'icmp': utilsv2.HyperVUtilsV2._ICMP_PROTOCOL}, + 'default': "ANY", + 'address_default': {'IPv4': '0.0.0.0/0', 'IPv6': '::/0'} + } + + def __init__(self): + self._utils = utilsfactory.get_hypervutils() + self._security_ports = {} + + def prepare_port_filter(self, port): + LOG.debug('Creating port %s rules' % len(port['security_group_rules'])) + + # newly created port, add default rules. + if port['device'] not in self._security_ports: + LOG.debug('Creating default reject rules.') + self._utils.create_default_reject_all_rules(port['id']) + + self._security_ports[port['device']] = port + self._create_port_rules(port['id'], port['security_group_rules']) + + def _create_port_rules(self, port_id, rules): + for rule in rules: + param_map = self._create_param_map(rule) + try: + self._utils.create_security_rule(port_id, **param_map) + except Exception as ex: + LOG.error(_('Hyper-V Exception: %(hyperv_exception)s while ' + 'adding rule: %(rule)s'), + dict(hyperv_exception=ex, rule=rule)) + + def _remove_port_rules(self, port_id, rules): + for rule in rules: + param_map = self._create_param_map(rule) + try: + self._utils.remove_security_rule(port_id, **param_map) + except Exception as ex: + LOG.error(_('Hyper-V Exception: %(hyperv_exception)s while ' + 'removing rule: %(rule)s'), + dict(hyperv_exception=ex, rule=rule)) + + def _create_param_map(self, rule): + if 'port_range_min' in rule and 'port_range_max' in rule: + local_port = '%s-%s' % (rule['port_range_min'], + rule['port_range_max']) + else: + local_port = self._ACL_PROP_MAP['default'] + + return { + 'direction': self._ACL_PROP_MAP['direction'][rule['direction']], + 'acl_type': self._ACL_PROP_MAP['ethertype'][rule['ethertype']], + 'local_port': local_port, + 'protocol': self._get_rule_protocol(rule), + 'remote_address': self._get_rule_remote_address(rule) + } + + def apply_port_filter(self, port): + LOG.info(_('Applying port filter.')) + + def update_port_filter(self, port): + LOG.info(_('Updating port rules.')) + + if port['device'] not in self._security_ports: + self.prepare_port_filter(port) + return + + old_port = self._security_ports[port['device']] + rules = old_port['security_group_rules'] + param_port_rules = port['security_group_rules'] + + new_rules = [r for r in param_port_rules if r not in rules] + remove_rules = [r for r in rules if r not in param_port_rules] + + LOG.info(_("Creating %(new)s new rules, removing %(old)s " + "old rules."), + {'new': len(new_rules), + 'old': len(remove_rules)}) + + self._remove_port_rules(old_port['id'], remove_rules) + self._create_port_rules(port['id'], new_rules) + + self._security_ports[port['device']] = port + + def remove_port_filter(self, port): + LOG.info(_('Removing port filter')) + self._security_ports.pop(port['device'], None) + + @property + def ports(self): + return self._security_ports + + def 
_get_rule_remote_address(self, rule): + if rule['direction'] is 'ingress': + ip_prefix = 'source_ip_prefix' + else: + ip_prefix = 'dest_ip_prefix' + + if ip_prefix in rule: + return rule[ip_prefix] + return self._ACL_PROP_MAP['address_default'][rule['ethertype']] + + def _get_rule_protocol(self, rule): + protocol = self._get_rule_prop_or_default(rule, 'protocol') + if protocol in self._ACL_PROP_MAP['protocol'].keys(): + return self._ACL_PROP_MAP['protocol'][protocol] + + return protocol + + def _get_rule_prop_or_default(self, rule, prop): + if prop in rule: + return rule[prop] + return self._ACL_PROP_MAP['default'] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utils.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utils.py new file mode 100644 index 00000000..c078ee58 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utils.py @@ -0,0 +1,254 @@ +# Copyright 2013 Cloudbase Solutions SRL +# Copyright 2013 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Pedro Navarro Perez +# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +import sys +import time + +from oslo.config import cfg + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging + +# Check needed for unit testing on Unix +if sys.platform == 'win32': + import wmi + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class HyperVException(n_exc.NeutronException): + message = _('HyperVException: %(msg)s') + +WMI_JOB_STATE_STARTED = 4096 +WMI_JOB_STATE_RUNNING = 4 +WMI_JOB_STATE_COMPLETED = 7 + + +class HyperVUtils(object): + + _ETHERNET_SWITCH_PORT = 'Msvm_SwitchPort' + + _wmi_namespace = '//./root/virtualization' + + def __init__(self): + self._wmi_conn = None + + @property + def _conn(self): + if self._wmi_conn is None: + self._wmi_conn = wmi.WMI(moniker=self._wmi_namespace) + return self._wmi_conn + + def get_switch_ports(self, vswitch_name): + vswitch = self._get_vswitch(vswitch_name) + vswitch_ports = vswitch.associators( + wmi_result_class=self._ETHERNET_SWITCH_PORT) + return set(p.Name for p in vswitch_ports) + + def vnic_port_exists(self, port_id): + try: + self._get_vnic_settings(port_id) + except Exception: + return False + return True + + def get_vnic_ids(self): + return set( + p.ElementName + for p in self._conn.Msvm_SyntheticEthernetPortSettingData() + if p.ElementName is not None) + + def _get_vnic_settings(self, vnic_name): + vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData( + ElementName=vnic_name) + if not vnic_settings: + raise HyperVException(msg=_('Vnic not found: %s') % vnic_name) + return vnic_settings[0] + + def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name): + vnic_settings = self._get_vnic_settings(switch_port_name) + if not vnic_settings.Connection or not vnic_settings.Connection[0]: + port = self.get_port_by_id(switch_port_name, vswitch_name) + if port: + port_path = port.Path_() + else: + 
port_path = self._create_switch_port( + vswitch_name, switch_port_name) + vnic_settings.Connection = [port_path] + self._modify_virt_resource(vnic_settings) + + def _get_vm_from_res_setting_data(self, res_setting_data): + sd = res_setting_data.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + vm = sd[0].associators( + wmi_result_class='Msvm_ComputerSystem') + return vm[0] + + def _modify_virt_resource(self, res_setting_data): + vm = self._get_vm_from_res_setting_data(res_setting_data) + + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.Path_(), [res_setting_data.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _check_job_status(self, ret_val, jobpath): + """Poll WMI job state for completion.""" + if not ret_val: + return + elif ret_val not in [WMI_JOB_STATE_STARTED, WMI_JOB_STATE_RUNNING]: + raise HyperVException(msg=_('Job failed with error %d') % ret_val) + + job_wmi_path = jobpath.replace('\\', '/') + job = wmi.WMI(moniker=job_wmi_path) + + while job.JobState == WMI_JOB_STATE_RUNNING: + time.sleep(0.1) + job = wmi.WMI(moniker=job_wmi_path) + if job.JobState != WMI_JOB_STATE_COMPLETED: + job_state = job.JobState + if job.path().Class == "Msvm_ConcreteJob": + err_sum_desc = job.ErrorSummaryDescription + err_desc = job.ErrorDescription + err_code = job.ErrorCode + data = {'job_state': job_state, + 'err_sum_desc': err_sum_desc, + 'err_desc': err_desc, + 'err_code': err_code} + raise HyperVException( + msg=_("WMI job failed with status %(job_state)d. " + "Error details: %(err_sum_desc)s - %(err_desc)s - " + "Error code: %(err_code)d") % data) + else: + (error, ret_val) = job.GetError() + if not ret_val and error: + data = {'job_state': job_state, + 'error': error} + raise HyperVException( + msg=_("WMI job failed with status %(job_state)d. " + "Error details: %(error)s") % data) + else: + raise HyperVException( + msg=_("WMI job failed with status %d. " + "No error description available") % job_state) + + desc = job.Description + elap = job.ElapsedTime + LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"), + {'desc': desc, 'elap': elap}) + + def _create_switch_port(self, vswitch_name, switch_port_name): + """Creates a switch port.""" + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] + vswitch_path = self._get_vswitch(vswitch_name).path_() + (new_port, ret_val) = switch_svc.CreateSwitchPort( + Name=switch_port_name, + FriendlyName=switch_port_name, + ScopeOfResidence="", + VirtualSwitch=vswitch_path) + if ret_val != 0: + raise HyperVException( + msg=_('Failed creating port for %s') % vswitch_name) + return new_port + + def disconnect_switch_port( + self, vswitch_name, switch_port_name, delete_port): + """Disconnects the switch port.""" + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] + switch_port_path = self._get_switch_port_path_by_name( + switch_port_name) + if not switch_port_path: + # Port not found. It happens when the VM was already deleted. 
+ return + + (ret_val, ) = switch_svc.DisconnectSwitchPort( + SwitchPort=switch_port_path) + if ret_val != 0: + data = {'switch_port_name': switch_port_name, + 'vswitch_name': vswitch_name, + 'ret_val': ret_val} + raise HyperVException( + msg=_('Failed to disconnect port %(switch_port_name)s ' + 'from switch %(vswitch_name)s ' + 'with error %(ret_val)s') % data) + if delete_port: + (ret_val, ) = switch_svc.DeleteSwitchPort( + SwitchPort=switch_port_path) + if ret_val != 0: + data = {'switch_port_name': switch_port_name, + 'vswitch_name': vswitch_name, + 'ret_val': ret_val} + raise HyperVException( + msg=_('Failed to delete port %(switch_port_name)s ' + 'from switch %(vswitch_name)s ' + 'with error %(ret_val)s') % data) + + def _get_vswitch(self, vswitch_name): + vswitch = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name) + if not vswitch: + raise HyperVException(msg=_('VSwitch not found: %s') % + vswitch_name) + return vswitch[0] + + def _get_vswitch_external_port(self, vswitch): + vswitch_ports = vswitch.associators( + wmi_result_class=self._ETHERNET_SWITCH_PORT) + for vswitch_port in vswitch_ports: + lan_endpoints = vswitch_port.associators( + wmi_result_class='Msvm_SwitchLanEndpoint') + if lan_endpoints: + ext_port = lan_endpoints[0].associators( + wmi_result_class='Msvm_ExternalEthernetPort') + if ext_port: + return vswitch_port + + def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name): + vlan_endpoint_settings = self._conn.Msvm_VLANEndpointSettingData( + ElementName=switch_port_name)[0] + if vlan_endpoint_settings.AccessVLAN != vlan_id: + vlan_endpoint_settings.AccessVLAN = vlan_id + vlan_endpoint_settings.put() + + def _get_switch_port_path_by_name(self, switch_port_name): + vswitch = self._conn.Msvm_SwitchPort(ElementName=switch_port_name) + if vswitch: + return vswitch[0].path_() + + def get_vswitch_id(self, vswitch_name): + vswitch = self._get_vswitch(vswitch_name) + return vswitch.Name + + def get_port_by_id(self, port_id, vswitch_name): + vswitch = self._get_vswitch(vswitch_name) + switch_ports = vswitch.associators( + wmi_result_class=self._ETHERNET_SWITCH_PORT) + for switch_port in switch_ports: + if (switch_port.ElementName == port_id): + return switch_port + + def enable_port_metrics_collection(self, switch_port_name): + raise NotImplementedError(_("Metrics collection is not supported on " + "this version of Hyper-V")) + + def enable_control_metrics(self, switch_port_name): + raise NotImplementedError(_("Metrics collection is not supported on " + "this version of Hyper-V")) + + def can_enable_control_metrics(self, switch_port_name): + return False diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utilsfactory.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utilsfactory.py new file mode 100644 index 00000000..adc01099 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utilsfactory.py @@ -0,0 +1,70 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Claudiu Belu, Cloudbase Solutions Srl + +import sys + +from oslo.config import cfg + +from neutron.openstack.common import log as logging +from neutron.plugins.hyperv.agent import utils +from neutron.plugins.hyperv.agent import utilsv2 + +# Check needed for unit testing on Unix +if sys.platform == 'win32': + import wmi + +hyper_opts = [ + cfg.BoolOpt('force_hyperv_utils_v1', + default=False, + help=_('Force V1 WMI utility classes')), +] + +CONF = cfg.CONF +CONF.register_opts(hyper_opts, 'hyperv') + +LOG = logging.getLogger(__name__) + + +def _get_windows_version(): + return wmi.WMI(moniker='//./root/cimv2').Win32_OperatingSystem()[0].Version + + +def _check_min_windows_version(major, minor, build=0): + version_str = _get_windows_version() + return map(int, version_str.split('.')) >= [major, minor, build] + + +def get_hypervutils(): + # V1 virtualization namespace features are supported up to + # Windows Server / Hyper-V Server 2012 + # V2 virtualization namespace features are supported starting with + # Windows Server / Hyper-V Server 2012 + # Windows Server / Hyper-V Server 2012 R2 uses the V2 namespace and + # introduces additional features + + force_v1_flag = CONF.hyperv.force_hyperv_utils_v1 + if _check_min_windows_version(6, 3): + if force_v1_flag: + LOG.warning(_('V1 virtualization namespace no longer supported on ' + 'Windows Server / Hyper-V Server 2012 R2 or above.')) + cls = utilsv2.HyperVUtilsV2R2 + elif not force_v1_flag and _check_min_windows_version(6, 2): + cls = utilsv2.HyperVUtilsV2 + else: + cls = utils.HyperVUtils + LOG.debug(_("Loading class: %(module_name)s.%(class_name)s"), + {'module_name': cls.__module__, 'class_name': cls.__name__}) + return cls() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utilsv2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utilsv2.py new file mode 100644 index 00000000..d7e4a756 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent/utilsv2.py @@ -0,0 +1,437 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl +# @author: Claudiu Belu, Cloudbase Solutions Srl + +from neutron.plugins.hyperv.agent import utils + + +class HyperVUtilsV2(utils.HyperVUtils): + + _EXTERNAL_PORT = 'Msvm_ExternalEthernetPort' + _ETHERNET_SWITCH_PORT = 'Msvm_EthernetSwitchPort' + _PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData' + _PORT_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData' + _PORT_SECURITY_SET_DATA = 'Msvm_EthernetSwitchPortSecuritySettingData' + _PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData' + _PORT_EXT_ACL_SET_DATA = _PORT_ALLOC_ACL_SET_DATA + _LAN_ENDPOINT = 'Msvm_LANEndpoint' + _STATE_DISABLED = 3 + _OPERATION_MODE_ACCESS = 1 + + _VIRTUAL_SYSTEM_SETTING_DATA = 'Msvm_VirtualSystemSettingData' + _VM_SUMMARY_ENABLED_STATE = 100 + _HYPERV_VM_STATE_ENABLED = 2 + + _ACL_DIR_IN = 1 + _ACL_DIR_OUT = 2 + + _ACL_TYPE_IPV4 = 2 + _ACL_TYPE_IPV6 = 3 + + _ACL_ACTION_ALLOW = 1 + _ACL_ACTION_DENY = 2 + _ACL_ACTION_METER = 3 + + _METRIC_ENABLED = 2 + _NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic' + _NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic' + + _ACL_APPLICABILITY_LOCAL = 1 + _ACL_APPLICABILITY_REMOTE = 2 + + _ACL_DEFAULT = 'ANY' + _IPV4_ANY = '0.0.0.0/0' + _IPV6_ANY = '::/0' + _TCP_PROTOCOL = 'tcp' + _UDP_PROTOCOL = 'udp' + _ICMP_PROTOCOL = '1' + _MAX_WEIGHT = 65500 + + # 2 directions x 2 address types = 4 ACLs + _REJECT_ACLS_COUNT = 4 + + _wmi_namespace = '//./root/virtualization/v2' + + def __init__(self): + super(HyperVUtilsV2, self).__init__() + + def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name): + vnic = self._get_vnic_settings(switch_port_name) + vswitch = self._get_vswitch(vswitch_name) + + port, found = self._get_switch_port_allocation(switch_port_name, True) + port.HostResource = [vswitch.path_()] + port.Parent = vnic.path_() + if not found: + vm = self._get_vm_from_res_setting_data(vnic) + self._add_virt_resource(vm, port) + else: + self._modify_virt_resource(port) + + def _modify_virt_resource(self, res_setting_data): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, out_set_data, ret_val) = vs_man_svc.ModifyResourceSettings( + ResourceSettings=[res_setting_data.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _add_virt_resource(self, vm, res_setting_data): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, out_set_data, ret_val) = vs_man_svc.AddResourceSettings( + vm.path_(), [res_setting_data.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _remove_virt_resource(self, res_setting_data): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job, ret_val) = vs_man_svc.RemoveResourceSettings( + ResourceSettings=[res_setting_data.path_()]) + self._check_job_status(ret_val, job) + + def _add_virt_feature(self, element, res_setting_data): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, out_set_data, ret_val) = vs_man_svc.AddFeatureSettings( + element.path_(), [res_setting_data.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _remove_virt_feature(self, feature_resource): + self._remove_multiple_virt_features([feature_resource]) + + def _remove_multiple_virt_features(self, feature_resources): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job_path, ret_val) = vs_man_svc.RemoveFeatureSettings( + FeatureSettings=[f.path_() for f in feature_resources]) + self._check_job_status(ret_val, job_path) + + def 
disconnect_switch_port( + self, vswitch_name, switch_port_name, delete_port): + """Disconnects the switch port.""" + sw_port, found = self._get_switch_port_allocation(switch_port_name) + if not sw_port: + # Port not found. It happens when the VM was already deleted. + return + + if delete_port: + self._remove_virt_resource(sw_port) + else: + sw_port.EnabledState = self._STATE_DISABLED + self._modify_virt_resource(sw_port) + + def _get_vswitch(self, vswitch_name): + vswitch = self._conn.Msvm_VirtualEthernetSwitch( + ElementName=vswitch_name) + if not len(vswitch): + raise utils.HyperVException(msg=_('VSwitch not found: %s') % + vswitch_name) + return vswitch[0] + + def _get_vswitch_external_port(self, vswitch): + vswitch_ports = vswitch.associators( + wmi_result_class=self._ETHERNET_SWITCH_PORT) + for vswitch_port in vswitch_ports: + lan_endpoints = vswitch_port.associators( + wmi_result_class=self._LAN_ENDPOINT) + if len(lan_endpoints): + lan_endpoints = lan_endpoints[0].associators( + wmi_result_class=self._LAN_ENDPOINT) + if len(lan_endpoints): + ext_port = lan_endpoints[0].associators( + wmi_result_class=self._EXTERNAL_PORT) + if ext_port: + return vswitch_port + + def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name): + port_alloc, found = self._get_switch_port_allocation(switch_port_name) + if not found: + raise utils.HyperVException( + msg=_('Port Allocation not found: %s') % switch_port_name) + + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc) + if vlan_settings: + # Removing the feature because it cannot be modified + # due to a wmi exception. + (job_path, ret_val) = vs_man_svc.RemoveFeatureSettings( + FeatureSettings=[vlan_settings.path_()]) + self._check_job_status(ret_val, job_path) + + (vlan_settings, found) = self._get_vlan_setting_data(switch_port_name) + vlan_settings.AccessVlanId = vlan_id + vlan_settings.OperationMode = self._OPERATION_MODE_ACCESS + (job_path, out, ret_val) = vs_man_svc.AddFeatureSettings( + port_alloc.path_(), [vlan_settings.GetText_(1)]) + self._check_job_status(ret_val, job_path) + + def _get_vlan_setting_data_from_port_alloc(self, port_alloc): + return self._get_first_item(port_alloc.associators( + wmi_result_class=self._PORT_VLAN_SET_DATA)) + + def _get_vlan_setting_data(self, switch_port_name, create=True): + return self._get_setting_data( + self._PORT_VLAN_SET_DATA, + switch_port_name, create) + + def _get_switch_port_allocation(self, switch_port_name, create=False): + return self._get_setting_data( + self._PORT_ALLOC_SET_DATA, + switch_port_name, create) + + def _get_setting_data(self, class_name, element_name, create=True): + element_name = element_name.replace("'", '"') + q = self._conn.query("SELECT * FROM %(class_name)s WHERE " + "ElementName = '%(element_name)s'" % + {"class_name": class_name, + "element_name": element_name}) + data = self._get_first_item(q) + found = data is not None + if not data and create: + data = self._get_default_setting_data(class_name) + data.ElementName = element_name + return data, found + + def _get_default_setting_data(self, class_name): + return self._conn.query("SELECT * FROM %s WHERE InstanceID " + "LIKE '%%\\Default'" % class_name)[0] + + def _get_first_item(self, obj): + if obj: + return obj[0] + + def enable_port_metrics_collection(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + return + + # Add the ACLs only if they don't already exist + acls 
= port.associators(wmi_result_class=self._PORT_ALLOC_ACL_SET_DATA) + for acl_type in [self._ACL_TYPE_IPV4, self._ACL_TYPE_IPV6]: + for acl_dir in [self._ACL_DIR_IN, self._ACL_DIR_OUT]: + _acls = self._filter_acls( + acls, self._ACL_ACTION_METER, acl_dir, acl_type) + + if not _acls: + acl = self._create_acl( + acl_dir, acl_type, self._ACL_ACTION_METER) + self._add_virt_feature(port, acl) + + def enable_control_metrics(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + return + + metric_svc = self._conn.Msvm_MetricService()[0] + metric_names = [self._NET_IN_METRIC_NAME, self._NET_OUT_METRIC_NAME] + + for metric_name in metric_names: + metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name) + if metric_def: + metric_svc.ControlMetrics( + Subject=port.path_(), + Definition=metric_def[0].path_(), + MetricCollectionEnabled=self._METRIC_ENABLED) + + def can_enable_control_metrics(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + return False + + if not self._is_port_vm_started(port): + return False + + # all 4 meter ACLs must be existent first. (2 x direction) + acls = port.associators(wmi_result_class=self._PORT_ALLOC_ACL_SET_DATA) + acls = [a for a in acls if a.Action == self._ACL_ACTION_METER] + if len(acls) < 2: + return False + return True + + def _is_port_vm_started(self, port): + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vmsettings = port.associators( + wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA) + #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx + (ret_val, summary_info) = vs_man_svc.GetSummaryInformation( + [self._VM_SUMMARY_ENABLED_STATE], + [v.path_() for v in vmsettings]) + if ret_val or not summary_info: + raise utils.HyperVException(msg=_('Cannot get VM summary data ' + 'for: %s') % port.ElementName) + + return summary_info[0].EnabledState is self._HYPERV_VM_STATE_ENABLED + + def create_security_rule(self, switch_port_name, direction, acl_type, + local_port, protocol, remote_address): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + return + + # Add the ACLs only if they don't already exist + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + weight = self._get_new_weight(acls) + self._bind_security_rule( + port, direction, acl_type, self._ACL_ACTION_ALLOW, local_port, + protocol, remote_address, weight) + + def remove_security_rule(self, switch_port_name, direction, acl_type, + local_port, protocol, remote_address): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + # Port not found. It happens when the VM was already deleted. + return + + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + filtered_acls = self._filter_security_acls( + acls, self._ACL_ACTION_ALLOW, direction, acl_type, local_port, + protocol, remote_address) + + for acl in filtered_acls: + self._remove_virt_feature(acl) + + def remove_all_security_rules(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + # Port not found. It happens when the VM was already deleted. 
+ return + + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + filtered_acls = [a for a in acls if + a.Action is not self._ACL_ACTION_METER] + + if filtered_acls: + self._remove_multiple_virt_features(filtered_acls) + + def create_default_reject_all_rules(self, switch_port_name): + port, found = self._get_switch_port_allocation(switch_port_name, False) + if not found: + raise utils.HyperVException( + msg=_('Port Allocation not found: %s') % switch_port_name) + + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + filtered_acls = [v for v in acls if v.Action == self._ACL_ACTION_DENY] + + if len(filtered_acls) >= self._REJECT_ACLS_COUNT: + return + + for acl in filtered_acls: + self._remove_virt_feature(acl) + + weight = 0 + ipv4_pair = (self._ACL_TYPE_IPV4, self._IPV4_ANY) + ipv6_pair = (self._ACL_TYPE_IPV6, self._IPV6_ANY) + for direction in [self._ACL_DIR_IN, self._ACL_DIR_OUT]: + for acl_type, address in [ipv4_pair, ipv6_pair]: + for protocol in [self._TCP_PROTOCOL, + self._UDP_PROTOCOL, + self._ICMP_PROTOCOL]: + self._bind_security_rule( + port, direction, acl_type, self._ACL_ACTION_DENY, + self._ACL_DEFAULT, protocol, address, weight) + weight += 1 + + def _bind_security_rule(self, port, direction, acl_type, action, + local_port, protocol, remote_address, weight): + acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA) + filtered_acls = self._filter_security_acls( + acls, action, direction, acl_type, local_port, protocol, + remote_address) + + for acl in filtered_acls: + self._remove_virt_feature(acl) + + acl = self._create_security_acl( + direction, acl_type, action, local_port, protocol, remote_address, + weight) + + self._add_virt_feature(port, acl) + + def _create_acl(self, direction, acl_type, action): + acl = self._get_default_setting_data(self._PORT_ALLOC_ACL_SET_DATA) + acl.set(Direction=direction, + AclType=acl_type, + Action=action, + Applicability=self._ACL_APPLICABILITY_LOCAL) + return acl + + def _create_security_acl(self, direction, acl_type, action, local_port, + protocol, remote_ip_address, weight): + acl = self._create_acl(direction, acl_type, action) + (remote_address, remote_prefix_length) = remote_ip_address.split('/') + acl.set(Applicability=self._ACL_APPLICABILITY_REMOTE, + RemoteAddress=remote_address, + RemoteAddressPrefixLength=remote_prefix_length) + return acl + + def _filter_acls(self, acls, action, direction, acl_type, remote_addr=""): + return [v for v in acls + if v.Action == action and + v.Direction == direction and + v.AclType == acl_type and + v.RemoteAddress == remote_addr] + + def _filter_security_acls(self, acls, acl_action, direction, acl_type, + local_port, protocol, remote_addr=""): + (remote_address, remote_prefix_length) = remote_addr.split('/') + remote_prefix_length = int(remote_prefix_length) + + return [v for v in acls + if v.Direction == direction and + v.Action in [self._ACL_ACTION_ALLOW, self._ACL_ACTION_DENY] and + v.AclType == acl_type and + v.RemoteAddress == remote_address and + v.RemoteAddressPrefixLength == remote_prefix_length] + + def _get_new_weight(self, acls): + return 0 + + +class HyperVUtilsV2R2(HyperVUtilsV2): + _PORT_EXT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortExtendedAclSettingData' + _MAX_WEIGHT = 65500 + + # 2 directions x 2 address types x 3 protocols = 12 ACLs + _REJECT_ACLS_COUNT = 12 + + def _create_security_acl(self, direction, acl_type, action, local_port, + protocol, remote_addr, weight): + acl = self._get_default_setting_data(self._PORT_EXT_ACL_SET_DATA) + 
acl.set(Direction=direction, + Action=action, + LocalPort=str(local_port), + Protocol=protocol, + RemoteIPAddress=remote_addr, + IdleSessionTimeout=0, + Weight=weight) + return acl + + def _filter_security_acls(self, acls, action, direction, acl_type, + local_port, protocol, remote_addr=""): + return [v for v in acls + if v.Action == action and + v.Direction == direction and + v.LocalPort == str(local_port) and + v.Protocol == protocol and + v.RemoteIPAddress == remote_addr] + + def _get_new_weight(self, acls): + acls = [a for a in acls if a.Action is not self._ACL_ACTION_DENY] + if not acls: + return self._MAX_WEIGHT - 1 + + weights = [a.Weight for a in acls] + min_weight = min(weights) + for weight in range(min_weight, self._MAX_WEIGHT): + if weight not in weights: + return weight + + return min_weight - 1 diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent_notifier_api.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent_notifier_api.py new file mode 100644 index 00000000..bceab0a2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/agent_notifier_api.py @@ -0,0 +1,78 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.openstack.common import log as logging +from neutron.plugins.hyperv.common import constants + +LOG = logging.getLogger(__name__) + + +class AgentNotifierApi(n_rpc.RpcProxy): + '''Agent side of the openvswitch rpc API. + + API version history: + 1.0 - Initial version. 
+ + ''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + self.topic_port_delete = topics.get_topic_name(topic, + topics.PORT, + topics.DELETE) + self.topic_tunnel_update = topics.get_topic_name(topic, + constants.TUNNEL, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, network_type, segmentation_id, + physical_network): + self.fanout_cast(context, + self.make_msg('port_update', + port=port, + network_type=network_type, + segmentation_id=segmentation_id, + physical_network=physical_network), + topic=self.topic_port_update) + + def port_delete(self, context, port_id): + self.fanout_cast(context, + self.make_msg('port_delete', + port_id=port_id), + topic=self.topic_port_delete) + + def tunnel_update(self, context, tunnel_ip, tunnel_id): + self.fanout_cast(context, + self.make_msg('tunnel_update', + tunnel_ip=tunnel_ip, + tunnel_id=tunnel_id), + topic=self.topic_tunnel_update) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/common/__init__.py new file mode 100644 index 00000000..0089853d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/common/constants.py new file mode 100644 index 00000000..bab6e50b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/common/constants.py @@ -0,0 +1,21 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +# Topic for tunnel notifications between the plugin and agent +TUNNEL = 'tunnel' + +# Special vlan_id value in ovs_vlan_allocations table indicating flat network +FLAT_VLAN_ID = -1 diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/db.py new file mode 100644 index 00000000..276b0c38 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/db.py @@ -0,0 +1,217 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +from six import moves +from sqlalchemy.orm import exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db_api +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.hyperv.common import constants +from neutron.plugins.hyperv import model as hyperv_model + +LOG = logging.getLogger(__name__) + + +class HyperVPluginDB(object): + def initialize(self): + db_api.configure_db() + + def reserve_vlan(self, session): + with session.begin(subtransactions=True): + alloc_q = session.query(hyperv_model.VlanAllocation) + alloc_q = alloc_q.filter_by(allocated=False) + alloc = alloc_q.first() + if alloc: + LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + alloc.allocated = True + return (alloc.physical_network, alloc.vlan_id) + raise n_exc.NoNetworkAvailable() + + def reserve_flat_net(self, session): + with session.begin(subtransactions=True): + alloc_q = session.query(hyperv_model.VlanAllocation) + alloc_q = alloc_q.filter_by(allocated=False, + vlan_id=constants.FLAT_VLAN_ID) + alloc = alloc_q.first() + if alloc: + LOG.debug(_("Reserving flat physical network " + "%(physical_network)s from pool"), + {'physical_network': alloc.physical_network}) + alloc.allocated = True + return alloc.physical_network + raise n_exc.NoNetworkAvailable() + + def reserve_specific_vlan(self, session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + alloc_q = session.query(hyperv_model.VlanAllocation) + alloc_q = alloc_q.filter_by( + physical_network=physical_network, + vlan_id=vlan_id) + alloc = alloc_q.one() + if alloc.allocated: + if vlan_id == constants.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse( + vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc.allocated = True + except exc.NoResultFound: + raise n_exc.NoNetworkAvailable() + + def reserve_specific_flat_net(self, session, physical_network): + return self.reserve_specific_vlan(session, 
physical_network, + constants.FLAT_VLAN_ID) + + def add_network_binding(self, session, network_id, network_type, + physical_network, segmentation_id): + with session.begin(subtransactions=True): + binding = hyperv_model.NetworkBinding( + network_id, network_type, + physical_network, + segmentation_id) + session.add(binding) + + def get_port(self, port_id): + session = db_api.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + except exc.NoResultFound: + port = None + return port + + def get_network_binding(self, session, network_id): + session = session or db_api.get_session() + try: + binding_q = session.query(hyperv_model.NetworkBinding) + binding_q = binding_q.filter_by(network_id=network_id) + return binding_q.one() + except exc.NoResultFound: + return + + def set_port_status(self, port_id, status): + session = db_api.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) + + def release_vlan(self, session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + alloc_q = session.query(hyperv_model.VlanAllocation) + alloc_q = alloc_q.filter_by(physical_network=physical_network, + vlan_id=vlan_id) + alloc = alloc_q.one() + alloc.allocated = False + #session.delete(alloc) + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan_id)s on physical network " + "%(physical_network)s not found"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + + def _add_missing_allocatable_vlans(self, session, vlan_ids, + physical_network): + for vlan_id in sorted(vlan_ids): + alloc = hyperv_model.VlanAllocation( + physical_network, vlan_id) + session.add(alloc) + + def _remove_non_allocatable_vlans(self, session, + physical_network, + vlan_ids, + allocations): + if physical_network in allocations: + for alloc in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(alloc.vlan_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_( + "Removing vlan %(vlan_id)s on " + "physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': physical_network}) + session.delete(alloc) + del allocations[physical_network] + + def _remove_unconfigured_vlans(self, session, allocations): + for allocs in allocations.itervalues(): + for alloc in allocs: + if not alloc.allocated: + LOG.debug(_("Removing vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + session.delete(alloc) + + def sync_vlan_allocations(self, network_vlan_ranges): + """Synchronize vlan_allocations table with configured VLAN ranges.""" + + session = db_api.get_session() + with session.begin(): + # get existing allocations for all physical networks + allocations = dict() + allocs_q = session.query(hyperv_model.VlanAllocation) + for alloc in allocs_q: + allocations.setdefault(alloc.physical_network, + set()).add(alloc) + + # process vlan ranges for each configured physical network + for physical_network, vlan_ranges in network_vlan_ranges.items(): + # determine current configured 
allocatable vlans for this + # physical network + vlan_ids = set() + for vlan_range in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_range[0], + vlan_range[1] + 1)) + + # remove from table unallocated vlans not currently allocatable + self._remove_non_allocatable_vlans(session, + physical_network, + vlan_ids, + allocations) + + # add missing allocatable vlans to table + self._add_missing_allocatable_vlans(session, vlan_ids, + physical_network) + + # remove from table unallocated vlans for any unconfigured physical + # networks + self._remove_unconfigured_vlans(session, allocations) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/hyperv_neutron_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/hyperv_neutron_plugin.py new file mode 100644 index 00000000..765f65bb --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/hyperv_neutron_plugin.py @@ -0,0 +1,331 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import l3_gwmode_db +from neutron.db import portbindings_base +from neutron.db import quota_db # noqa +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.hyperv import agent_notifier_api +from neutron.plugins.hyperv.common import constants +from neutron.plugins.hyperv import db as hyperv_db +from neutron.plugins.hyperv import rpc_callbacks + + +DEFAULT_VLAN_RANGES = [] + +hyperv_opts = [ + cfg.StrOpt('tenant_network_type', default='local', + help=_("Network type for tenant networks " + "(local, flat, vlan or none)")), + cfg.ListOpt('network_vlan_ranges', + default=DEFAULT_VLAN_RANGES, + help=_("List of :: " + "or ")), +] + +cfg.CONF.register_opts(hyperv_opts, "HYPERV") + +LOG = logging.getLogger(__name__) + + +class BaseNetworkProvider(object): + def __init__(self): + self._db = hyperv_db.HyperVPluginDB() + + def create_network(self, session, attrs): + pass + + def delete_network(self, session, binding): + pass + + def extend_network_dict(self, network, binding): + pass + + +class LocalNetworkProvider(BaseNetworkProvider): + def create_network(self, session, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + if attributes.is_attr_set(segmentation_id): + msg = _("segmentation_id specified " + "for %s network") % network_type + raise n_exc.InvalidInput(error_message=msg) + 
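+        # 'local' networks carry no provider attributes: both
+        # segmentation_id and physical_network are normalized to None
+        # before the network record is created.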
attrs[provider.SEGMENTATION_ID] = None + + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + if attributes.is_attr_set(physical_network): + msg = _("physical_network specified " + "for %s network") % network_type + raise n_exc.InvalidInput(error_message=msg) + attrs[provider.PHYSICAL_NETWORK] = None + + def extend_network_dict(self, network, binding): + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + + +class FlatNetworkProvider(BaseNetworkProvider): + def create_network(self, session, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + if attributes.is_attr_set(segmentation_id): + msg = _("segmentation_id specified " + "for %s network") % network_type + raise n_exc.InvalidInput(error_message=msg) + segmentation_id = constants.FLAT_VLAN_ID + attrs[provider.SEGMENTATION_ID] = segmentation_id + + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + if not attributes.is_attr_set(physical_network): + physical_network = self._db.reserve_flat_net(session) + attrs[provider.PHYSICAL_NETWORK] = physical_network + else: + self._db.reserve_specific_flat_net(session, physical_network) + + def delete_network(self, session, binding): + self._db.release_vlan(session, binding.physical_network, + constants.FLAT_VLAN_ID) + + def extend_network_dict(self, network, binding): + network[provider.PHYSICAL_NETWORK] = binding.physical_network + + +class VlanNetworkProvider(BaseNetworkProvider): + def create_network(self, session, attrs): + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + if attributes.is_attr_set(segmentation_id): + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + if not attributes.is_attr_set(physical_network): + msg = _("physical_network not provided") + raise n_exc.InvalidInput(error_message=msg) + self._db.reserve_specific_vlan(session, physical_network, + segmentation_id) + else: + (physical_network, + segmentation_id) = self._db.reserve_vlan(session) + attrs[provider.SEGMENTATION_ID] = segmentation_id + attrs[provider.PHYSICAL_NETWORK] = physical_network + + def delete_network(self, session, binding): + self._db.release_vlan( + session, binding.physical_network, + binding.segmentation_id) + + def extend_network_dict(self, network, binding): + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.segmentation_id + + +class HyperVNeutronPlugin(agents_db.AgentDbMixin, + db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + portbindings_base.PortBindingBaseMixin): + + # This attribute specifies whether the plugin supports or not + # bulk operations. 
Name mangling is used in order to ensure it + # is qualified by class + __native_bulk_support = True + supported_extension_aliases = ["provider", "external-net", "router", + "agent", "ext-gw-mode", "binding", "quotas"] + + def __init__(self, configfile=None): + self._db = hyperv_db.HyperVPluginDB() + self._db.initialize() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_HYPERV} + portbindings_base.register_port_dict_function() + self._set_tenant_network_type() + + self._parse_network_vlan_ranges() + self._create_network_providers_map() + self._db.sync_vlan_allocations(self._network_vlan_ranges) + + self._setup_rpc() + + def _set_tenant_network_type(self): + tenant_network_type = cfg.CONF.HYPERV.tenant_network_type + if tenant_network_type not in [svc_constants.TYPE_LOCAL, + svc_constants.TYPE_FLAT, + svc_constants.TYPE_VLAN, + svc_constants.TYPE_NONE]: + msg = _( + "Invalid tenant_network_type: %s. " + "Agent terminated!") % tenant_network_type + raise n_exc.InvalidInput(error_message=msg) + self._tenant_network_type = tenant_network_type + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.notifier = agent_notifier_api.AgentNotifierApi( + topics.AGENT) + self.endpoints = [rpc_callbacks.HyperVRpcCallbacks(self.notifier), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _parse_network_vlan_ranges(self): + self._network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.HYPERV.network_vlan_ranges) + LOG.info(_("Network VLAN ranges: %s"), self._network_vlan_ranges) + + def _check_vlan_id_in_range(self, physical_network, vlan_id): + for r in self._network_vlan_ranges[physical_network]: + if vlan_id >= r[0] and vlan_id <= r[1]: + return True + return False + + def _create_network_providers_map(self): + self._network_providers_map = { + svc_constants.TYPE_LOCAL: LocalNetworkProvider(), + svc_constants.TYPE_FLAT: FlatNetworkProvider(), + svc_constants.TYPE_VLAN: VlanNetworkProvider() + } + + def _process_provider_create(self, context, session, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + network_type_set = attributes.is_attr_set(network_type) + if not network_type_set: + if self._tenant_network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + network_type = self._tenant_network_type + attrs[provider.NETWORK_TYPE] = network_type + + if network_type not in self._network_providers_map: + msg = _("Network type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + p = self._network_providers_map[network_type] + # Provider specific network creation + p.create_network(session, attrs) + + def create_network(self, context, network): + session = context.session + with session.begin(subtransactions=True): + network_attrs = network['network'] + self._process_provider_create(context, session, network_attrs) + + net = super(HyperVNeutronPlugin, self).create_network( + context, network) + + network_type = network_attrs[provider.NETWORK_TYPE] + physical_network = network_attrs[provider.PHYSICAL_NETWORK] + segmentation_id = network_attrs[provider.SEGMENTATION_ID] + + self._db.add_network_binding( + session, net['id'], network_type, + physical_network, segmentation_id) + + 
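+            # The binding is added inside the same enclosing transaction as
+            # the network itself, so a failure in the extension processing
+            # below also rolls the binding back.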
self._process_l3_create(context, net, network['network']) + self._extend_network_dict_provider(context, net) + + LOG.debug(_("Created network: %s"), net['id']) + return net + + def _extend_network_dict_provider(self, context, network): + binding = self._db.get_network_binding( + context.session, network['id']) + network[provider.NETWORK_TYPE] = binding.network_type + p = self._network_providers_map[binding.network_type] + p.extend_network_dict(network, binding) + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + net = super(HyperVNeutronPlugin, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + self._extend_network_dict_provider(context, net) + return net + + def delete_network(self, context, id): + session = context.session + with session.begin(subtransactions=True): + binding = self._db.get_network_binding(session, id) + self._process_l3_delete(context, id) + super(HyperVNeutronPlugin, self).delete_network(context, id) + p = self._network_providers_map[binding.network_type] + p.delete_network(session, binding) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is not necessary + self.notifier.network_delete(context, id) + + def get_network(self, context, id, fields=None): + net = super(HyperVNeutronPlugin, self).get_network(context, id, None) + self._extend_network_dict_provider(context, net) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None): + nets = super(HyperVNeutronPlugin, self).get_networks( + context, filters, None) + for net in nets: + self._extend_network_dict_provider(context, net) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + port_data = port['port'] + port = super(HyperVNeutronPlugin, self).create_port(context, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + return port + + def update_port(self, context, id, port): + original_port = super(HyperVNeutronPlugin, self).get_port( + context, id) + port_data = port['port'] + port = super(HyperVNeutronPlugin, self).update_port(context, id, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + if original_port['admin_state_up'] != port['admin_state_up']: + binding = self._db.get_network_binding( + None, port['network_id']) + self.notifier.port_update(context, port, + binding.network_type, + binding.segmentation_id, + binding.physical_network) + return port + + def delete_port(self, context, id, l3_port_check=True): + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + self.disassociate_floatingips(context, id) + + super(HyperVNeutronPlugin, self).delete_port(context, id) + self.notifier.port_delete(context, id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/model.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/model.py new file mode 100644 index 00000000..f42aeb1d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/model.py @@ -0,0 +1,53 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +from sqlalchemy import Boolean, Column, ForeignKey, Integer, String + +from neutron.db import model_base + + +class VlanAllocation(model_base.BASEV2): + """Represents allocation state of vlan_id on physical network.""" + __tablename__ = 'hyperv_vlan_allocations' + + physical_network = Column(String(64), nullable=False, primary_key=True) + vlan_id = Column(Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = Column(Boolean, nullable=False) + + def __init__(self, physical_network, vlan_id): + self.physical_network = physical_network + self.vlan_id = vlan_id + self.allocated = False + + +class NetworkBinding(model_base.BASEV2): + """Represents binding of virtual network to physical realization.""" + __tablename__ = 'hyperv_network_bindings' + + network_id = Column(String(36), + ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + network_type = Column(String(32), nullable=False) + physical_network = Column(String(64)) + segmentation_id = Column(Integer) + + def __init__(self, network_id, network_type, physical_network, + segmentation_id): + self.network_id = network_id + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/rpc_callbacks.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/rpc_callbacks.py new file mode 100644 index 00000000..40f018e7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/hyperv/rpc_callbacks.py @@ -0,0 +1,104 @@ +# Copyright 2013 Cloudbase Solutions SRL +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Alessandro Pilotti, Cloudbase Solutions Srl + +from neutron.common import constants as q_const +from neutron.common import rpc as n_rpc +from neutron.db import dhcp_rpc_base +from neutron.db import l3_rpc_base +from neutron.openstack.common import log as logging +from neutron.plugins.hyperv import db as hyperv_db + + +LOG = logging.getLogger(__name__) + + +class HyperVRpcCallbacks( + n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin): + + # history + # 1.1 Support Security Group RPC + # 1.2 Support get_devices_details_list + RPC_API_VERSION = '1.2' + + def __init__(self, notifier): + super(HyperVRpcCallbacks, self).__init__() + self.notifier = notifier + self._db = hyperv_db.HyperVPluginDB() + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self._db.get_port(device) + if port: + binding = self._db.get_network_binding(None, port['network_id']) + entry = {'device': device, + 'network_id': port['network_id'], + 'port_id': port['id'], + 'admin_state_up': port['admin_state_up'], + 'network_type': binding.network_type, + 'segmentation_id': binding.segmentation_id, + 'physical_network': binding.physical_network} + # Set the port status to UP + self._db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE) + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def get_devices_details_list(self, rpc_context, **kwargs): + return [ + self.get_device_details( + rpc_context, + device=device, + **kwargs + ) + for device in kwargs.pop('devices', []) + ] + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + # TODO(garyk) - live migration and port status + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self._db.get_port(device) + if port: + entry = {'device': device, + 'exists': True} + # Set port status to DOWN + self._db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def tunnel_sync(self, rpc_context, **kwargs): + """Tunnel sync. + + Dummy function for ovs agent running on Linux to + work with Hyper-V plugin and agent. + """ + entry = dict() + entry['tunnels'] = {} + # Return the list of tunnels IP's to the agent + return entry diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/agent/sdnve_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/agent/sdnve_neutron_agent.py new file mode 100644 index 00000000..b3203d4a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/agent/sdnve_neutron_agent.py @@ -0,0 +1,270 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mohammad Banikazemi, IBM Corp. + + +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as n_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils as n_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.ibm.common import config # noqa +from neutron.plugins.ibm.common import constants + + +LOG = logging.getLogger(__name__) + + +class SdnvePluginApi(agent_rpc.PluginApi): + + def sdnve_info(self, context, info): + return self.call(context, + self.make_msg('sdnve_info', info=info), + topic=self.topic) + + +class SdnveNeutronAgent(n_rpc.RpcCallback): + + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, interface_mappings, + info, root_helper, polling_interval, + controller_ip, reset_br, out_of_band): + '''The agent initialization. + + Sets the following parameters and sets up the integration + bridge and physical interfaces if need be. + :param integ_br: name of the integration bridge. + :param interface_mappings: interfaces to physical networks. + :param info: local IP address of this hypervisor. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to poll DB. + :param controller_ip: Ip address of SDN-VE controller. 
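+        :param reset_br: A boolean to reset the bridge if True.
+        :param out_of_band: A boolean indicating controller is out of band.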
+ ''' + + super(SdnveNeutronAgent, self).__init__() + self.root_helper = root_helper + self.int_bridge_name = integ_br + self.controller_ip = controller_ip + self.interface_mappings = interface_mappings + self.polling_interval = polling_interval + self.info = info + self.reset_br = reset_br + self.out_of_band = out_of_band + + self.agent_state = { + 'binary': 'neutron-sdnve-agent', + 'host': cfg.CONF.host, + 'topic': n_const.L2_AGENT_TOPIC, + 'configurations': {'interface_mappings': interface_mappings, + 'reset_br': self.reset_br, + 'out_of_band': self.out_of_band, + 'controller_ip': self.controller_ip}, + 'agent_type': n_const.AGENT_TYPE_SDNVE, + 'start_flag': True} + + if self.int_bridge_name: + self.int_br = self.setup_integration_br(integ_br, reset_br, + out_of_band, + self.controller_ip) + self.setup_physical_interfaces(self.interface_mappings) + else: + self.int_br = None + + self.setup_rpc() + + def _report_state(self): + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def setup_rpc(self): + if self.int_br: + mac = self.int_br.get_local_port_mac() + self.agent_id = '%s%s' % ('sdnve', (mac.replace(":", ""))) + else: + nameaddr = socket.gethostbyname(socket.gethostname()) + self.agent_id = '%s%s' % ('sdnve_', (nameaddr.replace(".", "_"))) + + self.topic = topics.AGENT + self.plugin_rpc = SdnvePluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + self.context = context.get_admin_context_without_session() + self.endpoints = [self] + consumers = [[constants.INFO, topics.UPDATE]] + + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + if self.polling_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=self.polling_interval) + + # Plugin calls the agents through the following + def info_update(self, context, **kwargs): + LOG.debug(_("info_update received")) + info = kwargs.get('info', {}) + new_controller = info.get('new_controller') + out_of_band = info.get('out_of_band') + if self.int_br and new_controller: + LOG.debug(_("info_update received. New controller" + "is to be set to: %s"), new_controller) + self.int_br.run_vsctl(["set-controller", + self.int_bridge_name, + "tcp:" + new_controller]) + if out_of_band: + LOG.debug(_("info_update received. New controller" + "is set to be out of band")) + self.int_br.set_db_attribute("controller", + self.int_bridge_name, + "connection-mode", + "out-of-band") + + def setup_integration_br(self, bridge_name, reset_br, out_of_band, + controller_ip=None): + '''Sets up the integration bridge. + + Create the bridge and remove all existing flows if reset_br is True. + Otherwise, creates the bridge if not already existing. + :param bridge_name: the name of the integration bridge. + :param reset_br: A boolean to rest the bridge if True. + :param out_of_band: A boolean indicating controller is out of band. + :param controller_ip: IP address to use as the bridge controller. 
+ :returns: the integration bridge + ''' + + int_br = ovs_lib.OVSBridge(bridge_name, self.root_helper) + if reset_br: + int_br.reset_bridge() + int_br.remove_all_flows() + else: + int_br.create() + + # set the controller + if controller_ip: + int_br.run_vsctl( + ["set-controller", bridge_name, "tcp:" + controller_ip]) + if out_of_band: + int_br.set_db_attribute("controller", bridge_name, + "connection-mode", "out-of-band") + + return int_br + + def setup_physical_interfaces(self, interface_mappings): + '''Sets up the physical network interfaces. + + Link physical interfaces to the integration bridge. + :param interface_mappings: map physical net names to interface names. + ''' + + for physical_network, interface in interface_mappings.iteritems(): + LOG.info(_("Mapping physical network %(physical_network)s to " + "interface %(interface)s"), + {'physical_network': physical_network, + 'interface': interface}) + # Connect the physical interface to the bridge + if not ip_lib.device_exists(interface, self.root_helper): + LOG.error(_("Interface %(interface)s for physical network " + "%(physical_network)s does not exist. Agent " + "terminated!"), + {'physical_network': physical_network, + 'interface': interface}) + raise SystemExit(1) + self.int_br.add_port(interface) + + def sdnve_info(self): + details = self.plugin_rpc.sdnve_info( + self.context, + {'info': self.info}) + return details + + def rpc_loop(self): + + while True: + start = time.time() + LOG.debug(_("Agent in the rpc loop.")) + + # sleep till end of polling interval + elapsed = (time.time() - start) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.info(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + + def daemon_loop(self): + self.rpc_loop() + + +def create_agent_config_map(config): + + interface_mappings = n_utils.parse_mappings( + config.SDNVE.interface_mappings) + + controller_ips = config.SDNVE.controller_ips + LOG.info(_("Controller IPs: %s"), controller_ips) + controller_ip = controller_ips[0] + + return { + 'integ_br': config.SDNVE.integration_bridge, + 'interface_mappings': interface_mappings, + 'controller_ip': controller_ip, + 'info': config.SDNVE.info, + 'root_helper': config.SDNVE_AGENT.root_helper, + 'polling_interval': config.SDNVE_AGENT.polling_interval, + 'reset_br': config.SDNVE.reset_bridge, + 'out_of_band': config.SDNVE.out_of_band} + + +def main(): + cfg.CONF.register_opts(ip_lib.OPTS) + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + + try: + agent_config = create_agent_config_map(cfg.CONF) + except ValueError as e: + LOG.exception(_("%s Agent terminated!"), e) + raise SystemExit(1) + + plugin = SdnveNeutronAgent(**agent_config) + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + plugin.daemon_loop() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/common/config.py new file mode 100644 index 00000000..68e2dbd4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/common/config.py @@ -0,0 +1,74 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mohammad Banikazemi, IBM Corp. + + +from oslo.config import cfg + + +DEFAULT_INTERFACE_MAPPINGS = [] +DEFAULT_CONTROLLER_IPS = ['127.0.0.1'] + +sdnve_opts = [ + cfg.BoolOpt('use_fake_controller', default=False, + help=_("If set to True uses a fake controller.")), + cfg.StrOpt('base_url', default='/one/nb/v2/', + help=_("Base URL for SDN-VE controller REST API")), + cfg.ListOpt('controller_ips', default=DEFAULT_CONTROLLER_IPS, + help=_("List of IP addresses of SDN-VE controller(s)")), + cfg.StrOpt('info', default='sdnve_info_string', + help=_("SDN-VE RPC subject")), + cfg.StrOpt('port', default='8443', + help=_("SDN-VE controller port number")), + cfg.StrOpt('format', default='json', + help=_("SDN-VE request/response format")), + cfg.StrOpt('userid', default='admin', + help=_("SDN-VE administrator user id")), + cfg.StrOpt('password', default='admin', secret=True, + help=_("SDN-VE administrator password")), + cfg.StrOpt('integration_bridge', + help=_("Integration bridge to use")), + cfg.BoolOpt('reset_bridge', default=True, + help=_("Reset the integration bridge before use")), + cfg.BoolOpt('out_of_band', default=True, + help=_("Indicating if controller is out of band or not")), + cfg.ListOpt('interface_mappings', + default=DEFAULT_INTERFACE_MAPPINGS, + help=_("List of :")), + cfg.StrOpt('default_tenant_type', default='OVERLAY', + help=_("Tenant type: OVERLAY (default) or OF")), + cfg.StrOpt('overlay_signature', default='SDNVE-OVERLAY', + help=_("The string in tenant description that indicates " + "the tenant is a OVERLAY tenant")), + cfg.StrOpt('of_signature', default='SDNVE-OF', + help=_("The string in tenant description that indicates " + "the tenant is a OF tenant")), +] + +sdnve_agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("Agent polling interval if necessary")), + cfg.StrOpt('root_helper', default='sudo', + help=_("Using root helper")), + cfg.BoolOpt('rpc', default=True, + help=_("Whether using rpc")), + +] + + +cfg.CONF.register_opts(sdnve_opts, "SDNVE") +cfg.CONF.register_opts(sdnve_agent_opts, "SDNVE_AGENT") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/sdnve_api.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/sdnve_api.py new file mode 100644 index 00000000..50e689c1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/sdnve_api.py @@ -0,0 +1,388 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mohammad Banikazemi, IBM Corp. 
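+#
+# REST client helpers for the SDN-VE plugin: RequestHandler wraps the
+# controller's northbound HTTP API and fails over across the configured
+# controller_ips, Client maps Neutron resources onto that API, and
+# KeystoneClient resolves tenant names/types when tenants are created
+# on the controller.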
+ + +import httplib +import urllib + +import httplib2 +from keystoneclient.v2_0 import client as keyclient +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.openstack.common import log as logging +from neutron.plugins.ibm.common import config # noqa +from neutron.plugins.ibm.common import constants +from neutron import wsgi + +LOG = logging.getLogger(__name__) + +SDNVE_VERSION = '2.0' +SDNVE_ACTION_PREFIX = '/sdnve' +SDNVE_RETRIES = 0 +SDNVE_RETRIY_INTERVAL = 1 +SDNVE_TENANT_TYPE_OVERLAY = u'DOVE' +SDNVE_URL = 'https://%s:%s%s' + + +class RequestHandler(object): + '''Handles processing requests to and responses from controller.''' + + def __init__(self, controller_ips=None, port=None, ssl=None, + base_url=None, userid=None, password=None, + timeout=10, formats=None): + '''Initializes the RequestHandler for communication with controller + + Following keyword arguments are used; if not specified, default + values are used. + :param port: Username for authentication. + :param timeout: Time out for http requests. + :param userid: User id for accessing controller. + :param password: Password for accessing the controller. + :param base_url: The base url for the controller. + :param controller_ips: List of controller IP addresses. + :param formats: Supported formats. + ''' + self.port = port or cfg.CONF.SDNVE.port + self.timeout = timeout + self._s_meta = None + self.connection = None + self.httpclient = httplib2.Http( + disable_ssl_certificate_validation=True) + self.cookie = None + + userid = userid or cfg.CONF.SDNVE.userid + password = password or cfg.CONF.SDNVE.password + if (userid and password): + self.httpclient.add_credentials(userid, password) + + self.base_url = base_url or cfg.CONF.SDNVE.base_url + self.controller_ips = controller_ips or cfg.CONF.SDNVE.controller_ips + + LOG.info(_("The IP addr of available SDN-VE controllers: %s"), + self.controller_ips) + self.controller_ip = self.controller_ips[0] + LOG.info(_("The SDN-VE controller IP address: %s"), + self.controller_ip) + + self.new_controller = False + self.format = formats or cfg.CONF.SDNVE.format + + self.version = SDNVE_VERSION + self.action_prefix = SDNVE_ACTION_PREFIX + self.retries = SDNVE_RETRIES + self.retry_interval = SDNVE_RETRIY_INTERVAL + + def serialize(self, data): + '''Serializes a dictionary with a single key.''' + + if isinstance(data, dict): + return wsgi.Serializer().serialize(data, self.content_type()) + elif data: + raise TypeError(_("unable to serialize object type: '%s'") % + type(data)) + + def deserialize(self, data, status_code): + '''Deserializes an xml or json string into a dictionary.''' + + # NOTE(mb): Temporary fix for backend controller requirement + data = data.replace("router_external", "router:external") + + if status_code == httplib.NO_CONTENT: + return data + try: + deserialized_data = wsgi.Serializer( + metadata=self._s_meta).deserialize(data, self.content_type()) + deserialized_data = deserialized_data['body'] + except Exception: + deserialized_data = data + + return deserialized_data + + def content_type(self, format=None): + '''Returns the mime-type for either 'xml' or 'json'.''' + + return 'application/%s' % (format or self.format) + + def delete(self, url, body=None, headers=None, params=None): + return self.do_request("DELETE", url, body=body, + headers=headers, params=params) + + def get(self, url, body=None, headers=None, params=None): + return self.do_request("GET", url, body=body, + headers=headers, params=params) + + def post(self, url, body=None, 
headers=None, params=None): + return self.do_request("POST", url, body=body, + headers=headers, params=params) + + def put(self, url, body=None, headers=None, params=None): + return self.do_request("PUT", url, body=body, + headers=headers, params=params) + + def do_request(self, method, url, body=None, headers=None, + params=None, connection_type=None): + + status_code = -1 + replybody_deserialized = '' + + if body: + body = self.serialize(body) + + self.headers = headers or {'Content-Type': self.content_type()} + if self.cookie: + self.headers['cookie'] = self.cookie + + if self.controller_ip != self.controller_ips[0]: + controllers = [self.controller_ip] + else: + controllers = [] + controllers.extend(self.controller_ips) + + for controller_ip in controllers: + serverurl = SDNVE_URL % (controller_ip, self.port, self.base_url) + myurl = serverurl + url + if params and isinstance(params, dict): + myurl += '?' + urllib.urlencode(params, doseq=1) + + try: + LOG.debug(_("Sending request to SDN-VE. url: " + "%(myurl)s method: %(method)s body: " + "%(body)s header: %(header)s "), + {'myurl': myurl, 'method': method, + 'body': body, 'header': self.headers}) + resp, replybody = self.httpclient.request( + myurl, method=method, body=body, headers=self.headers) + LOG.debug(("Response recd from SDN-VE. resp: %(resp)s" + "body: %(body)s"), + {'resp': resp.status, 'body': replybody}) + status_code = resp.status + + except Exception as e: + LOG.error(_("Error: Could not reach server: %(url)s " + "Exception: %(excp)s."), + {'url': myurl, 'excp': e}) + self.cookie = None + continue + + if status_code not in constants.HTTP_ACCEPTABLE: + LOG.debug(_("Error message: %(reply)s -- Status: %(status)s"), + {'reply': replybody, 'status': status_code}) + else: + LOG.debug(_("Received response status: %s"), status_code) + + if resp.get('set-cookie'): + self.cookie = resp['set-cookie'] + replybody_deserialized = self.deserialize( + replybody, + status_code) + LOG.debug(_("Deserialized body: %s"), replybody_deserialized) + if controller_ip != self.controller_ip: + # bcast the change of controller + self.new_controller = True + self.controller_ip = controller_ip + + return (status_code, replybody_deserialized) + + return (httplib.REQUEST_TIMEOUT, 'Could not reach server(s)') + + +class Client(RequestHandler): + '''Client for SDNVE controller.''' + + def __init__(self): + '''Initialize a new SDNVE client.''' + super(Client, self).__init__() + + self.keystoneclient = KeystoneClient() + + resource_path = { + 'network': "ln/networks/", + 'subnet': "ln/subnets/", + 'port': "ln/ports/", + 'tenant': "ln/tenants/", + 'router': "ln/routers/", + 'floatingip': "ln/floatingips/", + } + + def process_request(self, body): + '''Processes requests according to requirements of controller.''' + if self.format == 'json': + body = dict( + (k.replace(':', '_'), v) for k, v in body.items() + if attributes.is_attr_set(v)) + return body + + def sdnve_list(self, resource, **params): + '''Fetches a list of resources.''' + + res = self.resource_path.get(resource, None) + if not res: + LOG.info(_("Bad resource for forming a list request")) + return 0, '' + + return self.get(res, params=params) + + def sdnve_show(self, resource, specific, **params): + '''Fetches information of a certain resource.''' + + res = self.resource_path.get(resource, None) + if not res: + LOG.info(_("Bad resource for forming a show request")) + return 0, '' + + return self.get(res + specific, params=params) + + def sdnve_create(self, resource, body): + '''Creates a new 
resource.''' + + res = self.resource_path.get(resource, None) + if not res: + LOG.info(_("Bad resource for forming a create request")) + return 0, '' + + body = self.process_request(body) + status, data = self.post(res, body=body) + return (status, data) + + def sdnve_update(self, resource, specific, body=None): + '''Updates a resource.''' + + res = self.resource_path.get(resource, None) + if not res: + LOG.info(_("Bad resource for forming a update request")) + return 0, '' + + body = self.process_request(body) + return self.put(res + specific, body=body) + + def sdnve_delete(self, resource, specific): + '''Deletes the specified resource.''' + + res = self.resource_path.get(resource, None) + if not res: + LOG.info(_("Bad resource for forming a delete request")) + return 0, '' + + return self.delete(res + specific) + + def _tenant_id_conversion(self, osid): + return osid + + def sdnve_get_tenant_byid(self, os_tenant_id): + sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id) + resp, content = self.sdnve_show('tenant', sdnve_tenant_id) + if resp in constants.HTTP_ACCEPTABLE: + tenant_id = content.get('id') + tenant_type = content.get('network_type') + if tenant_type == SDNVE_TENANT_TYPE_OVERLAY: + tenant_type = constants.TENANT_TYPE_OVERLAY + return tenant_id, tenant_type + return None, None + + def sdnve_check_and_create_tenant(self, os_tenant_id, network_type=None): + + if not os_tenant_id: + return + tenant_id, tenant_type = self.sdnve_get_tenant_byid(os_tenant_id) + if tenant_id: + if not network_type: + return tenant_id + if tenant_type != network_type: + LOG.info(_("Non matching tenant and network types: " + "%(ttype)s %(ntype)s"), + {'ttype': tenant_type, 'ntype': network_type}) + return + return tenant_id + + # Have to create a new tenant + sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id) + if not network_type: + network_type = self.keystoneclient.get_tenant_type(os_tenant_id) + if network_type == constants.TENANT_TYPE_OVERLAY: + network_type = SDNVE_TENANT_TYPE_OVERLAY + + pinn_desc = ("Created by SDN-VE Neutron Plugin, OS project name = " + + self.keystoneclient.get_tenant_name(os_tenant_id)) + + res, content = self.sdnve_create('tenant', + {'id': sdnve_tenant_id, + 'name': os_tenant_id, + 'network_type': network_type, + 'description': pinn_desc}) + if res not in constants.HTTP_ACCEPTABLE: + return + + return sdnve_tenant_id + + def sdnve_get_controller(self): + if self.new_controller: + self.new_controller = False + return self.controller_ip + + +class KeystoneClient(object): + + def __init__(self, username=None, tenant_name=None, password=None, + auth_url=None): + + keystone_conf = cfg.CONF.keystone_authtoken + keystone_auth_url = ('%s://%s:%s/v2.0/' % + (keystone_conf.auth_protocol, + keystone_conf.auth_host, + keystone_conf.auth_port)) + + username = username or keystone_conf.admin_user + tenant_name = tenant_name or keystone_conf.admin_tenant_name + password = password or keystone_conf.admin_password + auth_url = auth_url or keystone_auth_url + + self.overlay_signature = cfg.CONF.SDNVE.overlay_signature + self.of_signature = cfg.CONF.SDNVE.of_signature + self.default_tenant_type = cfg.CONF.SDNVE.default_tenant_type + + self.client = keyclient.Client(username=username, + password=password, + tenant_name=tenant_name, + auth_url=auth_url) + + def get_tenant_byid(self, id): + + try: + return self.client.tenants.get(id) + except Exception: + LOG.exception(_("Did not find tenant: %r"), id) + + def get_tenant_type(self, id): + + tenant = self.get_tenant_byid(id) + if 
tenant: + description = tenant.description + if description: + if (description.find(self.overlay_signature) >= 0): + return constants.TENANT_TYPE_OVERLAY + if (description.find(self.of_signature) >= 0): + return constants.TENANT_TYPE_OF + return self.default_tenant_type + + def get_tenant_name(self, id): + + tenant = self.get_tenant_byid(id) + if tenant: + return tenant.name + return 'not found' diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/sdnve_neutron_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/sdnve_neutron_plugin.py new file mode 100644 index 00000000..20971227 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ibm/sdnve_neutron_plugin.py @@ -0,0 +1,666 @@ +# Copyright 2014 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mohammad Banikazemi, IBM Corp. + + +import functools + +from oslo.config import cfg + +from neutron.common import constants as n_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import l3_gwmode_db +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.ibm.common import config # noqa +from neutron.plugins.ibm.common import constants +from neutron.plugins.ibm.common import exceptions as sdnve_exc +from neutron.plugins.ibm import sdnve_api as sdnve +from neutron.plugins.ibm import sdnve_api_fake as sdnve_fake + +LOG = logging.getLogger(__name__) + + +class SdnveRpcCallbacks(): + + def __init__(self, notifier): + self.notifier = notifier # used to notify the agent + + def sdnve_info(self, rpc_context, **kwargs): + '''Update new information.''' + info = kwargs.get('info') + # Notify all other listening agents + self.notifier.info_update(rpc_context, info) + return info + + +class AgentNotifierApi(n_rpc.RpcProxy): + '''Agent side of the SDN-VE rpc API.''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + self.topic_info_update = topics.get_topic_name(topic, + constants.INFO, + topics.UPDATE) + + def info_update(self, context, info): + self.fanout_cast(context, + self.make_msg('info_update', + info=info), + topic=self.topic_info_update) + + +def _ha(func): + '''Supports the high availability feature of the controller.''' + + @functools.wraps(func) + def hawrapper(self, *args, **kwargs): + '''This wrapper sets the new controller if necessary + + When a controller is detected to be not responding, and a + new controller is chosen to be used in its place, this decorator + makes sure the existing integration bridges are set to point + to the new 
controller by calling the set_controller method. + ''' + ret_func = func(self, *args, **kwargs) + self.set_controller(args[0]) + return ret_func + return hawrapper + + +class SdnvePluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + portbindings_db.PortBindingMixin, + l3_gwmode_db.L3_NAT_db_mixin, + agents_db.AgentDbMixin, + ): + + ''' + Implement the Neutron abstractions using SDN-VE SDN Controller. + ''' + + __native_bulk_support = False + __native_pagination_support = False + __native_sorting_support = False + + supported_extension_aliases = ["binding", "router", "external-net", + "agent", "quotas"] + + def __init__(self, configfile=None): + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: {portbindings.CAP_PORT_FILTER: False}} + + super(SdnvePluginV2, self).__init__() + self.setup_rpc() + self.sdnve_controller_select() + if self.fake_controller: + self.sdnve_client = sdnve_fake.FakeClient() + else: + self.sdnve_client = sdnve.Client() + + def sdnve_controller_select(self): + self.fake_controller = cfg.CONF.SDNVE.use_fake_controller + + def setup_rpc(self): + # RPC support + self.topic = topics.PLUGIN + self.conn = n_rpc.create_connection(new=True) + self.notifier = AgentNotifierApi(topics.AGENT) + self.endpoints = [SdnveRpcCallbacks(self.notifier), + agents_db.AgentExtRpcCallback()] + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _update_base_binding_dict(self, tenant_type): + if tenant_type == constants.TENANT_TYPE_OVERLAY: + self.base_binding_dict[ + portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE + if tenant_type == constants.TENANT_TYPE_OF: + self.base_binding_dict[ + portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS + + def set_controller(self, context): + LOG.info(_("Set a new controller if needed.")) + new_controller = self.sdnve_client.sdnve_get_controller() + if new_controller: + self.notifier.info_update( + context, + {'new_controller': new_controller}) + LOG.info(_("Set the controller to a new controller: %s"), + new_controller) + + def _process_request(self, request, current): + new_request = dict( + (k, v) for k, v in request.items() + if v != current.get(k)) + + msg = _("Original SDN-VE HTTP request: %(orig)s; New request: %(new)s") + LOG.debug(msg, {'orig': request, 'new': new_request}) + return new_request + + # + # Network + # + + @_ha + def create_network(self, context, network): + LOG.debug(_("Create network in progress: %r"), network) + session = context.session + + tenant_id = self._get_tenant_id_for_create(context, network['network']) + # Create a new SDN-VE tenant if need be + sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant( + tenant_id) + if sdnve_tenant is None: + raise sdnve_exc.SdnveException( + msg=_('Create net failed: no SDN-VE tenant.')) + + with session.begin(subtransactions=True): + net = super(SdnvePluginV2, self).create_network(context, network) + self._process_l3_create(context, net, network['network']) + + # Create SDN-VE network + (res, data) = self.sdnve_client.sdnve_create('network', net) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).delete_network(context, net['id']) + raise sdnve_exc.SdnveException( + msg=(_('Create net failed in SDN-VE: %s') % res)) + + LOG.debug(_("Created network: %s"), net['id']) + return net + + @_ha + def update_network(self, context, id, network): + LOG.debug(_("Update network 
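# Illustrative sketch (not part of the original patch): _process_request
# above sends the controller only the attributes that actually changed by
# diffing the requested values against the current DB object. A minimal
# standalone equivalent, with hypothetical data:
def diff_request(request, current):
    # keep only keys whose requested value differs from the stored value
    return dict((k, v) for k, v in request.items() if v != current.get(k))

current = {'name': 'net-a', 'admin_state_up': True, 'shared': False}
request = {'name': 'net-b', 'admin_state_up': True}
assert diff_request(request, current) == {'name': 'net-b'}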
in progress: %r"), network) + session = context.session + + processed_request = {} + with session.begin(subtransactions=True): + original_network = super(SdnvePluginV2, self).get_network( + context, id) + processed_request['network'] = self._process_request( + network['network'], original_network) + net = super(SdnvePluginV2, self).update_network( + context, id, network) + self._process_l3_update(context, net, network['network']) + + if processed_request['network']: + (res, data) = self.sdnve_client.sdnve_update( + 'network', id, processed_request['network']) + if res not in constants.HTTP_ACCEPTABLE: + net = super(SdnvePluginV2, self).update_network( + context, id, {'network': original_network}) + raise sdnve_exc.SdnveException( + msg=(_('Update net failed in SDN-VE: %s') % res)) + + return net + + @_ha + def delete_network(self, context, id): + LOG.debug(_("Delete network in progress: %s"), id) + session = context.session + + with session.begin(subtransactions=True): + self._process_l3_delete(context, id) + super(SdnvePluginV2, self).delete_network(context, id) + + (res, data) = self.sdnve_client.sdnve_delete('network', id) + if res not in constants.HTTP_ACCEPTABLE: + LOG.error( + _("Delete net failed after deleting the network in DB: %s"), + res) + + @_ha + def get_network(self, context, id, fields=None): + LOG.debug(_("Get network in progress: %s"), id) + return super(SdnvePluginV2, self).get_network(context, id, fields) + + @_ha + def get_networks(self, context, filters=None, fields=None, sorts=None, + limit=None, marker=None, page_reverse=False): + LOG.debug(_("Get networks in progress")) + return super(SdnvePluginV2, self).get_networks( + context, filters, fields, sorts, limit, marker, page_reverse) + + # + # Port + # + + @_ha + def create_port(self, context, port): + LOG.debug(_("Create port in progress: %r"), port) + session = context.session + + # Set port status as 'ACTIVE' to avoid needing the agent + port['port']['status'] = n_const.PORT_STATUS_ACTIVE + port_data = port['port'] + + with session.begin(subtransactions=True): + port = super(SdnvePluginV2, self).create_port(context, port) + if 'id' not in port: + return port + # If the tenant_id is set to '' by create_port, add the id to + # the request being sent to the controller as the controller + # requires a tenant id + tenant_id = port.get('tenant_id') + if not tenant_id: + LOG.debug(_("Create port does not have tenant id info")) + original_network = super(SdnvePluginV2, self).get_network( + context, port['network_id']) + original_tenant_id = original_network['tenant_id'] + port['tenant_id'] = original_tenant_id + LOG.debug( + _("Create port does not have tenant id info; " + "obtained is: %s"), + port['tenant_id']) + + os_tenant_id = tenant_id + id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid( + os_tenant_id) + self._update_base_binding_dict(tenant_type) + self._process_portbindings_create_and_update(context, + port_data, port) + + # NOTE(mb): Remove this block when controller is updated + # Remove the information that the controller does not accept + sdnve_port = port.copy() + sdnve_port.pop('device_id', None) + sdnve_port.pop('device_owner', None) + + (res, data) = self.sdnve_client.sdnve_create('port', sdnve_port) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).delete_port(context, port['id']) + raise sdnve_exc.SdnveException( + msg=(_('Create port failed in SDN-VE: %s') % res)) + + LOG.debug(_("Created port: %s"), port.get('id', 'id not found')) + return port + + @_ha + def 
update_port(self, context, id, port): + LOG.debug(_("Update port in progress: %r"), port) + session = context.session + + processed_request = {} + with session.begin(subtransactions=True): + original_port = super(SdnvePluginV2, self).get_port( + context, id) + processed_request['port'] = self._process_request( + port['port'], original_port) + updated_port = super(SdnvePluginV2, self).update_port( + context, id, port) + + os_tenant_id = updated_port['tenant_id'] + id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid( + os_tenant_id) + self._update_base_binding_dict(tenant_type) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + + if processed_request['port']: + (res, data) = self.sdnve_client.sdnve_update( + 'port', id, processed_request['port']) + if res not in constants.HTTP_ACCEPTABLE: + updated_port = super(SdnvePluginV2, self).update_port( + context, id, {'port': original_port}) + raise sdnve_exc.SdnveException( + msg=(_('Update port failed in SDN-VE: %s') % res)) + + return updated_port + + @_ha + def delete_port(self, context, id, l3_port_check=True): + LOG.debug(_("Delete port in progress: %s"), id) + + # if needed, check to see if this is a port owned by + # an l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + self.disassociate_floatingips(context, id) + + super(SdnvePluginV2, self).delete_port(context, id) + + (res, data) = self.sdnve_client.sdnve_delete('port', id) + if res not in constants.HTTP_ACCEPTABLE: + LOG.error( + _("Delete port operation failed in SDN-VE " + "after deleting the port from DB: %s"), res) + + # + # Subnet + # + + @_ha + def create_subnet(self, context, subnet): + LOG.debug(_("Create subnet in progress: %r"), subnet) + new_subnet = super(SdnvePluginV2, self).create_subnet(context, subnet) + + # Note(mb): Use of null string currently required by controller + sdnve_subnet = new_subnet.copy() + if subnet.get('gateway_ip') is None: + sdnve_subnet['gateway_ip'] = 'null' + (res, data) = self.sdnve_client.sdnve_create('subnet', sdnve_subnet) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).delete_subnet(context, + new_subnet['id']) + raise sdnve_exc.SdnveException( + msg=(_('Create subnet failed in SDN-VE: %s') % res)) + + LOG.debug(_("Subnet created: %s"), new_subnet['id']) + + return new_subnet + + @_ha + def update_subnet(self, context, id, subnet): + LOG.debug(_("Update subnet in progress: %r"), subnet) + session = context.session + + processed_request = {} + with session.begin(subtransactions=True): + original_subnet = super(SdnvePluginV2, self).get_subnet( + context, id) + processed_request['subnet'] = self._process_request( + subnet['subnet'], original_subnet) + updated_subnet = super(SdnvePluginV2, self).update_subnet( + context, id, subnet) + + if processed_request['subnet']: + # Note(mb): Use of string containing null required by controller + if 'gateway_ip' in processed_request['subnet']: + if processed_request['subnet'].get('gateway_ip') is None: + processed_request['subnet']['gateway_ip'] = 'null' + (res, data) = self.sdnve_client.sdnve_update( + 'subnet', id, processed_request['subnet']) + if res not in constants.HTTP_ACCEPTABLE: + for key in subnet['subnet'].keys(): + subnet['subnet'][key] = original_subnet[key] + super(SdnvePluginV2, self).update_subnet( + context, id, subnet) + raise sdnve_exc.SdnveException( + msg=(_('Update subnet failed in SDN-VE: %s') % res)) + + return updated_subnet + + @_ha + def 
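# Illustrative sketch (not part of the original patch): per the Note(mb)
# comments above, the controller cannot take a JSON null for gateway_ip,
# so create_subnet/update_subnet substitute the literal string 'null'
# before calling sdnve_create/sdnve_update. Minimal equivalent:
def prepare_subnet_for_controller(subnet):
    body = subnet.copy()
    if body.get('gateway_ip') is None:
        body['gateway_ip'] = 'null'   # placeholder string the controller expects
    return body

assert prepare_subnet_for_controller({'cidr': '10.0.0.0/24'})['gateway_ip'] == 'null'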
delete_subnet(self, context, id): + LOG.debug(_("Delete subnet in progress: %s"), id) + super(SdnvePluginV2, self).delete_subnet(context, id) + + (res, data) = self.sdnve_client.sdnve_delete('subnet', id) + if res not in constants.HTTP_ACCEPTABLE: + LOG.error(_("Delete subnet operation failed in SDN-VE after " + "deleting the subnet from DB: %s"), res) + + # + # Router + # + + @_ha + def create_router(self, context, router): + LOG.debug(_("Create router in progress: %r"), router) + + if router['router']['admin_state_up'] is False: + LOG.warning(_('Ignoring admin_state_up=False for router=%r. ' + 'Overriding with True'), router) + router['router']['admin_state_up'] = True + + tenant_id = self._get_tenant_id_for_create(context, router['router']) + # Create a new SDN-VE tenant if need be + sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant( + tenant_id) + if sdnve_tenant is None: + raise sdnve_exc.SdnveException( + msg=_('Create router failed: no SDN-VE tenant.')) + + new_router = super(SdnvePluginV2, self).create_router(context, router) + # Create SDN-VE router + (res, data) = self.sdnve_client.sdnve_create('router', new_router) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).delete_router(context, new_router['id']) + raise sdnve_exc.SdnveException( + msg=(_('Create router failed in SDN-VE: %s') % res)) + + LOG.debug(_("Router created: %r"), new_router) + return new_router + + @_ha + def update_router(self, context, id, router): + LOG.debug(_("Update router in progress: id=%(id)s " + "router=%(router)r"), + {'id': id, 'router': router}) + session = context.session + + processed_request = {} + if not router['router'].get('admin_state_up', True): + raise n_exc.NotImplementedError(_('admin_state_up=False ' + 'routers are not ' + 'supported.')) + + with session.begin(subtransactions=True): + original_router = super(SdnvePluginV2, self).get_router( + context, id) + processed_request['router'] = self._process_request( + router['router'], original_router) + updated_router = super(SdnvePluginV2, self).update_router( + context, id, router) + + if processed_request['router']: + egw = processed_request['router'].get('external_gateway_info') + # Check for existing empty set (different from None) in request + if egw == {}: + processed_request['router'][ + 'external_gateway_info'] = {'network_id': 'null'} + (res, data) = self.sdnve_client.sdnve_update( + 'router', id, processed_request['router']) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).update_router( + context, id, {'router': original_router}) + raise sdnve_exc.SdnveException( + msg=(_('Update router failed in SDN-VE: %s') % res)) + + return updated_router + + @_ha + def delete_router(self, context, id): + LOG.debug(_("Delete router in progress: %s"), id) + + super(SdnvePluginV2, self).delete_router(context, id) + + (res, data) = self.sdnve_client.sdnve_delete('router', id) + if res not in constants.HTTP_ACCEPTABLE: + LOG.error( + _("Delete router operation failed in SDN-VE after " + "deleting the router in DB: %s"), res) + + @_ha + def add_router_interface(self, context, router_id, interface_info): + LOG.debug(_("Add router interface in progress: " + "router_id=%(router_id)s " + "interface_info=%(interface_info)r"), + {'router_id': router_id, 'interface_info': interface_info}) + + new_interface = super(SdnvePluginV2, self).add_router_interface( + context, router_id, interface_info) + LOG.debug( + _("SdnvePluginV2.add_router_interface called. 
Port info: %s"), + new_interface) + request_info = interface_info.copy() + request_info['port_id'] = new_interface['port_id'] + # Add the subnet_id to the request sent to the controller + if 'subnet_id' not in interface_info: + request_info['subnet_id'] = new_interface['subnet_id'] + + (res, data) = self.sdnve_client.sdnve_update( + 'router', router_id + '/add_router_interface', request_info) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).remove_router_interface( + context, router_id, interface_info) + raise sdnve_exc.SdnveException( + msg=(_('Update router-add-interface failed in SDN-VE: %s') % + res)) + + LOG.debug(_("Added router interface: %r"), new_interface) + return new_interface + + def _add_router_interface_only(self, context, router_id, interface_info): + LOG.debug(_("Add router interface only called: " + "router_id=%(router_id)s " + "interface_info=%(interface_info)r"), + {'router_id': router_id, 'interface_info': interface_info}) + + port_id = interface_info.get('port_id') + if port_id: + (res, data) = self.sdnve_client.sdnve_update( + 'router', router_id + '/add_router_interface', interface_info) + if res not in constants.HTTP_ACCEPTABLE: + LOG.error(_("SdnvePluginV2._add_router_interface_only: " + "failed to add the interface in the roll back." + " of a remove_router_interface operation")) + + @_ha + def remove_router_interface(self, context, router_id, interface_info): + LOG.debug(_("Remove router interface in progress: " + "router_id=%(router_id)s " + "interface_info=%(interface_info)r"), + {'router_id': router_id, 'interface_info': interface_info}) + + subnet_id = interface_info.get('subnet_id') + port_id = interface_info.get('port_id') + if not subnet_id: + if not port_id: + raise sdnve_exc.BadInputException(msg=_('No port ID')) + myport = super(SdnvePluginV2, self).get_port(context, port_id) + LOG.debug(_("SdnvePluginV2.remove_router_interface port: %s"), + myport) + myfixed_ips = myport.get('fixed_ips') + if not myfixed_ips: + raise sdnve_exc.BadInputException(msg=_('No fixed IP')) + subnet_id = myfixed_ips[0].get('subnet_id') + if subnet_id: + interface_info['subnet_id'] = subnet_id + LOG.debug( + _("SdnvePluginV2.remove_router_interface subnet_id: %s"), + subnet_id) + else: + if not port_id: + # The backend requires port id info in the request + subnet = super(SdnvePluginV2, self).get_subnet(context, + subnet_id) + df = {'device_id': [router_id], + 'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF], + 'network_id': [subnet['network_id']]} + ports = self.get_ports(context, filters=df) + if ports: + pid = ports[0]['id'] + interface_info['port_id'] = pid + msg = ("SdnvePluginV2.remove_router_interface " + "subnet_id: %(sid)s port_id: %(pid)s") + LOG.debug(msg, {'sid': subnet_id, 'pid': pid}) + + (res, data) = self.sdnve_client.sdnve_update( + 'router', router_id + '/remove_router_interface', interface_info) + + if res not in constants.HTTP_ACCEPTABLE: + raise sdnve_exc.SdnveException( + msg=(_('Update router-remove-interface failed SDN-VE: %s') % + res)) + + session = context.session + with session.begin(subtransactions=True): + try: + info = super(SdnvePluginV2, self).remove_router_interface( + context, router_id, interface_info) + except Exception: + with excutils.save_and_reraise_exception(): + self._add_router_interface_only(context, + router_id, interface_info) + + return info + + # + # Floating Ip + # + + @_ha + def create_floatingip(self, context, floatingip): + LOG.debug(_("Create floatingip in progress: %r"), + floatingip) + 
new_floatingip = super(SdnvePluginV2, self).create_floatingip( + context, floatingip) + + (res, data) = self.sdnve_client.sdnve_create( + 'floatingip', {'floatingip': new_floatingip}) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).delete_floatingip( + context, new_floatingip['id']) + raise sdnve_exc.SdnveException( + msg=(_('Creating floating ip operation failed ' + 'in SDN-VE controller: %s') % res)) + + LOG.debug(_("Created floatingip : %r"), new_floatingip) + return new_floatingip + + @_ha + def update_floatingip(self, context, id, floatingip): + LOG.debug(_("Update floatingip in progress: %r"), floatingip) + session = context.session + + processed_request = {} + with session.begin(subtransactions=True): + original_floatingip = super( + SdnvePluginV2, self).get_floatingip(context, id) + processed_request['floatingip'] = self._process_request( + floatingip['floatingip'], original_floatingip) + updated_floatingip = super( + SdnvePluginV2, self).update_floatingip(context, id, floatingip) + + if processed_request['floatingip']: + (res, data) = self.sdnve_client.sdnve_update( + 'floatingip', id, + {'floatingip': processed_request['floatingip']}) + if res not in constants.HTTP_ACCEPTABLE: + super(SdnvePluginV2, self).update_floatingip( + context, id, {'floatingip': original_floatingip}) + raise sdnve_exc.SdnveException( + msg=(_('Update floating ip failed in SDN-VE: %s') % res)) + + return updated_floatingip + + @_ha + def delete_floatingip(self, context, id): + LOG.debug(_("Delete floatingip in progress: %s"), id) + super(SdnvePluginV2, self).delete_floatingip(context, id) + + (res, data) = self.sdnve_client.sdnve_delete('floatingip', id) + if res not in constants.HTTP_ACCEPTABLE: + LOG.error(_("Delete floatingip failed in SDN-VE: %s"), res) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py new file mode 100644 index 00000000..fa60b19d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -0,0 +1,1023 @@ +#!/usr/bin/env python +# Copyright 2012 Cisco Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# +# Performs per host Linux Bridge configuration for Neutron. +# Based on the structure of the OpenVSwitch agent in the +# Neutron OpenVSwitch Plugin. +# @author: Sumit Naiksatam, Cisco Systems, Inc. 
+ +import os +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent import l2population_rpc as l2pop_rpc +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import constants +from neutron.common import exceptions +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils as q_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.linuxbridge.common import config # noqa +from neutron.plugins.linuxbridge.common import constants as lconst + + +LOG = logging.getLogger(__name__) + +BRIDGE_NAME_PREFIX = "brq" +TAP_INTERFACE_PREFIX = "tap" +BRIDGE_FS = "/sys/devices/virtual/net/" +BRIDGE_NAME_PLACEHOLDER = "bridge_name" +BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/" +DEVICE_NAME_PLACEHOLDER = "device_name" +BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport" +VXLAN_INTERFACE_PREFIX = "vxlan-" + + +class NetworkSegment: + def __init__(self, network_type, physical_network, segmentation_id): + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + + +class LinuxBridgeManager: + def __init__(self, interface_mappings, root_helper): + self.interface_mappings = interface_mappings + self.root_helper = root_helper + self.ip = ip_lib.IPWrapper(self.root_helper) + # VXLAN related parameters: + self.local_ip = cfg.CONF.VXLAN.local_ip + self.vxlan_mode = lconst.VXLAN_NONE + if cfg.CONF.VXLAN.enable_vxlan: + self.local_int = self.get_interface_by_ip(self.local_ip) + if self.local_int: + self.check_vxlan_support() + else: + LOG.warning(_('VXLAN is enabled, a valid local_ip ' + 'must be provided')) + # Store network mapping to segments + self.network_map = {} + + def interface_exists_on_bridge(self, bridge, interface): + directory = '/sys/class/net/%s/brif' % bridge + for filename in os.listdir(directory): + if filename == interface: + return True + return False + + def get_bridge_name(self, network_id): + if not network_id: + LOG.warning(_("Invalid Network ID, will lead to incorrect bridge" + "name")) + bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11] + return bridge_name + + def get_subinterface_name(self, physical_interface, vlan_id): + if not vlan_id: + LOG.warning(_("Invalid VLAN ID, will lead to incorrect " + "subinterface name")) + subinterface_name = '%s.%s' % (physical_interface, vlan_id) + return subinterface_name + + def get_tap_device_name(self, interface_id): + if not interface_id: + LOG.warning(_("Invalid Interface ID, will lead to incorrect " + "tap device name")) + tap_device_name = TAP_INTERFACE_PREFIX + interface_id[0:11] + return tap_device_name + + def get_vxlan_device_name(self, segmentation_id): + if 0 <= int(segmentation_id) <= constants.MAX_VXLAN_VNI: + return VXLAN_INTERFACE_PREFIX + str(segmentation_id) + else: + LOG.warning(_("Invalid Segmentation ID: %s, will lead to " + "incorrect vxlan device name"), segmentation_id) + + def get_all_neutron_bridges(self): + neutron_bridge_list = [] + bridge_list = os.listdir(BRIDGE_FS) + for bridge in bridge_list: + if bridge.startswith(BRIDGE_NAME_PREFIX): + 
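# Illustrative sketch (not part of the original patch): the naming helpers
# above build Linux device names from truncated Neutron UUIDs so they stay
# within the kernel's 15-character interface-name limit.
network_id = '12345678-9abc-def0-1234-56789abcdef0'   # hypothetical UUID
port_id = 'abcdef01-2345-6789-abcd-ef0123456789'      # hypothetical UUID
bridge_name = 'brq' + network_id[0:11]   # -> 'brq12345678-9a'
tap_name = 'tap' + port_id[0:11]         # -> 'tapabcdef01-23'
vxlan_name = 'vxlan-' + str(1001)        # -> 'vxlan-1001' (VNI-based, not UUID)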
neutron_bridge_list.append(bridge) + return neutron_bridge_list + + def get_interfaces_on_bridge(self, bridge_name): + if ip_lib.device_exists(bridge_name): + bridge_interface_path = BRIDGE_INTERFACES_FS.replace( + BRIDGE_NAME_PLACEHOLDER, bridge_name) + return os.listdir(bridge_interface_path) + else: + return [] + + def get_tap_devices_count(self, bridge_name): + bridge_interface_path = BRIDGE_INTERFACES_FS.replace( + BRIDGE_NAME_PLACEHOLDER, bridge_name) + try: + if_list = os.listdir(bridge_interface_path) + return len([interface for interface in if_list if + interface.startswith(TAP_INTERFACE_PREFIX)]) + except OSError: + return 0 + + def get_interface_by_ip(self, ip): + for device in self.ip.get_devices(): + if device.addr.list(to=ip): + return device.name + + def get_bridge_for_tap_device(self, tap_device_name): + bridges = self.get_all_neutron_bridges() + for bridge in bridges: + interfaces = self.get_interfaces_on_bridge(bridge) + if tap_device_name in interfaces: + return bridge + + return None + + def is_device_on_bridge(self, device_name): + if not device_name: + return False + else: + bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace( + DEVICE_NAME_PLACEHOLDER, device_name) + return os.path.exists(bridge_port_path) + + def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id): + """Create a vlan and bridge unless they already exist.""" + interface = self.ensure_vlan(physical_interface, vlan_id) + bridge_name = self.get_bridge_name(network_id) + ips, gateway = self.get_interface_details(interface) + if self.ensure_bridge(bridge_name, interface, ips, gateway): + return interface + + def ensure_vxlan_bridge(self, network_id, segmentation_id): + """Create a vxlan and bridge unless they already exist.""" + interface = self.ensure_vxlan(segmentation_id) + if not interface: + LOG.error(_("Failed creating vxlan interface for " + "%(segmentation_id)s"), + {segmentation_id: segmentation_id}) + return + bridge_name = self.get_bridge_name(network_id) + self.ensure_bridge(bridge_name, interface) + return interface + + def get_interface_details(self, interface): + device = self.ip.device(interface) + ips = device.addr.list(scope='global') + + # Update default gateway if necessary + gateway = device.route.get_gateway(scope='global') + return ips, gateway + + def ensure_flat_bridge(self, network_id, physical_interface): + """Create a non-vlan bridge unless it already exists.""" + bridge_name = self.get_bridge_name(network_id) + ips, gateway = self.get_interface_details(physical_interface) + if self.ensure_bridge(bridge_name, physical_interface, ips, gateway): + return physical_interface + + def ensure_local_bridge(self, network_id): + """Create a local bridge unless it already exists.""" + bridge_name = self.get_bridge_name(network_id) + return self.ensure_bridge(bridge_name) + + def ensure_vlan(self, physical_interface, vlan_id): + """Create a vlan unless it already exists.""" + interface = self.get_subinterface_name(physical_interface, vlan_id) + if not ip_lib.device_exists(interface): + LOG.debug(_("Creating subinterface %(interface)s for " + "VLAN %(vlan_id)s on interface " + "%(physical_interface)s"), + {'interface': interface, 'vlan_id': vlan_id, + 'physical_interface': physical_interface}) + if utils.execute(['ip', 'link', 'add', 'link', + physical_interface, + 'name', interface, 'type', 'vlan', 'id', + vlan_id], root_helper=self.root_helper): + return + if utils.execute(['ip', 'link', 'set', + interface, 'up'], root_helper=self.root_helper): + return + LOG.debug(_("Done 
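# Editorial note (not part of the original patch): ensure_vlan above shells
# out through utils.execute (with the configured root helper) to create the
# 802.1Q subinterface; the equivalent iproute2 commands, with hypothetical
# interface eth1 and VLAN 100, are:
#
#   ip link add link eth1 name eth1.100 type vlan id 100
#   ip link set eth1.100 up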
creating subinterface %s"), interface) + return interface + + def ensure_vxlan(self, segmentation_id): + """Create a vxlan unless it already exists.""" + interface = self.get_vxlan_device_name(segmentation_id) + if not ip_lib.device_exists(interface): + LOG.debug(_("Creating vxlan interface %(interface)s for " + "VNI %(segmentation_id)s"), + {'interface': interface, + 'segmentation_id': segmentation_id}) + args = {'dev': self.local_int} + if self.vxlan_mode == lconst.VXLAN_MCAST: + args['group'] = cfg.CONF.VXLAN.vxlan_group + if cfg.CONF.VXLAN.ttl: + args['ttl'] = cfg.CONF.VXLAN.ttl + if cfg.CONF.VXLAN.tos: + args['tos'] = cfg.CONF.VXLAN.tos + if cfg.CONF.VXLAN.l2_population: + args['proxy'] = True + int_vxlan = self.ip.add_vxlan(interface, segmentation_id, **args) + int_vxlan.link.set_up() + LOG.debug(_("Done creating vxlan interface %s"), interface) + return interface + + def update_interface_ip_details(self, destination, source, ips, + gateway): + if ips or gateway: + dst_device = self.ip.device(destination) + src_device = self.ip.device(source) + + # Append IP's to bridge if necessary + if ips: + for ip in ips: + dst_device.addr.add(ip_version=ip['ip_version'], + cidr=ip['cidr'], + broadcast=ip['broadcast']) + + if gateway: + # Ensure that the gateway can be updated by changing the metric + metric = 100 + if 'metric' in gateway: + metric = gateway['metric'] - 1 + dst_device.route.add_gateway(gateway=gateway['gateway'], + metric=metric) + src_device.route.delete_gateway(gateway=gateway['gateway']) + + # Remove IP's from interface + if ips: + for ip in ips: + src_device.addr.delete(ip_version=ip['ip_version'], + cidr=ip['cidr']) + + def _bridge_exists_and_ensure_up(self, bridge_name): + """Check if the bridge exists and make sure it is up.""" + br = ip_lib.IPDevice(bridge_name, self.root_helper) + try: + # If the device doesn't exist this will throw a RuntimeError + br.link.set_up() + except RuntimeError: + return False + return True + + def ensure_bridge(self, bridge_name, interface=None, ips=None, + gateway=None): + """Create a bridge unless it already exists.""" + # _bridge_exists_and_ensure_up instead of device_exists is used here + # because there are cases where the bridge exists but it's not UP, + # for example: + # 1) A greenthread was executing this function and had not yet executed + # "ip link set bridge_name up" before eventlet switched to this + # thread running the same function + # 2) The Nova VIF driver was running concurrently and had just created + # the bridge, but had not yet put it UP + if not self._bridge_exists_and_ensure_up(bridge_name): + LOG.debug(_("Starting bridge %(bridge_name)s for subinterface " + "%(interface)s"), + {'bridge_name': bridge_name, 'interface': interface}) + if utils.execute(['brctl', 'addbr', bridge_name], + root_helper=self.root_helper): + return + if utils.execute(['brctl', 'setfd', bridge_name, + str(0)], root_helper=self.root_helper): + return + if utils.execute(['brctl', 'stp', bridge_name, + 'off'], root_helper=self.root_helper): + return + if utils.execute(['ip', 'link', 'set', bridge_name, + 'up'], root_helper=self.root_helper): + return + LOG.debug(_("Done starting bridge %(bridge_name)s for " + "subinterface %(interface)s"), + {'bridge_name': bridge_name, 'interface': interface}) + + if not interface: + return bridge_name + + # Update IP info if necessary + self.update_interface_ip_details(bridge_name, interface, ips, gateway) + + # Check if the interface is part of the bridge + if not self.interface_exists_on_bridge(bridge_name, 
interface): + try: + # Check if the interface is not enslaved in another bridge + if self.is_device_on_bridge(interface): + bridge = self.get_bridge_for_tap_device(interface) + utils.execute(['brctl', 'delif', bridge, interface], + root_helper=self.root_helper) + + utils.execute(['brctl', 'addif', bridge_name, interface], + root_helper=self.root_helper) + except Exception as e: + LOG.error(_("Unable to add %(interface)s to %(bridge_name)s! " + "Exception: %(e)s"), + {'interface': interface, 'bridge_name': bridge_name, + 'e': e}) + return + return bridge_name + + def ensure_physical_in_bridge(self, network_id, + network_type, + physical_network, + segmentation_id): + if network_type == p_const.TYPE_VXLAN: + if self.vxlan_mode == lconst.VXLAN_NONE: + LOG.error(_("Unable to add vxlan interface for network %s"), + network_id) + return + return self.ensure_vxlan_bridge(network_id, segmentation_id) + + physical_interface = self.interface_mappings.get(physical_network) + if not physical_interface: + LOG.error(_("No mapping for physical network %s"), + physical_network) + return + if network_type == p_const.TYPE_FLAT: + return self.ensure_flat_bridge(network_id, physical_interface) + elif network_type == p_const.TYPE_VLAN: + return self.ensure_vlan_bridge(network_id, physical_interface, + segmentation_id) + else: + LOG.error(_("Unknown network_type %(network_type)s for network " + "%(network_id)s."), {network_type: network_type, + network_id: network_id}) + + def add_tap_interface(self, network_id, network_type, physical_network, + segmentation_id, tap_device_name): + """Add tap interface. + + If a VIF has been plugged into a network, this function will + add the corresponding tap device to the relevant bridge. + """ + if not ip_lib.device_exists(tap_device_name): + LOG.debug(_("Tap device: %s does not exist on " + "this host, skipped"), tap_device_name) + return False + + bridge_name = self.get_bridge_name(network_id) + if network_type == p_const.TYPE_LOCAL: + self.ensure_local_bridge(network_id) + elif not self.ensure_physical_in_bridge(network_id, + network_type, + physical_network, + segmentation_id): + return False + + # Check if device needs to be added to bridge + tap_device_in_bridge = self.get_bridge_for_tap_device(tap_device_name) + if not tap_device_in_bridge: + data = {'tap_device_name': tap_device_name, + 'bridge_name': bridge_name} + msg = _("Adding device %(tap_device_name)s to bridge " + "%(bridge_name)s") % data + LOG.debug(msg) + if utils.execute(['brctl', 'addif', bridge_name, tap_device_name], + root_helper=self.root_helper): + return False + else: + data = {'tap_device_name': tap_device_name, + 'bridge_name': bridge_name} + msg = _("%(tap_device_name)s already exists on bridge " + "%(bridge_name)s") % data + LOG.debug(msg) + return True + + def add_interface(self, network_id, network_type, physical_network, + segmentation_id, port_id): + self.network_map[network_id] = NetworkSegment(network_type, + physical_network, + segmentation_id) + tap_device_name = self.get_tap_device_name(port_id) + return self.add_tap_interface(network_id, network_type, + physical_network, segmentation_id, + tap_device_name) + + def delete_vlan_bridge(self, bridge_name): + if ip_lib.device_exists(bridge_name): + interfaces_on_bridge = self.get_interfaces_on_bridge(bridge_name) + for interface in interfaces_on_bridge: + self.remove_interface(bridge_name, interface) + + if interface.startswith(VXLAN_INTERFACE_PREFIX): + self.delete_vxlan(interface) + continue + + for physical_interface in 
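# Editorial note (not part of the original patch): ensure_bridge and
# add_tap_interface above drive the classic brctl tool; taken together the
# calls are equivalent to the following shell sequence, shown with
# hypothetical device names:
#
#   brctl addbr brq12345678-9a          # create the bridge if missing
#   brctl setfd brq12345678-9a 0        # no forwarding delay
#   brctl stp brq12345678-9a off        # spanning tree off
#   ip link set brq12345678-9a up
#   brctl addif brq12345678-9a eth1.100         # uplink (flat/vlan/vxlan device)
#   brctl addif brq12345678-9a tapabcdef01-23   # VM tap device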
self.interface_mappings.itervalues(): + if (interface.startswith(physical_interface)): + ips, gateway = self.get_interface_details(bridge_name) + if ips: + # This is a flat network or a VLAN interface that + # was setup outside of neutron => return IP's from + # bridge to interface + self.update_interface_ip_details(interface, + bridge_name, + ips, gateway) + elif physical_interface != interface: + self.delete_vlan(interface) + + LOG.debug(_("Deleting bridge %s"), bridge_name) + if utils.execute(['ip', 'link', 'set', bridge_name, 'down'], + root_helper=self.root_helper): + return + if utils.execute(['brctl', 'delbr', bridge_name], + root_helper=self.root_helper): + return + LOG.debug(_("Done deleting bridge %s"), bridge_name) + + else: + LOG.error(_("Cannot delete bridge %s, does not exist"), + bridge_name) + + def remove_empty_bridges(self): + for network_id in self.network_map.keys(): + bridge_name = self.get_bridge_name(network_id) + if not self.get_tap_devices_count(bridge_name): + self.delete_vlan_bridge(bridge_name) + del self.network_map[network_id] + + def remove_interface(self, bridge_name, interface_name): + if ip_lib.device_exists(bridge_name): + if not self.is_device_on_bridge(interface_name): + return True + LOG.debug(_("Removing device %(interface_name)s from bridge " + "%(bridge_name)s"), + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + if utils.execute(['brctl', 'delif', bridge_name, interface_name], + root_helper=self.root_helper): + return False + LOG.debug(_("Done removing device %(interface_name)s from bridge " + "%(bridge_name)s"), + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + return True + else: + LOG.debug(_("Cannot remove device %(interface_name)s bridge " + "%(bridge_name)s does not exist"), + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + return False + + def delete_vlan(self, interface): + if ip_lib.device_exists(interface): + LOG.debug(_("Deleting subinterface %s for vlan"), interface) + if utils.execute(['ip', 'link', 'set', interface, 'down'], + root_helper=self.root_helper): + return + if utils.execute(['ip', 'link', 'delete', interface], + root_helper=self.root_helper): + return + LOG.debug(_("Done deleting subinterface %s"), interface) + + def delete_vxlan(self, interface): + if ip_lib.device_exists(interface): + LOG.debug(_("Deleting vxlan interface %s for vlan"), + interface) + int_vxlan = self.ip.device(interface) + int_vxlan.link.set_down() + int_vxlan.link.delete() + LOG.debug(_("Done deleting vxlan interface %s"), interface) + + def get_tap_devices(self): + devices = set() + for device in os.listdir(BRIDGE_FS): + if device.startswith(TAP_INTERFACE_PREFIX): + devices.add(device) + return devices + + def vxlan_ucast_supported(self): + if not cfg.CONF.VXLAN.l2_population: + return False + if not ip_lib.iproute_arg_supported( + ['bridge', 'fdb'], 'append', self.root_helper): + LOG.warning(_('Option "%(option)s" must be supported by command ' + '"%(command)s" to enable %(mode)s mode') % + {'option': 'append', + 'command': 'bridge fdb', + 'mode': 'VXLAN UCAST'}) + return False + for segmentation_id in range(1, constants.MAX_VXLAN_VNI + 1): + if not ip_lib.device_exists( + self.get_vxlan_device_name(segmentation_id)): + break + else: + LOG.error(_('No valid Segmentation ID to perform UCAST test.')) + return False + + test_iface = self.ensure_vxlan(segmentation_id) + try: + utils.execute( + cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0], + 'dev', test_iface, 'dst', '1.1.1.1'], + 
root_helper=self.root_helper) + return True + except RuntimeError: + return False + finally: + self.delete_vxlan(test_iface) + + def vxlan_mcast_supported(self): + if not cfg.CONF.VXLAN.vxlan_group: + LOG.warning(_('VXLAN muticast group must be provided in ' + 'vxlan_group option to enable VXLAN MCAST mode')) + return False + if not ip_lib.iproute_arg_supported( + ['ip', 'link', 'add', 'type', 'vxlan'], + 'proxy', self.root_helper): + LOG.warning(_('Option "%(option)s" must be supported by command ' + '"%(command)s" to enable %(mode)s mode') % + {'option': 'proxy', + 'command': 'ip link add type vxlan', + 'mode': 'VXLAN MCAST'}) + + return False + return True + + def vxlan_module_supported(self): + try: + utils.execute(cmd=['modinfo', 'vxlan']) + return True + except RuntimeError: + return False + + def check_vxlan_support(self): + self.vxlan_mode = lconst.VXLAN_NONE + if not self.vxlan_module_supported(): + LOG.error(_('Linux kernel vxlan module and iproute2 3.8 or above ' + 'are required to enable VXLAN.')) + raise exceptions.VxlanNetworkUnsupported() + + if self.vxlan_ucast_supported(): + self.vxlan_mode = lconst.VXLAN_UCAST + elif self.vxlan_mcast_supported(): + self.vxlan_mode = lconst.VXLAN_MCAST + else: + raise exceptions.VxlanNetworkUnsupported() + LOG.debug(_('Using %s VXLAN mode'), self.vxlan_mode) + + def fdb_ip_entry_exists(self, mac, ip, interface): + entries = utils.execute(['ip', 'neigh', 'show', 'to', ip, + 'dev', interface], + root_helper=self.root_helper) + return mac in entries + + def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None): + entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface], + root_helper=self.root_helper) + if not agent_ip: + return mac in entries + + return (agent_ip in entries and mac in entries) + + def add_fdb_ip_entry(self, mac, ip, interface): + utils.execute(['ip', 'neigh', 'replace', ip, 'lladdr', mac, + 'dev', interface, 'nud', 'permanent'], + root_helper=self.root_helper, + check_exit_code=False) + + def remove_fdb_ip_entry(self, mac, ip, interface): + utils.execute(['ip', 'neigh', 'del', ip, 'lladdr', mac, + 'dev', interface], + root_helper=self.root_helper, + check_exit_code=False) + + def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"): + utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface, + 'dst', agent_ip], + root_helper=self.root_helper, + check_exit_code=False) + + def remove_fdb_bridge_entry(self, mac, agent_ip, interface): + utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface, + 'dst', agent_ip], + root_helper=self.root_helper, + check_exit_code=False) + + def add_fdb_entries(self, agent_ip, ports, interface): + for mac, ip in ports: + if mac != constants.FLOODING_ENTRY[0]: + self.add_fdb_ip_entry(mac, ip, interface) + self.add_fdb_bridge_entry(mac, agent_ip, interface) + elif self.vxlan_mode == lconst.VXLAN_UCAST: + if self.fdb_bridge_entry_exists(mac, interface): + self.add_fdb_bridge_entry(mac, agent_ip, interface, + "append") + else: + self.add_fdb_bridge_entry(mac, agent_ip, interface) + + def remove_fdb_entries(self, agent_ip, ports, interface): + for mac, ip in ports: + if mac != constants.FLOODING_ENTRY[0]: + self.remove_fdb_ip_entry(mac, ip, interface) + self.remove_fdb_bridge_entry(mac, agent_ip, interface) + elif self.vxlan_mode == lconst.VXLAN_UCAST: + self.remove_fdb_bridge_entry(mac, agent_ip, interface) + + +class LinuxBridgeRpcCallbacks(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin, + l2pop_rpc.L2populationRpcCallBackMixin): + + # 
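# Illustrative sketch (not part of the original patch): check_vxlan_support
# above picks the richest VXLAN mode the host can do - unicast flooding when
# l2_population is enabled and `bridge fdb append` is accepted, multicast
# flooding when a vxlan_group is configured and `ip link add type vxlan`
# accepts the proxy option, otherwise VXLAN is refused. RuntimeError stands
# in here for the VxlanNetworkUnsupported exception used in the patch.
def pick_vxlan_mode(module_ok, ucast_ok, mcast_ok):
    if not module_ok:
        raise RuntimeError('vxlan kernel module not available')
    if ucast_ok:
        return 'unicast_flooding'      # lconst.VXLAN_UCAST
    if mcast_ok:
        return 'multicast_flooding'    # lconst.VXLAN_MCAST
    raise RuntimeError('VXLAN is not supported on this host')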
Set RPC API version to 1.0 by default. + # history + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, context, agent): + super(LinuxBridgeRpcCallbacks, self).__init__() + self.context = context + self.agent = agent + self.sg_agent = agent + + def network_delete(self, context, **kwargs): + LOG.debug(_("network_delete received")) + network_id = kwargs.get('network_id') + bridge_name = self.agent.br_mgr.get_bridge_name(network_id) + LOG.debug(_("Delete %s"), bridge_name) + self.agent.br_mgr.delete_vlan_bridge(bridge_name) + + def port_update(self, context, **kwargs): + port_id = kwargs['port']['id'] + tap_name = self.agent.br_mgr.get_tap_device_name(port_id) + # Put the tap name in the updated_devices set. + # Do not store port details, as if they're used for processing + # notifications there is no guarantee the notifications are + # processed in the same order as the relevant API requests. + self.agent.updated_devices.add(tap_name) + LOG.debug(_("port_update RPC received for port: %s"), port_id) + + def fdb_add(self, context, fdb_entries): + LOG.debug(_("fdb_add received")) + for network_id, values in fdb_entries.items(): + segment = self.agent.br_mgr.network_map.get(network_id) + if not segment: + return + + if segment.network_type != p_const.TYPE_VXLAN: + return + + interface = self.agent.br_mgr.get_vxlan_device_name( + segment.segmentation_id) + + agent_ports = values.get('ports') + for agent_ip, ports in agent_ports.items(): + if agent_ip == self.agent.br_mgr.local_ip: + continue + + self.agent.br_mgr.add_fdb_entries(agent_ip, + ports, + interface) + + def fdb_remove(self, context, fdb_entries): + LOG.debug(_("fdb_remove received")) + for network_id, values in fdb_entries.items(): + segment = self.agent.br_mgr.network_map.get(network_id) + if not segment: + return + + if segment.network_type != p_const.TYPE_VXLAN: + return + + interface = self.agent.br_mgr.get_vxlan_device_name( + segment.segmentation_id) + + agent_ports = values.get('ports') + for agent_ip, ports in agent_ports.items(): + if agent_ip == self.agent.br_mgr.local_ip: + continue + + self.agent.br_mgr.remove_fdb_entries(agent_ip, + ports, + interface) + + def _fdb_chg_ip(self, context, fdb_entries): + LOG.debug(_("update chg_ip received")) + for network_id, agent_ports in fdb_entries.items(): + segment = self.agent.br_mgr.network_map.get(network_id) + if not segment: + return + + if segment.network_type != p_const.TYPE_VXLAN: + return + + interface = self.agent.br_mgr.get_vxlan_device_name( + segment.segmentation_id) + + for agent_ip, state in agent_ports.items(): + if agent_ip == self.agent.br_mgr.local_ip: + continue + + after = state.get('after') + for mac, ip in after: + self.agent.br_mgr.add_fdb_ip_entry(mac, ip, interface) + + before = state.get('before') + for mac, ip in before: + self.agent.br_mgr.remove_fdb_ip_entry(mac, ip, interface) + + def fdb_update(self, context, fdb_entries): + LOG.debug(_("fdb_update received")) + for action, values in fdb_entries.items(): + method = '_fdb_' + action + if not hasattr(self, method): + raise NotImplementedError() + + getattr(self, method)(context, values) + + +class LinuxBridgePluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class LinuxBridgeNeutronAgentRPC(sg_rpc.SecurityGroupAgentRpcMixin): + + def __init__(self, interface_mappings, polling_interval, + root_helper): + self.polling_interval = polling_interval + self.root_helper = root_helper + self.setup_linux_bridge(interface_mappings) + configurations = 
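# Illustrative sketch (not part of the original patch): fdb_update above
# dispatches dynamically - each key of the RPC payload names an action and
# is routed to the matching '_fdb_<action>' method, so new actions only
# need a new method. Minimal standalone version:
class FdbDispatcher(object):
    def _fdb_chg_ip(self, context, values):
        return ('chg_ip', values)

    def fdb_update(self, context, fdb_entries):
        for action, values in fdb_entries.items():
            method = '_fdb_' + action
            if not hasattr(self, method):
                raise NotImplementedError(action)
            getattr(self, method)(context, values)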
{'interface_mappings': interface_mappings} + if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE: + configurations['tunneling_ip'] = self.br_mgr.local_ip + configurations['tunnel_types'] = [p_const.TYPE_VXLAN] + configurations['l2_population'] = cfg.CONF.VXLAN.l2_population + self.agent_state = { + 'binary': 'neutron-linuxbridge-agent', + 'host': cfg.CONF.host, + 'topic': constants.L2_AGENT_TOPIC, + 'configurations': configurations, + 'agent_type': constants.AGENT_TYPE_LINUXBRIDGE, + 'start_flag': True} + + # stores received port_updates for processing by the main loop + self.updated_devices = set() + self.setup_rpc(interface_mappings.values()) + self.init_firewall() + + def _report_state(self): + try: + devices = len(self.br_mgr.get_tap_devices()) + self.agent_state.get('configurations')['devices'] = devices + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def setup_rpc(self, physical_interfaces): + if physical_interfaces: + mac = utils.get_interface_mac(physical_interfaces[0]) + else: + devices = ip_lib.IPWrapper(self.root_helper).get_devices(True) + if devices: + mac = utils.get_interface_mac(devices[0].name) + else: + LOG.error(_("Unable to obtain MAC address for unique ID. " + "Agent terminated!")) + exit(1) + self.agent_id = '%s%s' % ('lb', (mac.replace(":", ""))) + LOG.info(_("RPC agent_id: %s"), self.agent_id) + + self.topic = topics.AGENT + self.plugin_rpc = LinuxBridgePluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [LinuxBridgeRpcCallbacks(self.context, self)] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [topics.SECURITY_GROUP, topics.UPDATE]] + if cfg.CONF.VXLAN.l2_population: + consumers.append([topics.L2POPULATION, + topics.UPDATE, cfg.CONF.host]) + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def setup_linux_bridge(self, interface_mappings): + self.br_mgr = LinuxBridgeManager(interface_mappings, self.root_helper) + + def remove_port_binding(self, network_id, interface_id): + bridge_name = self.br_mgr.get_bridge_name(network_id) + tap_device_name = self.br_mgr.get_tap_device_name(interface_id) + return self.br_mgr.remove_interface(bridge_name, tap_device_name) + + def process_network_devices(self, device_info): + resync_a = False + resync_b = False + + self.prepare_devices_filter(device_info.get('added')) + + if device_info.get('updated'): + self.refresh_firewall() + + # Updated devices are processed the same as new ones, as their + # admin_state_up may have changed. The set union prevents duplicating + # work when a device is new and updated in the same polling iteration. 
+ devices_added_updated = (set(device_info.get('added')) + | set(device_info.get('updated'))) + if devices_added_updated: + resync_a = self.treat_devices_added_updated(devices_added_updated) + + if device_info.get('removed'): + resync_b = self.treat_devices_removed(device_info['removed']) + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def treat_devices_added_updated(self, devices): + try: + devices_details_list = self.plugin_rpc.get_devices_details_list( + self.context, devices, self.agent_id) + except Exception as e: + LOG.debug("Unable to get port details for " + "%(devices)s: %(e)s", + {'devices': devices, 'e': e}) + # resync is needed + return True + + for device_details in devices_details_list: + device = device_details['device'] + LOG.debug("Port %s added", device) + + if 'port_id' in device_details: + LOG.info(_("Port %(device)s updated. Details: %(details)s"), + {'device': device, 'details': device_details}) + if device_details['admin_state_up']: + # create the networking for the port + network_type = device_details.get('network_type') + if network_type: + segmentation_id = device_details.get('segmentation_id') + else: + # compatibility with pre-Havana RPC vlan_id encoding + vlan_id = device_details.get('vlan_id') + (network_type, + segmentation_id) = lconst.interpret_vlan_id(vlan_id) + if self.br_mgr.add_interface( + device_details['network_id'], + network_type, + device_details['physical_network'], + segmentation_id, + device_details['port_id']): + + # update plugin about port status + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + else: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + else: + self.remove_port_binding(device_details['network_id'], + device_details['port_id']) + else: + LOG.info(_("Device %s not defined on plugin"), device) + return False + + def treat_devices_removed(self, devices): + resync = False + self.remove_devices_filter(devices) + for device in devices: + LOG.info(_("Attachment %s removed"), device) + details = None + try: + details = self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + if details and details['exists']: + LOG.info(_("Port %s updated."), device) + else: + LOG.debug(_("Device %s not defined on plugin"), device) + self.br_mgr.remove_empty_bridges() + return resync + + def scan_devices(self, registered_devices, updated_devices): + curr_devices = self.br_mgr.get_tap_devices() + device_info = {} + device_info['current'] = curr_devices + device_info['added'] = curr_devices - registered_devices + # we don't want to process updates for devices that don't exist + device_info['updated'] = updated_devices & curr_devices + # we need to clean up after devices are removed + device_info['removed'] = registered_devices - curr_devices + return device_info + + def _device_info_has_changes(self, device_info): + return (device_info.get('added') + or device_info.get('updated') + or device_info.get('removed')) + + def daemon_loop(self): + sync = True + devices = set() + + LOG.info(_("LinuxBridge Agent RPC Daemon Started!")) + + while True: + start = time.time() + if sync: + LOG.info(_("Agent out of sync with plugin!")) + devices.clear() + sync = False + device_info = {} + # Save updated devices dict to perform rollback in case + # resync would be 
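# Illustrative sketch (not part of the original patch): scan_devices above
# is plain set arithmetic on tap-device names, which keeps the polling loop
# cheap. With hypothetical device sets:
registered = {'tapA', 'tapB'}           # known from previous iterations
updated = {'tapB', 'tapC'}              # port_update notifications received
current = {'tapB', 'tapD'}              # tap devices present right now

added = current - registered            # {'tapD'}
updates_to_process = updated & current  # {'tapB'}  (tapC no longer exists)
removed = registered - current          # {'tapA'}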
needed, and then clear self.updated_devices. + # As the greenthread should not yield between these + # two statements, this will should be thread-safe. + updated_devices_copy = self.updated_devices + self.updated_devices = set() + try: + device_info = self.scan_devices(devices, updated_devices_copy) + if self._device_info_has_changes(device_info): + LOG.debug(_("Agent loop found changes! %s"), device_info) + # If treat devices fails - indicates must resync with + # plugin + sync = self.process_network_devices(device_info) + devices = device_info['current'] + except Exception: + LOG.exception(_("Error in agent loop. Devices info: %s"), + device_info) + sync = True + # Restore devices that were removed from this set earlier + # without overwriting ones that may have arrived since. + self.updated_devices |= updated_devices_copy + + # sleep till end of polling interval + elapsed = (time.time() - start) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + + +def main(): + common_config.init(sys.argv[1:]) + + common_config.setup_logging(cfg.CONF) + try: + interface_mappings = q_utils.parse_mappings( + cfg.CONF.LINUX_BRIDGE.physical_interface_mappings) + except ValueError as e: + LOG.error(_("Parsing physical_interface_mappings failed: %s." + " Agent terminated!"), e) + sys.exit(1) + LOG.info(_("Interface mappings: %s"), interface_mappings) + + polling_interval = cfg.CONF.AGENT.polling_interval + root_helper = cfg.CONF.AGENT.root_helper + agent = LinuxBridgeNeutronAgentRPC(interface_mappings, + polling_interval, + root_helper) + LOG.info(_("Agent initialized successfully, now running... ")) + agent.daemon_loop() + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/__init__.py new file mode 100644 index 00000000..a9d66be8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/config.py new file mode 100644 index 00000000..0bf92ea4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/config.py @@ -0,0 +1,76 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# @author: Rohit Agarwalla, Cisco Systems, Inc. + +from oslo.config import cfg + +from neutron.agent.common import config + +DEFAULT_VLAN_RANGES = [] +DEFAULT_INTERFACE_MAPPINGS = [] +DEFAULT_VXLAN_GROUP = '224.0.0.1' + + +vlan_opts = [ + cfg.StrOpt('tenant_network_type', default='local', + help=_("Network type for tenant networks " + "(local, vlan, or none)")), + cfg.ListOpt('network_vlan_ranges', + default=DEFAULT_VLAN_RANGES, + help=_("List of :: " + "or ")), +] + +vxlan_opts = [ + cfg.BoolOpt('enable_vxlan', default=False, + help=_("Enable VXLAN on the agent. Can be enabled when " + "agent is managed by ml2 plugin using linuxbridge " + "mechanism driver")), + cfg.IntOpt('ttl', + help=_("TTL for vxlan interface protocol packets.")), + cfg.IntOpt('tos', + help=_("TOS for vxlan interface protocol packets.")), + cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP, + help=_("Multicast group for vxlan interface.")), + cfg.StrOpt('local_ip', default='', + help=_("Local IP address of the VXLAN endpoints.")), + cfg.BoolOpt('l2_population', default=False, + help=_("Extension to use alongside ml2 plugin's l2population " + "mechanism driver. It enables the plugin to populate " + "VXLAN forwarding table.")), +] + +bridge_opts = [ + cfg.ListOpt('physical_interface_mappings', + default=DEFAULT_INTERFACE_MAPPINGS, + help=_("List of :")), +] + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), + cfg.BoolOpt('rpc_support_old_agents', default=False, + help=_("Enable server RPC compatibility with old agents")), +] + + +cfg.CONF.register_opts(vlan_opts, "VLANS") +cfg.CONF.register_opts(vxlan_opts, "VXLAN") +cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE") +cfg.CONF.register_opts(agent_opts, "AGENT") +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/constants.py new file mode 100644 index 00000000..5cd29ac1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/common/constants.py @@ -0,0 +1,40 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. 
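# Editorial note (not part of the original patch): the option groups
# registered above (VLANS, VXLAN, LINUX_BRIDGE, AGENT) surface in the
# agent's ini configuration. A hypothetical minimal file using them might
# look like the following; the values are examples only:
#
#   [VLANS]
#   tenant_network_type = vlan
#   network_vlan_ranges = physnet1:1000:2999
#
#   [LINUX_BRIDGE]
#   physical_interface_mappings = physnet1:eth1
#
#   [VXLAN]
#   enable_vxlan = True
#   local_ip = 192.0.2.10
#   l2_population = True
#
#   [AGENT]
#   polling_interval = 2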
+ + +from neutron.plugins.common import constants as p_const + + +FLAT_VLAN_ID = -1 +LOCAL_VLAN_ID = -2 + +# Supported VXLAN features +VXLAN_NONE = 'not_supported' +VXLAN_MCAST = 'multicast_flooding' +VXLAN_UCAST = 'unicast_flooding' + + +# TODO(rkukura): Eventually remove this function, which provides +# temporary backward compatibility with pre-Havana RPC and DB vlan_id +# encoding. +def interpret_vlan_id(vlan_id): + """Return (network_type, segmentation_id) tuple for encoded vlan_id.""" + if vlan_id == LOCAL_VLAN_ID: + return (p_const.TYPE_LOCAL, None) + elif vlan_id == FLAT_VLAN_ID: + return (p_const.TYPE_FLAT, None) + else: + return (p_const.TYPE_VLAN, vlan_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/db/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/db/__init__.py new file mode 100644 index 00000000..8eaec07b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/db/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, Cisco Systems, Inc. +# diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/db/l2network_db_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/db/l2network_db_v2.py new file mode 100644 index 00000000..416bd2f5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/db/l2network_db_v2.py @@ -0,0 +1,238 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import moves +from sqlalchemy.orm import exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.linuxbridge.common import config # noqa +from neutron.plugins.linuxbridge.common import constants +from neutron.plugins.linuxbridge.db import l2network_models_v2 + +LOG = logging.getLogger(__name__) + + +def sync_network_states(network_vlan_ranges): + """Synchronize network_states table with current configured VLAN ranges.""" + + session = db.get_session() + with session.begin(): + # get existing allocations for all physical networks + allocations = dict() + states = (session.query(l2network_models_v2.NetworkState). 
+ all()) + for state in states: + if state.physical_network not in allocations: + allocations[state.physical_network] = set() + allocations[state.physical_network].add(state) + + # process vlan ranges for each configured physical network + for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): + # determine current configured allocatable vlans for this + # physical network + vlan_ids = set() + for vlan_range in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) + + # remove from table unallocated vlans not currently allocatable + if physical_network in allocations: + for state in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(state.vlan_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not state.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing vlan %(vlan_id)s on " + "physical network %(physical_network)s" + " from pool"), + {'vlan_id': state.vlan_id, + 'physical_network': physical_network}) + session.delete(state) + del allocations[physical_network] + + # add missing allocatable vlans to table + for vlan_id in sorted(vlan_ids): + state = l2network_models_v2.NetworkState(physical_network, + vlan_id) + session.add(state) + + # remove from table unallocated vlans for any unconfigured physical + # networks + for states in allocations.itervalues(): + for state in states: + if not state.allocated: + LOG.debug(_("Removing vlan %(vlan_id)s on physical " + "network %(physical_network)s" + " from pool"), + {'vlan_id': state.vlan_id, + 'physical_network': state.physical_network}) + session.delete(state) + + +def get_network_state(physical_network, vlan_id): + """Get state of specified network.""" + + session = db.get_session() + try: + state = (session.query(l2network_models_v2.NetworkState). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + one()) + return state + except exc.NoResultFound: + return None + + +def reserve_network(session): + with session.begin(subtransactions=True): + state = (session.query(l2network_models_v2.NetworkState). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if not state: + raise n_exc.NoNetworkAvailable() + LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " + "%(physical_network)s from pool"), + {'vlan_id': state.vlan_id, + 'physical_network': state.physical_network}) + state.allocated = True + return (state.physical_network, state.vlan_id) + + +def reserve_specific_network(session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + state = (session.query(l2network_models_v2.NetworkState). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). 
+ one()) + if state.allocated: + if vlan_id == constants.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse(vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + state.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + state = l2network_models_v2.NetworkState(physical_network, vlan_id) + state.allocated = True + session.add(state) + + +def release_network(session, physical_network, vlan_id, network_vlan_ranges): + with session.begin(subtransactions=True): + try: + state = (session.query(l2network_models_v2.NetworkState). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). + one()) + state.allocated = False + inside = False + for vlan_range in network_vlan_ranges.get(physical_network, []): + if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]: + inside = True + break + if inside: + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s to pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + else: + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + session.delete(state) + except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan_id)s on physical network " + "%(physical_network)s not found"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + + +def add_network_binding(session, network_id, physical_network, vlan_id): + with session.begin(subtransactions=True): + binding = l2network_models_v2.NetworkBinding(network_id, + physical_network, vlan_id) + session.add(binding) + + +def get_network_binding(session, network_id): + try: + binding = (session.query(l2network_models_v2.NetworkBinding). + filter_by(network_id=network_id). 
+ one()) + return binding + except exc.NoResultFound: + return + + +def get_port_from_device(device): + """Get port from database.""" + LOG.debug(_("get_port_from_device() called")) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id.startswith(device)) + port_and_sgs = query.all() + if not port_and_sgs: + return + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict['security_groups'] = [] + for port_in_db, sg_id in port_and_sgs: + if sg_id: + port_dict['security_groups'].append(sg_id) + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +def set_port_status(port_id, status): + """Set the port status.""" + LOG.debug(_("set_port_status as %s called"), status) + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/lb_neutron_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/lb_neutron_plugin.py new file mode 100644 index 00000000..89b1354a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/linuxbridge/lb_neutron_plugin.py @@ -0,0 +1,541 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
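# Illustrative sketch: the VLAN bookkeeping above keeps one NetworkState row per
# (physical_network, vlan_id).  sync_network_states() first expands the configured
# network_vlan_ranges into the set of allocatable VLAN ids, roughly as below, and
# then reconciles that set against the existing rows.  The helper name and the
# range values are examples only.
def expand_vlan_ranges(vlan_ranges):
    """Map [(100, 102), (200, 200)] to the set {100, 101, 102, 200}."""
    vlan_ids = set()
    for vlan_min, vlan_max in vlan_ranges:
        vlan_ids |= set(range(vlan_min, vlan_max + 1))
    return vlan_ids

# e.g. expand_vlan_ranges([(100, 102)]) == set([100, 101, 102])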
+ +import sys + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import api as db_api +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.linuxbridge.common import constants +from neutron.plugins.linuxbridge.db import l2network_db_v2 as db + + +LOG = logging.getLogger(__name__) + + +class LinuxBridgeRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin + ): + + # history + # 1.1 Support Security Group RPC + # 1.2 Support get_devices_details_list + RPC_API_VERSION = '1.2' + # Device names start with "tap" + TAP_PREFIX_LEN = 3 + + @classmethod + def get_port_from_device(cls, device): + port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:]) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self.get_port_from_device(device) + if port: + binding = db.get_network_binding(db_api.get_session(), + port['network_id']) + (network_type, + segmentation_id) = constants.interpret_vlan_id(binding.vlan_id) + entry = {'device': device, + 'network_type': network_type, + 'physical_network': binding.physical_network, + 'segmentation_id': segmentation_id, + 'network_id': port['network_id'], + 'port_id': port['id'], + 'admin_state_up': port['admin_state_up']} + if cfg.CONF.AGENT.rpc_support_old_agents: + entry['vlan_id'] = binding.vlan_id + new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] + else q_const.PORT_STATUS_DOWN) + if port['status'] != new_status: + db.set_port_status(port['id'], new_status) + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def get_devices_details_list(self, rpc_context, **kwargs): + return [ + self.get_device_details( + rpc_context, + device=device, + **kwargs + ) + for device in kwargs.pop('devices', []) + ] + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + # TODO(garyk) - live migration and port status + agent_id = kwargs.get('agent_id') + device = 
kwargs.get('device') + host = kwargs.get('host') + port = self.get_port_from_device(device) + LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + if port: + entry = {'device': device, + 'exists': True} + if (host and not + plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + elif port['status'] != q_const.PORT_STATUS_DOWN: + # Set port status to DOWN + db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = self.get_port_from_device(device) + LOG.debug(_("Device %(device)s up on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + if port: + if (host and + not plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return + elif port['status'] != q_const.PORT_STATUS_ACTIVE: + db.set_port_status(port['id'], + q_const.PORT_STATUS_ACTIVE) + else: + LOG.debug(_("%s can not be found in database"), device) + + +class AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + '''Agent side of the linux bridge rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. + + + ''' + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic = topic + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, physical_network, vlan_id): + network_type, segmentation_id = constants.interpret_vlan_id(vlan_id) + kwargs = {'port': port, + 'network_type': network_type, + 'physical_network': physical_network, + 'segmentation_id': segmentation_id} + if cfg.CONF.AGENT.rpc_support_old_agents: + kwargs['vlan_id'] = vlan_id + msg = self.make_msg('port_update', **kwargs) + self.fanout_cast(context, msg, + topic=self.topic_port_update) + + +class LinuxBridgePluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + portbindings_db.PortBindingMixin): + """Implement the Neutron abstractions using Linux bridging. + + A new VLAN is created for each network. An agent is relied upon + to perform the actual Linux bridge configuration on each host. + + The provider extension is also supported. 
As discussed in + https://bugs.launchpad.net/neutron/+bug/1023156, this class could + be simplified, and filtering on extended attributes could be + handled, by adding support for extended attributes to the + NeutronDbPluginV2 base class. When that occurs, this class should + be updated to take advantage of it. + + The port binding extension enables an external application relay + information to and from the plugin. + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + _supported_extension_aliases = ["provider", "external-net", "router", + "ext-gw-mode", "binding", "quotas", + "security-group", "agent", "extraroute", + "l3_agent_scheduler", + "dhcp_agent_scheduler"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + super(LinuxBridgePluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + self._parse_network_vlan_ranges() + db.sync_network_states(self.network_vlan_ranges) + self.tenant_network_type = cfg.CONF.VLANS.tenant_network_type + if self.tenant_network_type not in [svc_constants.TYPE_LOCAL, + svc_constants.TYPE_VLAN, + svc_constants.TYPE_NONE]: + LOG.error(_("Invalid tenant_network_type: %s. " + "Service terminated!"), + self.tenant_network_type) + sys.exit(1) + self._setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + LOG.debug(_("Linux Bridge Plugin initialization complete")) + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.endpoints = [LinuxBridgeRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + self.notifier = AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + + def _parse_network_vlan_ranges(self): + try: + self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.VLANS.network_vlan_ranges) + except Exception as ex: + LOG.error(_("%s. 
Agent terminated!"), ex) + sys.exit(1) + LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) + + def _add_network_vlan_range(self, physical_network, vlan_min, vlan_max): + self._add_network(physical_network) + self.network_vlan_ranges[physical_network].append((vlan_min, vlan_max)) + + def _add_network(self, physical_network): + if physical_network not in self.network_vlan_ranges: + self.network_vlan_ranges[physical_network] = [] + + def _extend_network_dict_provider(self, context, network): + binding = db.get_network_binding(context.session, network['id']) + if binding.vlan_id == constants.FLAT_VLAN_ID: + network[provider.NETWORK_TYPE] = svc_constants.TYPE_FLAT + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = None + elif binding.vlan_id == constants.LOCAL_VLAN_ID: + network[provider.NETWORK_TYPE] = svc_constants.TYPE_LOCAL + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + else: + network[provider.NETWORK_TYPE] = svc_constants.TYPE_VLAN + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.vlan_id + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_FLAT: + if segmentation_id_set: + msg = _("provider:segmentation_id specified for flat network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = constants.FLAT_VLAN_ID + elif network_type == svc_constants.TYPE_VLAN: + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if not utils.is_valid_vlan_tag(segmentation_id): + msg = (_("provider:segmentation_id out of range " + "(%(min_id)s through %(max_id)s)") % + {'min_id': q_const.MIN_VLAN_TAG, + 'max_id': q_const.MAX_VLAN_TAG}) + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_LOCAL: + if physical_network_set: + msg = _("provider:physical_network specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if segmentation_id_set: + msg = _("provider:segmentation_id specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = constants.LOCAL_VLAN_ID + else: + msg = _("provider:network_type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + + if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: + if physical_network_set: + if physical_network not in self.network_vlan_ranges: + msg = (_("Unknown provider:physical_network %s") % + physical_network) + raise n_exc.InvalidInput(error_message=msg) + elif 'default' in self.network_vlan_ranges: + physical_network = 'default' + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + + return (network_type, physical_network, segmentation_id) + + def 
create_network(self, context, network): + (network_type, physical_network, + vlan_id) = self._process_provider_create(context, + network['network']) + + session = context.session + with session.begin(subtransactions=True): + #set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + if not network_type: + # tenant network + network_type = self.tenant_network_type + if network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + elif network_type == svc_constants.TYPE_VLAN: + physical_network, vlan_id = db.reserve_network(session) + else: # TYPE_LOCAL + vlan_id = constants.LOCAL_VLAN_ID + else: + # provider network + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + db.reserve_specific_network(session, physical_network, + vlan_id) + # no reservation needed for TYPE_LOCAL + net = super(LinuxBridgePluginV2, self).create_network(context, + network) + db.add_network_binding(session, net['id'], + physical_network, vlan_id) + self._process_l3_create(context, net, network['network']) + self._extend_network_dict_provider(context, net) + # note - exception will rollback entire transaction + return net + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + net = super(LinuxBridgePluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + self._extend_network_dict_provider(context, net) + return net + + def delete_network(self, context, id): + session = context.session + with session.begin(subtransactions=True): + binding = db.get_network_binding(session, id) + self._process_l3_delete(context, id) + super(LinuxBridgePluginV2, self).delete_network(context, id) + if binding.vlan_id != constants.LOCAL_VLAN_ID: + db.release_network(session, binding.physical_network, + binding.vlan_id, self.network_vlan_ranges) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is not necessary + self.notifier.network_delete(context, id) + + def get_network(self, context, id, fields=None): + session = context.session + with session.begin(subtransactions=True): + net = super(LinuxBridgePluginV2, self).get_network(context, + id, None) + self._extend_network_dict_provider(context, net) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(LinuxBridgePluginV2, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + for net in nets: + self._extend_network_dict_provider(context, net) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + session = context.session + port_data = port['port'] + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + # Set port status as 'DOWN'. 
This will be updated by agent + port['port']['status'] = q_const.PORT_STATUS_DOWN + + port = super(LinuxBridgePluginV2, + self).create_port(context, port) + self._process_portbindings_create_and_update(context, + port_data, + port) + self._process_port_create_security_group( + context, port, sgids) + self.notify_security_groups_member_updated(context, port) + return port + + def update_port(self, context, id, port): + original_port = self.get_port(context, id) + session = context.session + need_port_update_notify = False + + with session.begin(subtransactions=True): + updated_port = super(LinuxBridgePluginV2, self).update_port( + context, id, port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify = self.update_security_group_on_port( + context, id, port, original_port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + self._notify_port_updated(context, updated_port) + return updated_port + + def delete_port(self, context, id, l3_port_check=True): + + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + + session = context.session + with session.begin(subtransactions=True): + self.disassociate_floatingips(context, id) + port = self.get_port(context, id) + self._delete_port_security_group_bindings(context, id) + super(LinuxBridgePluginV2, self).delete_port(context, id) + + self.notify_security_groups_member_updated(context, port) + + def _notify_port_updated(self, context, port): + binding = db.get_network_binding(context.session, + port['network_id']) + self.notifier.port_update(context, port, + binding.physical_network, + binding.vlan_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/__init__.py new file mode 100644 index 00000000..65685178 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/common/__init__.py new file mode 100644 index 00000000..65685178 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/common/config.py new file mode 100644 index 00000000..a973e243 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/common/config.py @@ -0,0 +1,78 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + + +meta_plugin_opts = [ + cfg.StrOpt( + 'plugin_list', + default='', + help=_("Comma separated list of flavor:neutron_plugin for " + "plugins to load. Extension method is searched in the " + "list order and the first one is used.")), + cfg.StrOpt( + 'l3_plugin_list', + default='', + help=_("Comma separated list of flavor:neutron_plugin for L3 " + "service plugins to load. This is intended for specifying " + "L2 plugins which support L3 functions. If you use a router " + "service plugin, set this blank.")), + cfg.StrOpt( + 'default_flavor', + default='', + help=_("Default flavor to use, when flavor:network is not " + "specified at network creation.")), + cfg.StrOpt( + 'default_l3_flavor', + default='', + help=_("Default L3 flavor to use, when flavor:router is not " + "specified at router creation. Ignored if 'l3_plugin_list' " + "is blank.")), + cfg.StrOpt( + 'supported_extension_aliases', + default='', + help=_("Comma separated list of supported extension aliases.")), + cfg.StrOpt( + 'extension_map', + default='', + help=_("Comma separated list of method:flavor to select specific " + "plugin for a method. 
This has priority over method search " + "order based on 'plugin_list'.")), + cfg.StrOpt( + 'rpc_flavor', + default='', + help=_("Specifies flavor for plugin to handle 'q-plugin' RPC " + "requests.")), +] + +proxy_plugin_opts = [ + cfg.StrOpt('admin_user', + help=_("Admin user")), + cfg.StrOpt('admin_password', + help=_("Admin password"), + secret=True), + cfg.StrOpt('admin_tenant_name', + help=_("Admin tenant name")), + cfg.StrOpt('auth_url', + help=_("Authentication URL")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('auth_region', + help=_("Authentication region")), +] + +cfg.CONF.register_opts(meta_plugin_opts, "META") +cfg.CONF.register_opts(proxy_plugin_opts, "PROXY") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_db_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_db_v2.py new file mode 100644 index 00000000..939442eb --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_db_v2.py @@ -0,0 +1,50 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy.orm import exc + +from neutron.plugins.metaplugin import meta_models_v2 + + +def get_flavor_by_network(session, net_id): + try: + binding = (session.query(meta_models_v2.NetworkFlavor). + filter_by(network_id=net_id). + one()) + except exc.NoResultFound: + return None + return binding.flavor + + +def add_network_flavor_binding(session, flavor, net_id): + binding = meta_models_v2.NetworkFlavor(flavor=flavor, network_id=net_id) + session.add(binding) + return binding + + +def get_flavor_by_router(session, router_id): + try: + binding = (session.query(meta_models_v2.RouterFlavor). + filter_by(router_id=router_id). + one()) + except exc.NoResultFound: + return None + return binding.flavor + + +def add_router_flavor_binding(session, flavor, router_id): + binding = meta_models_v2.RouterFlavor(flavor=flavor, router_id=router_id) + session.add(binding) + return binding diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_models_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_models_v2.py new file mode 100644 index 00000000..70d546ed --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_models_v2.py @@ -0,0 +1,41 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
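# Illustrative sketch: the META.plugin_list option declared above is a comma
# separated "flavor:provider" string; MetaPluginV2 (defined further below) splits
# it into a flavor -> import-path mapping before loading each plugin.  The helper
# name and the class paths are examples only.
def parse_plugin_list(plugin_list):
    """Map 'lb:pkg.LbPlugin,ovs:pkg.OvsPlugin' to {'lb': 'pkg.LbPlugin', ...}."""
    plugins = {}
    for plugin_set in plugin_list.split(','):
        flavor, sep, provider = plugin_set.partition(':')
        if not sep or not provider:
            raise ValueError("Invalid plugin_list entry: %r" % plugin_set)
        plugins[flavor.strip()] = provider.strip()
    return plugins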
+ +import sqlalchemy as sa +from sqlalchemy import Column, String + +from neutron.db import models_v2 + + +class NetworkFlavor(models_v2.model_base.BASEV2): + """Represents a binding of network_id to flavor.""" + flavor = Column(String(255)) + network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', + ondelete="CASCADE"), + primary_key=True) + + def __repr__(self): + return "" % (self.flavor, self.network_id) + + +class RouterFlavor(models_v2.model_base.BASEV2): + """Represents a binding of router_id to flavor.""" + flavor = Column(String(255)) + router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', + ondelete="CASCADE"), + primary_key=True) + + def __repr__(self): + return "" % (self.flavor, self.router_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_neutron_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_neutron_plugin.py new file mode 100644 index 00000000..97a46082 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/meta_neutron_plugin.py @@ -0,0 +1,417 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.common import exceptions as exc +from neutron.common import topics +from neutron import context as neutron_context +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.extensions import flavor as ext_flavor +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.metaplugin.common import config # noqa +from neutron.plugins.metaplugin import meta_db_v2 +from neutron.plugins.metaplugin import meta_models_v2 + + +LOG = logging.getLogger(__name__) + + +# Hooks used to select records which belong a target plugin. 
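# Concretely: _meta_network_model_hook() below outer-joins NetworkFlavor onto the
# base Network query, _meta_port_model_hook() joins it through the port's
# network_id, and _meta_flavor_filter_hook() narrows the result to the requested
# ext_flavor.FLAVOR_NETWORK value, so each sub-plugin only sees records bound to
# its own flavor.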
+def _meta_network_model_hook(context, original_model, query): + return query.outerjoin(meta_models_v2.NetworkFlavor, + meta_models_v2.NetworkFlavor.network_id == + models_v2.Network.id) + + +def _meta_port_model_hook(context, original_model, query): + return query.join(meta_models_v2.NetworkFlavor, + meta_models_v2.NetworkFlavor.network_id == + models_v2.Port.network_id) + + +def _meta_flavor_filter_hook(query, filters): + if ext_flavor.FLAVOR_NETWORK in filters: + return query.filter(meta_models_v2.NetworkFlavor.flavor == + filters[ext_flavor.FLAVOR_NETWORK][0]) + return query + + +# Metaplugin Exceptions +class FlavorNotFound(exc.NotFound): + message = _("Flavor %(flavor)s could not be found") + + +class FaildToAddFlavorBinding(exc.NeutronException): + message = _("Failed to add flavor binding") + + +class MetaPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin): + + def __init__(self, configfile=None): + super(MetaPluginV2, self).__init__() + LOG.debug(_("Start initializing metaplugin")) + self.supported_extension_aliases = ['flavor', 'external-net'] + if cfg.CONF.META.supported_extension_aliases: + cfg_aliases = cfg.CONF.META.supported_extension_aliases.split(',') + self.supported_extension_aliases += cfg_aliases + + # Ignore config option overapping + def _is_opt_registered(opts, opt): + if opt.dest in opts: + return True + else: + return False + + cfg._is_opt_registered = _is_opt_registered + + # Keep existing tables if multiple plugin use same table name. + db.model_base.NeutronBase.__table_args__ = {'keep_existing': True} + + self.plugins = {} + + plugin_list = [plugin_set.split(':') + for plugin_set + in cfg.CONF.META.plugin_list.split(',')] + self.rpc_flavor = cfg.CONF.META.rpc_flavor + topic_save = topics.PLUGIN + topic_fake = topic_save + '-metaplugin' + for flavor, plugin_provider in plugin_list: + # Rename topic used by a plugin other than rpc_flavor during + # loading the plugin instance if rpc_flavor is specified. + # This enforces the plugin specified by rpc_flavor is only + # consumer of 'q-plugin'. It is a bit tricky but there is no + # bad effect. 
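# Concretely, if rpc_flavor were 'lb', that plugin is instantiated while
# topics.PLUGIN still names the real 'q-plugin' topic; the other plugins are
# instantiated with topics.PLUGIN temporarily set to the throwaway
# 'q-plugin-metaplugin' name, so any RPC consumer they create during __init__
# never attaches to the real queue.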
+ if self.rpc_flavor and self.rpc_flavor != flavor: + topics.PLUGIN = topic_fake + self.plugins[flavor] = self._load_plugin(plugin_provider) + topics.PLUGIN = topic_save + + self.l3_plugins = {} + if cfg.CONF.META.l3_plugin_list: + l3_plugin_list = [plugin_set.split(':') + for plugin_set + in cfg.CONF.META.l3_plugin_list.split(',')] + for flavor, plugin_provider in l3_plugin_list: + if flavor in self.plugins: + self.l3_plugins[flavor] = self.plugins[flavor] + else: + # For l3 only plugin + self.l3_plugins[flavor] = self._load_plugin( + plugin_provider) + + self.default_flavor = cfg.CONF.META.default_flavor + if self.default_flavor not in self.plugins: + raise exc.Invalid(_('default_flavor %s is not plugin list') % + self.default_flavor) + + if self.l3_plugins: + self.default_l3_flavor = cfg.CONF.META.default_l3_flavor + if self.default_l3_flavor not in self.l3_plugins: + raise exc.Invalid(_('default_l3_flavor %s is not plugin list') + % self.default_l3_flavor) + self.supported_extension_aliases += ['router', 'ext-gw-mode', + 'extraroute'] + + if self.rpc_flavor and self.rpc_flavor not in self.plugins: + raise exc.Invalid(_('rpc_flavor %s is not plugin list') % + self.rpc_flavor) + + self.extension_map = {} + if not cfg.CONF.META.extension_map == '': + extension_list = [method_set.split(':') + for method_set + in cfg.CONF.META.extension_map.split(',')] + for method_name, flavor in extension_list: + self.extension_map[method_name] = flavor + + # Register hooks. + # The hooks are applied for each target plugin instance when + # calling the base class to get networks/ports so that only records + # which belong to the plugin are selected. + #NOTE: Doing registration here (within __init__()) is to avoid + # registration when merely importing this file. This is only + # for running whole unit tests. + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Network, + 'metaplugin_net', + _meta_network_model_hook, + None, + _meta_flavor_filter_hook) + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, + 'metaplugin_port', + _meta_port_model_hook, + None, + _meta_flavor_filter_hook) + + def _load_plugin(self, plugin_provider): + LOG.debug(_("Plugin location: %s"), plugin_provider) + plugin_klass = importutils.import_class(plugin_provider) + return plugin_klass() + + def _get_plugin(self, flavor): + if flavor not in self.plugins: + raise FlavorNotFound(flavor=flavor) + return self.plugins[flavor] + + def _get_l3_plugin(self, flavor): + if flavor not in self.l3_plugins: + raise FlavorNotFound(flavor=flavor) + return self.l3_plugins[flavor] + + def __getattr__(self, key): + # At first, try to pickup extension command from extension_map + + if key in self.extension_map: + flavor = self.extension_map[key] + plugin = self._get_plugin(flavor) + if plugin and hasattr(plugin, key): + return getattr(plugin, key) + + # Second, try to match extension method in order of plugin list + for flavor, plugin in self.plugins.items(): + if hasattr(plugin, key): + return getattr(plugin, key) + + # if no plugin support the method, then raise + raise AttributeError + + def _extend_network_dict(self, context, network): + flavor = self._get_flavor_by_network_id(context, network['id']) + network[ext_flavor.FLAVOR_NETWORK] = flavor + + def start_rpc_listeners(self): + return self.plugins[self.rpc_flavor].start_rpc_listeners() + + def rpc_workers_supported(self): + #NOTE: If a plugin which supports multiple RPC workers is desired + # to handle RPC, rpc_flavor must be specified. 
+ return (self.rpc_flavor and + self.plugins[self.rpc_flavor].rpc_workers_supported()) + + def create_network(self, context, network): + n = network['network'] + flavor = n.get(ext_flavor.FLAVOR_NETWORK) + if str(flavor) not in self.plugins: + flavor = self.default_flavor + plugin = self._get_plugin(flavor) + net = plugin.create_network(context, network) + LOG.debug(_("Created network: %(net_id)s with flavor " + "%(flavor)s"), {'net_id': net['id'], 'flavor': flavor}) + try: + meta_db_v2.add_network_flavor_binding(context.session, + flavor, str(net['id'])) + except Exception: + LOG.exception(_('Failed to add flavor bindings')) + plugin.delete_network(context, net['id']) + raise FaildToAddFlavorBinding() + + LOG.debug(_("Created network: %s"), net['id']) + self._extend_network_dict(context, net) + return net + + def update_network(self, context, id, network): + flavor = meta_db_v2.get_flavor_by_network(context.session, id) + plugin = self._get_plugin(flavor) + return plugin.update_network(context, id, network) + + def delete_network(self, context, id): + flavor = meta_db_v2.get_flavor_by_network(context.session, id) + plugin = self._get_plugin(flavor) + return plugin.delete_network(context, id) + + def get_network(self, context, id, fields=None): + flavor = meta_db_v2.get_flavor_by_network(context.session, id) + plugin = self._get_plugin(flavor) + net = plugin.get_network(context, id, fields) + net['id'] = id + if not fields or ext_flavor.FLAVOR_NETWORK in fields: + self._extend_network_dict(context, net) + if fields and 'id' not in fields: + del net['id'] + return net + + def get_networks(self, context, filters=None, fields=None): + nets = [] + for flavor, plugin in self.plugins.items(): + if (filters and ext_flavor.FLAVOR_NETWORK in filters and + not flavor in filters[ext_flavor.FLAVOR_NETWORK]): + continue + if filters: + #NOTE: copy each time since a target plugin may modify + # plugin_filters. + plugin_filters = filters.copy() + else: + plugin_filters = {} + plugin_filters[ext_flavor.FLAVOR_NETWORK] = [flavor] + plugin_nets = plugin.get_networks(context, plugin_filters, fields) + for net in plugin_nets: + if not fields or ext_flavor.FLAVOR_NETWORK in fields: + net[ext_flavor.FLAVOR_NETWORK] = flavor + nets.append(net) + return nets + + def _get_flavor_by_network_id(self, context, network_id): + return meta_db_v2.get_flavor_by_network(context.session, network_id) + + def _get_flavor_by_router_id(self, context, router_id): + return meta_db_v2.get_flavor_by_router(context.session, router_id) + + def _get_plugin_by_network_id(self, context, network_id): + flavor = self._get_flavor_by_network_id(context, network_id) + return self._get_plugin(flavor) + + def create_port(self, context, port): + p = port['port'] + if 'network_id' not in p: + raise exc.NotFound + plugin = self._get_plugin_by_network_id(context, p['network_id']) + return plugin.create_port(context, port) + + def update_port(self, context, id, port): + port_in_db = self._get_port(context, id) + plugin = self._get_plugin_by_network_id(context, + port_in_db['network_id']) + return plugin.update_port(context, id, port) + + def delete_port(self, context, id, l3_port_check=True): + port_in_db = self._get_port(context, id) + plugin = self._get_plugin_by_network_id(context, + port_in_db['network_id']) + return plugin.delete_port(context, id, l3_port_check) + + # This is necessary since there is a case that + # NeutronManager.get_plugin()._make_port_dict is called. 
+ def _make_port_dict(self, port): + context = neutron_context.get_admin_context() + plugin = self._get_plugin_by_network_id(context, + port['network_id']) + return plugin._make_port_dict(port) + + def get_port(self, context, id, fields=None): + port_in_db = self._get_port(context, id) + plugin = self._get_plugin_by_network_id(context, + port_in_db['network_id']) + return plugin.get_port(context, id, fields) + + def get_ports(self, context, filters=None, fields=None): + all_ports = [] + for flavor, plugin in self.plugins.items(): + if filters: + #NOTE: copy each time since a target plugin may modify + # plugin_filters. + plugin_filters = filters.copy() + else: + plugin_filters = {} + plugin_filters[ext_flavor.FLAVOR_NETWORK] = [flavor] + ports = plugin.get_ports(context, plugin_filters, fields) + all_ports += ports + return all_ports + + def create_subnet(self, context, subnet): + s = subnet['subnet'] + if 'network_id' not in s: + raise exc.NotFound + plugin = self._get_plugin_by_network_id(context, + s['network_id']) + return plugin.create_subnet(context, subnet) + + def update_subnet(self, context, id, subnet): + s = self.get_subnet(context, id) + plugin = self._get_plugin_by_network_id(context, + s['network_id']) + return plugin.update_subnet(context, id, subnet) + + def delete_subnet(self, context, id): + s = self.get_subnet(context, id) + plugin = self._get_plugin_by_network_id(context, + s['network_id']) + return plugin.delete_subnet(context, id) + + def _extend_router_dict(self, context, router): + flavor = self._get_flavor_by_router_id(context, router['id']) + router[ext_flavor.FLAVOR_ROUTER] = flavor + + def create_router(self, context, router): + r = router['router'] + flavor = r.get(ext_flavor.FLAVOR_ROUTER) + if str(flavor) not in self.l3_plugins: + flavor = self.default_l3_flavor + plugin = self._get_l3_plugin(flavor) + r_in_db = plugin.create_router(context, router) + LOG.debug(_("Created router: %(router_id)s with flavor " + "%(flavor)s"), + {'router_id': r_in_db['id'], 'flavor': flavor}) + try: + meta_db_v2.add_router_flavor_binding(context.session, + flavor, str(r_in_db['id'])) + except Exception: + LOG.exception(_('Failed to add flavor bindings')) + plugin.delete_router(context, r_in_db['id']) + raise FaildToAddFlavorBinding() + + LOG.debug(_("Created router: %s"), r_in_db['id']) + self._extend_router_dict(context, r_in_db) + return r_in_db + + def update_router(self, context, id, router): + flavor = meta_db_v2.get_flavor_by_router(context.session, id) + plugin = self._get_l3_plugin(flavor) + return plugin.update_router(context, id, router) + + def delete_router(self, context, id): + flavor = meta_db_v2.get_flavor_by_router(context.session, id) + plugin = self._get_l3_plugin(flavor) + return plugin.delete_router(context, id) + + def get_router(self, context, id, fields=None): + flavor = meta_db_v2.get_flavor_by_router(context.session, id) + plugin = self._get_l3_plugin(flavor) + router = plugin.get_router(context, id, fields) + if not fields or ext_flavor.FLAVOR_ROUTER in fields: + self._extend_router_dict(context, router) + return router + + def get_routers_with_flavor(self, context, filters=None, + fields=None): + collection = self._model_query(context, l3_db.Router) + r_model = meta_models_v2.RouterFlavor + collection = collection.join(r_model, + l3_db.Router.id == r_model.router_id) + if filters: + for key, value in filters.iteritems(): + if key == ext_flavor.FLAVOR_ROUTER: + column = meta_models_v2.RouterFlavor.flavor + else: + column = getattr(l3_db.Router, key, 
None) + if column: + collection = collection.filter(column.in_(value)) + return [self._make_router_dict(c, fields) for c in collection] + + def get_routers(self, context, filters=None, fields=None): + routers = self.get_routers_with_flavor(context, filters, + None) + return [self.get_router(context, router['id'], + fields) + for router in routers] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/proxy_neutron_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/proxy_neutron_plugin.py new file mode 100644 index 00000000..1309adc0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/metaplugin/proxy_neutron_plugin.py @@ -0,0 +1,134 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import l3_db +from neutron.openstack.common import log as logging +from neutronclient.common import exceptions +from neutronclient.v2_0 import client + + +LOG = logging.getLogger(__name__) + + +class ProxyPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + l3_db.L3_NAT_db_mixin): + supported_extension_aliases = ["external-net", "router"] + + def __init__(self, configfile=None): + super(ProxyPluginV2, self).__init__() + self.neutron = client.Client( + username=cfg.CONF.PROXY.admin_user, + password=cfg.CONF.PROXY.admin_password, + tenant_name=cfg.CONF.PROXY.admin_tenant_name, + auth_url=cfg.CONF.PROXY.auth_url, + auth_strategy=cfg.CONF.PROXY.auth_strategy, + region_name=cfg.CONF.PROXY.auth_region + ) + + def _get_client(self): + return self.neutron + + def create_subnet(self, context, subnet): + subnet_remote = self._get_client().create_subnet(subnet) + subnet['subnet']['id'] = subnet_remote['id'] + tenant_id = self._get_tenant_id_for_create(context, subnet['subnet']) + subnet['subnet']['tenant_id'] = tenant_id + try: + subnet_in_db = super(ProxyPluginV2, self).create_subnet( + context, subnet) + except Exception: + self._get_client().delete_subnet(subnet_remote['id']) + return subnet_in_db + + def update_subnet(self, context, id, subnet): + subnet_in_db = super(ProxyPluginV2, self).update_subnet( + context, id, subnet) + try: + self._get_client().update_subnet(id, subnet) + except Exception as e: + LOG.error(_("Update subnet failed: %s"), e) + return subnet_in_db + + def delete_subnet(self, context, id): + try: + self._get_client().delete_subnet(id) + except exceptions.NotFound: + LOG.warn(_("Subnet in remote have already deleted")) + return super(ProxyPluginV2, self).delete_subnet(context, id) + + def create_network(self, context, network): + network_remote = self._get_client().create_network(network) + network['network']['id'] = network_remote['id'] + tenant_id = self._get_tenant_id_for_create(context, network['network']) + network['network']['tenant_id'] = tenant_id + try: + network_in_db = super(ProxyPluginV2, 
self).create_network( + context, network) + except Exception: + self._get_client().delete_network(network_remote['id']) + return network_in_db + + def update_network(self, context, id, network): + network_in_db = super(ProxyPluginV2, self).update_network( + context, id, network) + try: + self._get_client().update_network(id, network) + except Exception as e: + LOG.error(_("Update network failed: %s"), e) + return network_in_db + + def delete_network(self, context, id): + try: + self._get_client().delete_network(id) + except exceptions.NetworkNotFoundClient: + LOG.warn(_("Network in remote have already deleted")) + return super(ProxyPluginV2, self).delete_network(context, id) + + def create_port(self, context, port): + port_remote = self._get_client().create_port(port) + port['port']['id'] = port_remote['id'] + tenant_id = self._get_tenant_id_for_create(context, port['port']) + port['port']['tenant_id'] = tenant_id + try: + port_in_db = super(ProxyPluginV2, self).create_port( + context, port) + except Exception: + self._get_client().delete_port(port_remote['id']) + return port_in_db + + def update_port(self, context, id, port): + port_in_db = super(ProxyPluginV2, self).update_port( + context, id, port) + try: + self._get_client().update_port(id, port) + except Exception as e: + LOG.error(_("Update port failed: %s"), e) + return port_in_db + + def delete_port(self, context, id, l3_port_check=True): + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + self.disassociate_floatingips(context, id) + + try: + self._get_client().delete_port(id) + except exceptions.PortNotFoundClient: + LOG.warn(_("Port in remote have already deleted")) + return super(ProxyPluginV2, self).delete_port(context, id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/__init__.py new file mode 100644 index 00000000..e2cc2a75 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/__init__.py @@ -0,0 +1,15 @@ +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/agent/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/agent/__init__.py new file mode 100644 index 00000000..cf581856 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/agent/__init__.py @@ -0,0 +1,14 @@ +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/agent/midonet_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/agent/midonet_driver.py new file mode 100644 index 00000000..8ebcf675 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/agent/midonet_driver.py @@ -0,0 +1,50 @@ +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Rossella Sblendido, Midokura Japan KK +# @author: Tomoe Sugihara, Midokura Japan KK +# @author: Ryu Ishimoto, Midokura Japan KK + +from neutron.agent.linux import dhcp +from neutron.openstack.common import log as logging +from neutron.plugins.midonet.common import config # noqa + +LOG = logging.getLogger(__name__) + + +class DhcpNoOpDriver(dhcp.DhcpLocalProcess): + + @classmethod + def existing_dhcp_networks(cls, conf, root_helper): + """Return a list of existing networks ids that we have configs for.""" + return [] + + @classmethod + def check_version(cls): + """Execute version checks on DHCP server.""" + return float(1.0) + + def disable(self, retain_port=False): + """Disable DHCP for this network.""" + if not retain_port: + self.device_manager.destroy(self.network, self.interface_name) + self._remove_config_files() + + def reload_allocations(self): + """Force the DHCP server to reload the assignment database.""" + pass + + def spawn_process(self): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/__init__.py new file mode 100644 index 00000000..cf581856 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/config.py new file mode 100644 index 00000000..54d7f12f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/config.py @@ -0,0 +1,44 @@ +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. 
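Editor's note: DhcpNoOpDriver above stubs out the parts of the DHCP driver interface that would normally spawn dnsmasq, because MidoNet serves DHCP natively and only the Neutron-side bookkeeping is needed. Assuming the DHCP agent's standard dhcp_driver option, a deployment would typically point the agent at this class (illustrative only):

    # dhcp_agent.ini (illustrative)
    [DEFAULT]
    dhcp_driver = neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver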
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Tomoe Sugihara, Midokura Japan KK + +from oslo.config import cfg + +midonet_opts = [ + cfg.StrOpt('midonet_uri', default='http://localhost:8080/midonet-api', + help=_('MidoNet API server URI.')), + cfg.StrOpt('username', default='admin', + help=_('MidoNet admin username.')), + cfg.StrOpt('password', default='passw0rd', + secret=True, + help=_('MidoNet admin password.')), + cfg.StrOpt('project_id', + default='77777777-7777-7777-7777-777777777777', + help=_('ID of the project that MidoNet admin user' + 'belongs to.')), + cfg.StrOpt('provider_router_id', + help=_('Virtual provider router ID.')), + cfg.StrOpt('mode', + default='dev', + help=_('Operational mode. Internal dev use only.')), + cfg.StrOpt('midonet_host_uuid_path', + default='/etc/midolman/host_uuid.properties', + help=_('Path to midonet host uuid file')) +] + + +cfg.CONF.register_opts(midonet_opts, "MIDONET") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/net_util.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/net_util.py new file mode 100644 index 00000000..6f62ecdc --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/common/net_util.py @@ -0,0 +1,66 @@ +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
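Editor's note: the options above are registered under the MIDONET group, so the plugin looks for a matching section in its configuration file. A hypothetical fragment follows; all values are placeholders, and provider_router_id must point at a pre-created provider router, since the plugin refuses to start without it:

    [MIDONET]
    midonet_uri = http://controller:8080/midonet-api
    username = admin
    password = passw0rd
    project_id = 77777777-7777-7777-7777-777777777777
    provider_router_id = <UUID of the pre-created provider router>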
+# +# @author: Ryu Ishimoto, Midokura Japan KK + + +from neutron.common import constants + + +def subnet_str(cidr): + """Convert the cidr string to x.x.x.x_y format + + :param cidr: CIDR in x.x.x.x/y format + """ + if cidr is None: + return None + return cidr.replace("/", "_") + + +def net_addr(addr): + """Get network address prefix and length from a given address.""" + if addr is None: + return (None, None) + nw_addr, nw_len = addr.split('/') + nw_len = int(nw_len) + return nw_addr, nw_len + + +def get_ethertype_value(ethertype): + """Convert string representation of ethertype to the numerical.""" + if ethertype is None: + return None + mapping = { + 'ipv4': 0x0800, + 'ipv6': 0x86DD, + 'arp': 0x806 + } + return mapping.get(ethertype.lower()) + + +def get_protocol_value(protocol): + """Convert string representation of protocol to the numerical.""" + if protocol is None: + return None + + if isinstance(protocol, int): + return protocol + + mapping = { + constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP, + constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP, + constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP + } + return mapping.get(protocol.lower()) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/midonet_lib.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/midonet_lib.py new file mode 100644 index 00000000..6488a8b4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/midonet_lib.py @@ -0,0 +1,694 @@ +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Tomoe Sugihara, Midokura Japan KK +# @author: Ryu Ishimoto, Midokura Japan KK +# @author: Rossella Sblendido, Midokura Japan KK +# @author: Duarte Nunes, Midokura Japan KK + +from midonetclient import exc +from webob import exc as w_exc + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging +from neutron.plugins.midonet.common import net_util + +LOG = logging.getLogger(__name__) + + +def handle_api_error(fn): + """Wrapper for methods that throws custom exceptions.""" + def wrapped(*args, **kwargs): + try: + return fn(*args, **kwargs) + except (w_exc.HTTPException, + exc.MidoApiConnectionError) as ex: + raise MidonetApiException(msg=ex) + return wrapped + + +class MidonetResourceNotFound(n_exc.NotFound): + message = _('MidoNet %(resource_type)s %(id)s could not be found') + + +class MidonetApiException(n_exc.NeutronException): + message = _("MidoNet API error: %(msg)s") + + +class MidoClient: + + def __init__(self, mido_api): + self.mido_api = mido_api + + @classmethod + def _fill_dto(cls, dto, fields): + for field_name, field_value in fields.iteritems(): + # We assume the setters are named the + # same way as the attributes themselves. 
+ try: + getattr(dto, field_name)(field_value) + except AttributeError: + pass + return dto + + @classmethod + def _create_dto(cls, dto, fields): + return cls._fill_dto(dto, fields).create() + + @classmethod + def _update_dto(cls, dto, fields): + return cls._fill_dto(dto, fields).update() + + @handle_api_error + def create_bridge(self, **kwargs): + """Create a new bridge + + :param \**kwargs: configuration of the new bridge + :returns: newly created bridge + """ + LOG.debug(_("MidoClient.create_bridge called: " + "kwargs=%(kwargs)s"), {'kwargs': kwargs}) + return self._create_dto(self.mido_api.add_bridge(), kwargs) + + @handle_api_error + def delete_bridge(self, id): + """Delete a bridge + + :param id: id of the bridge + """ + LOG.debug(_("MidoClient.delete_bridge called: id=%(id)s"), {'id': id}) + return self.mido_api.delete_bridge(id) + + @handle_api_error + def get_bridge(self, id): + """Get a bridge + + :param id: id of the bridge + :returns: requested bridge. None if bridge does not exist. + """ + LOG.debug(_("MidoClient.get_bridge called: id=%s"), id) + try: + return self.mido_api.get_bridge(id) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Bridge', id=id) + + @handle_api_error + def update_bridge(self, id, **kwargs): + """Update a bridge of the given id with the new fields + + :param id: id of the bridge + :param \**kwargs: the fields to update and their values + :returns: bridge object + """ + LOG.debug(_("MidoClient.update_bridge called: " + "id=%(id)s, kwargs=%(kwargs)s"), + {'id': id, 'kwargs': kwargs}) + try: + return self._update_dto(self.mido_api.get_bridge(id), kwargs) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Bridge', id=id) + + @handle_api_error + def create_dhcp(self, bridge, gateway_ip, cidr, host_rts=None, + dns_servers=None): + """Create a new DHCP entry + + :param bridge: bridge object to add dhcp to + :param gateway_ip: IP address of gateway + :param cidr: subnet represented as x.x.x.x/y + :param host_rts: list of routes set in the host + :param dns_servers: list of dns servers + :returns: newly created dhcp + """ + LOG.debug(_("MidoClient.create_dhcp called: bridge=%(bridge)s, " + "cidr=%(cidr)s, gateway_ip=%(gateway_ip)s, " + "host_rts=%(host_rts)s, dns_servers=%(dns_servers)s"), + {'bridge': bridge, 'cidr': cidr, 'gateway_ip': gateway_ip, + 'host_rts': host_rts, 'dns_servers': dns_servers}) + self.mido_api.add_bridge_dhcp(bridge, gateway_ip, cidr, + host_rts=host_rts, + dns_nservers=dns_servers) + + @handle_api_error + def add_dhcp_host(self, bridge, cidr, ip, mac): + """Add DHCP host entry + + :param bridge: bridge the DHCP is configured for + :param cidr: subnet represented as x.x.x.x/y + :param ip: IP address + :param mac: MAC address + """ + LOG.debug(_("MidoClient.add_dhcp_host called: bridge=%(bridge)s, " + "cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s"), + {'bridge': bridge, 'cidr': cidr, 'ip': ip, 'mac': mac}) + subnet = bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) + if subnet is None: + raise MidonetApiException(msg=_("Tried to add to" + "non-existent DHCP")) + + subnet.add_dhcp_host().ip_addr(ip).mac_addr(mac).create() + + @handle_api_error + def remove_dhcp_host(self, bridge, cidr, ip, mac): + """Remove DHCP host entry + + :param bridge: bridge the DHCP is configured for + :param cidr: subnet represented as x.x.x.x/y + :param ip: IP address + :param mac: MAC address + """ + LOG.debug(_("MidoClient.remove_dhcp_host called: bridge=%(bridge)s, " + "cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s"), + 
{'bridge': bridge, 'cidr': cidr, 'ip': ip, 'mac': mac}) + subnet = bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) + if subnet is None: + LOG.warn(_("Tried to delete mapping from non-existent subnet")) + return + + for dh in subnet.get_dhcp_hosts(): + if dh.get_mac_addr() == mac and dh.get_ip_addr() == ip: + LOG.debug(_("MidoClient.remove_dhcp_host: Deleting %(dh)r"), + {"dh": dh}) + dh.delete() + + @handle_api_error + def delete_dhcp_host(self, bridge_id, cidr, ip, mac): + """Delete DHCP host entry + + :param bridge_id: id of the bridge of the DHCP + :param cidr: subnet represented as x.x.x.x/y + :param ip: IP address + :param mac: MAC address + """ + LOG.debug(_("MidoClient.delete_dhcp_host called: " + "bridge_id=%(bridge_id)s, cidr=%(cidr)s, ip=%(ip)s, " + "mac=%(mac)s"), {'bridge_id': bridge_id, + 'cidr': cidr, + 'ip': ip, 'mac': mac}) + bridge = self.get_bridge(bridge_id) + self.remove_dhcp_host(bridge, net_util.subnet_str(cidr), ip, mac) + + @handle_api_error + def delete_dhcp(self, bridge, cidr): + """Delete a DHCP entry + + :param bridge: bridge to remove DHCP from + :param cidr: subnet represented as x.x.x.x/y + """ + LOG.debug(_("MidoClient.delete_dhcp called: bridge=%(bridge)s, " + "cidr=%(cidr)s"), + {'bridge': bridge, 'cidr': cidr}) + dhcp_subnets = bridge.get_dhcp_subnets() + net_addr, net_len = net_util.net_addr(cidr) + if not dhcp_subnets: + raise MidonetApiException( + msg=_("Tried to delete non-existent DHCP")) + for dhcp in dhcp_subnets: + if dhcp.get_subnet_prefix() == net_addr: + dhcp.delete() + break + + @handle_api_error + def delete_port(self, id, delete_chains=False): + """Delete a port + + :param id: id of the port + """ + LOG.debug(_("MidoClient.delete_port called: id=%(id)s, " + "delete_chains=%(delete_chains)s"), + {'id': id, 'delete_chains': delete_chains}) + if delete_chains: + self.delete_port_chains(id) + + self.mido_api.delete_port(id) + + @handle_api_error + def get_port(self, id): + """Get a port + + :param id: id of the port + :returns: requested port. None if it does not exist + """ + LOG.debug(_("MidoClient.get_port called: id=%(id)s"), {'id': id}) + try: + return self.mido_api.get_port(id) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Port', id=id) + + @handle_api_error + def add_bridge_port(self, bridge, **kwargs): + """Add a port on a bridge + + :param bridge: bridge to add a new port to + :param \**kwargs: configuration of the new port + :returns: newly created port + """ + LOG.debug(_("MidoClient.add_bridge_port called: " + "bridge=%(bridge)s, kwargs=%(kwargs)s"), + {'bridge': bridge, 'kwargs': kwargs}) + return self._create_dto(self.mido_api.add_bridge_port(bridge), kwargs) + + @handle_api_error + def update_port(self, id, **kwargs): + """Update a port of the given id with the new fields + + :param id: id of the port + :param \**kwargs: the fields to update and their values + """ + LOG.debug(_("MidoClient.update_port called: " + "id=%(id)s, kwargs=%(kwargs)s"), + {'id': id, 'kwargs': kwargs}) + try: + return self._update_dto(self.mido_api.get_port(id), kwargs) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Port', id=id) + + @handle_api_error + def add_router_port(self, router, **kwargs): + """Add a new port to an existing router. 
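Editor's note: a short usage sketch of the pieces introduced so far, assuming client is a MidoClient wrapping a configured midonetclient handle and net_id names an existing bridge (all addresses are illustrative):

    # CIDR and protocol conversions from net_util
    net_util.subnet_str("10.0.0.0/24")    # -> "10.0.0.0_24"
    net_util.net_addr("10.0.0.0/24")      # -> ("10.0.0.0", 24)
    net_util.get_protocol_value("tcp")    # -> constants.PROTO_NUM_TCP

    # DHCP wiring on the bridge backing a Neutron network
    bridge = client.get_bridge(net_id)
    client.create_dhcp(bridge, "10.0.0.1", "10.0.0.0/24",
                       dns_servers=["8.8.8.8"])
    client.add_dhcp_host(bridge, "10.0.0.0/24", "10.0.0.5",
                         "fa:16:3e:00:00:01")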
+ + :param router: router to add a new port to + :param \**kwargs: configuration of the new port + :returns: newly created port + """ + return self._create_dto(self.mido_api.add_router_port(router), kwargs) + + @handle_api_error + def create_router(self, **kwargs): + """Create a new router + + :param \**kwargs: configuration of the new router + :returns: newly created router + """ + LOG.debug(_("MidoClient.create_router called: " + "kwargs=%(kwargs)s"), {'kwargs': kwargs}) + return self._create_dto(self.mido_api.add_router(), kwargs) + + @handle_api_error + def delete_router(self, id): + """Delete a router + + :param id: id of the router + """ + LOG.debug(_("MidoClient.delete_router called: id=%(id)s"), {'id': id}) + return self.mido_api.delete_router(id) + + @handle_api_error + def get_router(self, id): + """Get a router with the given id + + :param id: id of the router + :returns: requested router object. None if it does not exist. + """ + LOG.debug(_("MidoClient.get_router called: id=%(id)s"), {'id': id}) + try: + return self.mido_api.get_router(id) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Router', id=id) + + @handle_api_error + def update_router(self, id, **kwargs): + """Update a router of the given id with the new name + + :param id: id of the router + :param \**kwargs: the fields to update and their values + :returns: router object + """ + LOG.debug(_("MidoClient.update_router called: " + "id=%(id)s, kwargs=%(kwargs)s"), + {'id': id, 'kwargs': kwargs}) + try: + return self._update_dto(self.mido_api.get_router(id), kwargs) + except w_exc.HTTPNotFound: + raise MidonetResourceNotFound(resource_type='Router', id=id) + + @handle_api_error + def delete_route(self, id): + return self.mido_api.delete_route(id) + + @handle_api_error + def add_dhcp_route_option(self, bridge, cidr, gw_ip, dst_ip): + """Add Option121 route to subnet + + :param bridge: Bridge to add the option route to + :param cidr: subnet represented as x.x.x.x/y + :param gw_ip: IP address of the next hop + :param dst_ip: IP address of the destination, in x.x.x.x/y format + """ + LOG.debug(_("MidoClient.add_dhcp_route_option called: " + "bridge=%(bridge)s, cidr=%(cidr)s, gw_ip=%(gw_ip)s" + "dst_ip=%(dst_ip)s"), + {"bridge": bridge, "cidr": cidr, "gw_ip": gw_ip, + "dst_ip": dst_ip}) + subnet = bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) + if subnet is None: + raise MidonetApiException( + msg=_("Tried to access non-existent DHCP")) + prefix, length = dst_ip.split("/") + routes = [{'destinationPrefix': prefix, 'destinationLength': length, + 'gatewayAddr': gw_ip}] + cur_routes = subnet.get_opt121_routes() + if cur_routes: + routes = routes + cur_routes + subnet.opt121_routes(routes).update() + + @handle_api_error + def link(self, port, peer_id): + """Link a port to a given peerId.""" + self.mido_api.link(port, peer_id) + + @handle_api_error + def delete_port_routes(self, routes, port_id): + """Remove routes whose next hop port is the given port ID.""" + for route in routes: + if route.get_next_hop_port() == port_id: + self.mido_api.delete_route(route.get_id()) + + @handle_api_error + def get_router_routes(self, router_id): + """Get all routes for the given router.""" + return self.mido_api.get_router_routes(router_id) + + @handle_api_error + def unlink(self, port): + """Unlink a port + + :param port: port object + """ + LOG.debug(_("MidoClient.unlink called: port=%(port)s"), + {'port': port}) + if port.get_peer_id(): + self.mido_api.unlink(port) + else: + LOG.warn(_("Attempted to unlink a 
port that was not linked. %s"), + port.get_id()) + + @handle_api_error + def remove_rules_by_property(self, tenant_id, chain_name, key, value): + """Remove all the rules that match the provided key and value.""" + LOG.debug(_("MidoClient.remove_rules_by_property called: " + "tenant_id=%(tenant_id)s, chain_name=%(chain_name)s" + "key=%(key)s, value=%(value)s"), + {'tenant_id': tenant_id, 'chain_name': chain_name, + 'key': key, 'value': value}) + chain = self.get_chain_by_name(tenant_id, chain_name) + if chain is None: + raise MidonetResourceNotFound(resource_type='Chain', + id=chain_name) + + for r in chain.get_rules(): + if key in r.get_properties(): + if r.get_properties()[key] == value: + self.mido_api.delete_rule(r.get_id()) + + @handle_api_error + def add_router_chains(self, router, inbound_chain_name, + outbound_chain_name): + """Create chains for a new router. + + Creates inbound and outbound chains for the router with the given + names, and the new chains are set on the router. + + :param router: router to set chains for + :param inbound_chain_name: Name of the inbound chain + :param outbound_chain_name: Name of the outbound chain + """ + LOG.debug(_("MidoClient.create_router_chains called: " + "router=%(router)s, inbound_chain_name=%(in_chain)s, " + "outbound_chain_name=%(out_chain)s"), + {"router": router, "in_chain": inbound_chain_name, + "out_chain": outbound_chain_name}) + tenant_id = router.get_tenant_id() + + inbound_chain = self.mido_api.add_chain().tenant_id(tenant_id).name( + inbound_chain_name,).create() + outbound_chain = self.mido_api.add_chain().tenant_id(tenant_id).name( + outbound_chain_name).create() + + # set chains to in/out filters + router.inbound_filter_id(inbound_chain.get_id()).outbound_filter_id( + outbound_chain.get_id()).update() + return inbound_chain, outbound_chain + + @handle_api_error + def delete_router_chains(self, id): + """Deletes chains of a router. + + :param id: router ID to delete chains of + """ + LOG.debug(_("MidoClient.delete_router_chains called: " + "id=%(id)s"), {'id': id}) + router = self.get_router(id) + if (router.get_inbound_filter_id()): + self.mido_api.delete_chain(router.get_inbound_filter_id()) + + if (router.get_outbound_filter_id()): + self.mido_api.delete_chain(router.get_outbound_filter_id()) + + @handle_api_error + def delete_port_chains(self, id): + """Deletes chains of a port. 
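Editor's note: routers receive the same create/get/update/delete treatment as bridges, plus a pair of chains that later hold their NAT rules. A sketch of the usual sequence, assuming client is a MidoClient and reusing the chain-name templates defined further down in plugin.py:

    router = client.create_router(tenant_id=tenant_id, name="router1")
    rid = router.get_id()
    in_chain, out_chain = client.add_router_chains(
        router, "OS_PRE_ROUTING_%s" % rid, "OS_POST_ROUTING_%s" % rid)

    # teardown mirrors this: chains first, then the router itself
    client.delete_router_chains(rid)
    client.delete_router(rid)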
+ + :param id: port ID to delete chains of + """ + LOG.debug(_("MidoClient.delete_port_chains called: " + "id=%(id)s"), {'id': id}) + port = self.get_port(id) + if (port.get_inbound_filter_id()): + self.mido_api.delete_chain(port.get_inbound_filter_id()) + + if (port.get_outbound_filter_id()): + self.mido_api.delete_chain(port.get_outbound_filter_id()) + + @handle_api_error + def get_link_port(self, router, peer_router_id): + """Setup a route on the router to the next hop router.""" + LOG.debug(_("MidoClient.get_link_port called: " + "router=%(router)s, peer_router_id=%(peer_router_id)s"), + {'router': router, 'peer_router_id': peer_router_id}) + # Find the port linked between the two routers + link_port = None + for p in router.get_peer_ports(): + if p.get_device_id() == peer_router_id: + link_port = p + break + return link_port + + @handle_api_error + def add_router_route(self, router, type='Normal', + src_network_addr=None, src_network_length=None, + dst_network_addr=None, dst_network_length=None, + next_hop_port=None, next_hop_gateway=None, + weight=100): + """Setup a route on the router.""" + return self.mido_api.add_router_route( + router, type=type, src_network_addr=src_network_addr, + src_network_length=src_network_length, + dst_network_addr=dst_network_addr, + dst_network_length=dst_network_length, + next_hop_port=next_hop_port, next_hop_gateway=next_hop_gateway, + weight=weight) + + @handle_api_error + def add_static_nat(self, tenant_id, chain_name, from_ip, to_ip, port_id, + nat_type='dnat', **kwargs): + """Add a static NAT entry + + :param tenant_id: owner fo the chain to add a NAT to + :param chain_name: name of the chain to add a NAT to + :param from_ip: IP to translate from + :param from_ip: IP to translate from + :param to_ip: IP to translate to + :param port_id: port to match on + :param nat_type: 'dnat' or 'snat' + """ + LOG.debug(_("MidoClient.add_static_nat called: " + "tenant_id=%(tenant_id)s, chain_name=%(chain_name)s, " + "from_ip=%(from_ip)s, to_ip=%(to_ip)s, " + "port_id=%(port_id)s, nat_type=%(nat_type)s"), + {'tenant_id': tenant_id, 'chain_name': chain_name, + 'from_ip': from_ip, 'to_ip': to_ip, + 'portid': port_id, 'nat_type': nat_type}) + if nat_type not in ['dnat', 'snat']: + raise ValueError(_("Invalid NAT type passed in %s") % nat_type) + + chain = self.get_chain_by_name(tenant_id, chain_name) + nat_targets = [] + nat_targets.append( + {'addressFrom': to_ip, 'addressTo': to_ip, + 'portFrom': 0, 'portTo': 0}) + + rule = chain.add_rule().type(nat_type).flow_action('accept').position( + 1).nat_targets(nat_targets).properties(kwargs) + + if nat_type == 'dnat': + rule = rule.nw_dst_address(from_ip).nw_dst_length(32).in_ports( + [port_id]) + else: + rule = rule.nw_src_address(from_ip).nw_src_length(32).out_ports( + [port_id]) + + return rule.create() + + @handle_api_error + def add_dynamic_snat(self, tenant_id, pre_chain_name, post_chain_name, + snat_ip, port_id, **kwargs): + """Add SNAT masquerading rule + + MidoNet requires two rules on the router, one to do NAT to a range of + ports, and another to retrieve back the original IP in the return + flow. 
+ """ + pre_chain = self.get_chain_by_name(tenant_id, pre_chain_name) + post_chain = self.get_chain_by_name(tenant_id, post_chain_name) + + pre_chain.add_rule().nw_dst_address(snat_ip).nw_dst_length( + 32).type('rev_snat').flow_action('accept').in_ports( + [port_id]).properties(kwargs).position(1).create() + + nat_targets = [] + nat_targets.append( + {'addressFrom': snat_ip, 'addressTo': snat_ip, + 'portFrom': 1, 'portTo': 65535}) + + post_chain.add_rule().type('snat').flow_action( + 'accept').nat_targets(nat_targets).out_ports( + [port_id]).properties(kwargs).position(1).create() + + @handle_api_error + def remove_static_route(self, router, ip): + """Remove static route for the IP + + :param router: next hop router to remove the routes to + :param ip: IP address of the route to remove + """ + LOG.debug(_("MidoClient.remote_static_route called: " + "router=%(router)s, ip=%(ip)s"), + {'router': router, 'ip': ip}) + for r in router.get_routes(): + if (r.get_dst_network_addr() == ip and + r.get_dst_network_length() == 32): + self.mido_api.delete_route(r.get_id()) + + @handle_api_error + def update_port_chains(self, port, inbound_chain_id, outbound_chain_id): + """Bind inbound and outbound chains to the port.""" + LOG.debug(_("MidoClient.update_port_chains called: port=%(port)s" + "inbound_chain_id=%(inbound_chain_id)s, " + "outbound_chain_id=%(outbound_chain_id)s"), + {"port": port, "inbound_chain_id": inbound_chain_id, + "outbound_chain_id": outbound_chain_id}) + port.inbound_filter_id(inbound_chain_id).outbound_filter_id( + outbound_chain_id).update() + + @handle_api_error + def create_chain(self, tenant_id, name): + """Create a new chain.""" + LOG.debug(_("MidoClient.create_chain called: tenant_id=%(tenant_id)s " + " name=%(name)s"), {"tenant_id": tenant_id, "name": name}) + return self.mido_api.add_chain().tenant_id(tenant_id).name( + name).create() + + @handle_api_error + def delete_chain(self, id): + """Delete chain matching the ID.""" + LOG.debug(_("MidoClient.delete_chain called: id=%(id)s"), {"id": id}) + self.mido_api.delete_chain(id) + + @handle_api_error + def delete_chains_by_names(self, tenant_id, names): + """Delete chains matching the names given for a tenant.""" + LOG.debug(_("MidoClient.delete_chains_by_names called: " + "tenant_id=%(tenant_id)s names=%(names)s "), + {"tenant_id": tenant_id, "names": names}) + chains = self.mido_api.get_chains({'tenant_id': tenant_id}) + for c in chains: + if c.get_name() in names: + self.mido_api.delete_chain(c.get_id()) + + @handle_api_error + def get_chain_by_name(self, tenant_id, name): + """Get the chain by its name.""" + LOG.debug(_("MidoClient.get_chain_by_name called: " + "tenant_id=%(tenant_id)s name=%(name)s "), + {"tenant_id": tenant_id, "name": name}) + for c in self.mido_api.get_chains({'tenant_id': tenant_id}): + if c.get_name() == name: + return c + return None + + @handle_api_error + def get_port_group_by_name(self, tenant_id, name): + """Get the port group by name.""" + LOG.debug(_("MidoClient.get_port_group_by_name called: " + "tenant_id=%(tenant_id)s name=%(name)s "), + {"tenant_id": tenant_id, "name": name}) + for p in self.mido_api.get_port_groups({'tenant_id': tenant_id}): + if p.get_name() == name: + return p + return None + + @handle_api_error + def create_port_group(self, tenant_id, name): + """Create a port group + + Create a new port group for a given name and ID. 
+ """ + LOG.debug(_("MidoClient.create_port_group called: " + "tenant_id=%(tenant_id)s name=%(name)s"), + {"tenant_id": tenant_id, "name": name}) + return self.mido_api.add_port_group().tenant_id(tenant_id).name( + name).create() + + @handle_api_error + def delete_port_group_by_name(self, tenant_id, name): + """Delete port group matching the name given for a tenant.""" + LOG.debug(_("MidoClient.delete_port_group_by_name called: " + "tenant_id=%(tenant_id)s name=%(name)s "), + {"tenant_id": tenant_id, "name": name}) + pgs = self.mido_api.get_port_groups({'tenant_id': tenant_id}) + for pg in pgs: + if pg.get_name() == name: + LOG.debug(_("Deleting pg %(id)s"), {"id": pg.get_id()}) + self.mido_api.delete_port_group(pg.get_id()) + + @handle_api_error + def add_port_to_port_group_by_name(self, tenant_id, name, port_id): + """Add a port to a port group with the given name.""" + LOG.debug(_("MidoClient.add_port_to_port_group_by_name called: " + "tenant_id=%(tenant_id)s name=%(name)s " + "port_id=%(port_id)s"), + {"tenant_id": tenant_id, "name": name, "port_id": port_id}) + pg = self.get_port_group_by_name(tenant_id, name) + if pg is None: + raise MidonetResourceNotFound(resource_type='PortGroup', id=name) + + pg = pg.add_port_group_port().port_id(port_id).create() + return pg + + @handle_api_error + def remove_port_from_port_groups(self, port_id): + """Remove a port binding from all the port groups.""" + LOG.debug(_("MidoClient.remove_port_from_port_groups called: " + "port_id=%(port_id)s"), {"port_id": port_id}) + port = self.get_port(port_id) + for pg in port.get_port_groups(): + pg.delete() + + @handle_api_error + def add_chain_rule(self, chain, action='accept', **kwargs): + """Create a new accept chain rule.""" + self.mido_api.add_chain_rule(chain, action, **kwargs) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/plugin.py new file mode 100644 index 00000000..3de1194f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/midonet/plugin.py @@ -0,0 +1,1256 @@ +# Copyright (C) 2012 Midokura Japan K.K. +# Copyright (C) 2013 Midokura PTE LTD +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Takaaki Suzuki, Midokura Japan KK +# @author: Tomoe Sugihara, Midokura Japan KK +# @author: Ryu Ishimoto, Midokura Japan KK +# @author: Rossella Sblendido, Midokura Japan KK +# @author: Duarte Nunes, Midokura Japan KK + +from midonetclient import api +from oslo.config import cfg +from sqlalchemy.orm import exc as sa_exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.db import portbindings_db +from neutron.db import securitygroups_db +from neutron.extensions import external_net as ext_net +from neutron.extensions import l3 +from neutron.extensions import portbindings +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.midonet.common import config # noqa +from neutron.plugins.midonet.common import net_util +from neutron.plugins.midonet import midonet_lib + +LOG = logging.getLogger(__name__) + +EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO + +METADATA_DEFAULT_IP = "169.254.169.254/32" +OS_FLOATING_IP_RULE_KEY = 'OS_FLOATING_IP' +OS_SG_RULE_KEY = 'OS_SG_RULE_ID' +OS_TENANT_ROUTER_RULE_KEY = 'OS_TENANT_ROUTER_RULE' +PRE_ROUTING_CHAIN_NAME = "OS_PRE_ROUTING_%s" +PORT_INBOUND_CHAIN_NAME = "OS_PORT_%s_INBOUND" +PORT_OUTBOUND_CHAIN_NAME = "OS_PORT_%s_OUTBOUND" +POST_ROUTING_CHAIN_NAME = "OS_POST_ROUTING_%s" +SG_INGRESS_CHAIN_NAME = "OS_SG_%s_INGRESS" +SG_EGRESS_CHAIN_NAME = "OS_SG_%s_EGRESS" +SG_PORT_GROUP_NAME = "OS_PG_%s" +SNAT_RULE = 'SNAT' + + +def _get_nat_ips(type, fip): + """Get NAT IP address information. + + From the route type given, determine the source and target IP addresses + from the provided floating IP DB object. + """ + if type == 'pre-routing': + return fip["floating_ip_address"], fip["fixed_ip_address"] + elif type == 'post-routing': + return fip["fixed_ip_address"], fip["floating_ip_address"] + else: + raise ValueError(_("Invalid nat_type %s") % type) + + +def _nat_chain_names(router_id): + """Get the chain names for NAT. + + These names are used to associate MidoNet chains to the NAT rules + applied to the router. For each of these, there are two NAT types, + 'dnat' and 'snat' that are returned as keys, and the corresponding + chain names as their values. + """ + pre_routing_name = PRE_ROUTING_CHAIN_NAME % router_id + post_routing_name = POST_ROUTING_CHAIN_NAME % router_id + return {'pre-routing': pre_routing_name, 'post-routing': post_routing_name} + + +def _sg_chain_names(sg_id): + """Get the chain names for security group. + + These names are used to associate a security group to MidoNet chains. + There are two names for ingress and egress security group directions. + """ + ingress = SG_INGRESS_CHAIN_NAME % sg_id + egress = SG_EGRESS_CHAIN_NAME % sg_id + return {'ingress': ingress, 'egress': egress} + + +def _port_chain_names(port_id): + """Get the chain names for a port. + + These are chains to hold security group chains. 
+ """ + inbound = PORT_INBOUND_CHAIN_NAME % port_id + outbound = PORT_OUTBOUND_CHAIN_NAME % port_id + return {'inbound': inbound, 'outbound': outbound} + + +def _sg_port_group_name(sg_id): + """Get the port group name for security group.. + + This name is used to associate a security group to MidoNet port groups. + """ + return SG_PORT_GROUP_NAME % sg_id + + +def _rule_direction(sg_direction): + """Convert the SG direction to MidoNet direction + + MidoNet terms them 'inbound' and 'outbound' instead of 'ingress' and + 'egress'. Also, the direction is reversed since MidoNet sees it + from the network port's point of view, not the VM's. + """ + if sg_direction == 'ingress': + return 'outbound' + elif sg_direction == 'egress': + return 'inbound' + else: + raise ValueError(_("Unrecognized direction %s") % sg_direction) + + +def _is_router_interface_port(port): + """Check whether the given port is a router interface port.""" + device_owner = port['device_owner'] + return (device_owner in l3_db.DEVICE_OWNER_ROUTER_INTF) + + +def _is_router_gw_port(port): + """Check whether the given port is a router gateway port.""" + device_owner = port['device_owner'] + return (device_owner in l3_db.DEVICE_OWNER_ROUTER_GW) + + +def _is_vif_port(port): + """Check whether the given port is a standard VIF port.""" + device_owner = port['device_owner'] + return (not _is_dhcp_port(port) and + device_owner not in (l3_db.DEVICE_OWNER_ROUTER_GW, + l3_db.DEVICE_OWNER_ROUTER_INTF)) + + +def _is_dhcp_port(port): + """Check whether the given port is a DHCP port.""" + device_owner = port['device_owner'] + return device_owner.startswith(constants.DEVICE_OWNER_DHCP) + + +def _check_resource_exists(func, id, name, raise_exc=False): + """Check whether the given resource exists in MidoNet data store.""" + try: + func(id) + except midonet_lib.MidonetResourceNotFound as exc: + LOG.error(_("There is no %(name)s with ID %(id)s in MidoNet."), + {"name": name, "id": id}) + if raise_exc: + raise MidonetPluginException(msg=exc) + + +class MidoRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin): + RPC_API_VERSION = '1.1' + + +class MidonetPluginException(n_exc.NeutronException): + message = _("%(msg)s") + + +class MidonetPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + portbindings_db.PortBindingMixin, + external_net_db.External_net_db_mixin, + l3_db.L3_NAT_db_mixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + securitygroups_db.SecurityGroupDbMixin): + + supported_extension_aliases = ['external-net', 'router', 'security-group', + 'agent', 'dhcp_agent_scheduler', 'binding'] + __native_bulk_support = False + + def __init__(self): + super(MidonetPluginV2, self).__init__() + # Read config values + midonet_conf = cfg.CONF.MIDONET + midonet_uri = midonet_conf.midonet_uri + admin_user = midonet_conf.username + admin_pass = midonet_conf.password + admin_project_id = midonet_conf.project_id + self.provider_router_id = midonet_conf.provider_router_id + self.provider_router = None + + self.mido_api = api.MidonetApi(midonet_uri, admin_user, + admin_pass, + project_id=admin_project_id) + self.client = midonet_lib.MidoClient(self.mido_api) + + # self.provider_router_id should have been set. 
+ if self.provider_router_id is None: + msg = _('provider_router_id should be configured in the plugin ' + 'config file') + LOG.exception(msg) + raise MidonetPluginException(msg=msg) + + self.setup_rpc() + + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_MIDONET, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + + def _get_provider_router(self): + if self.provider_router is None: + self.provider_router = self.client.get_router( + self.provider_router_id) + return self.provider_router + + def _dhcp_mappings(self, context, fixed_ips, mac): + for fixed_ip in fixed_ips: + subnet = self._get_subnet(context, fixed_ip["subnet_id"]) + if subnet["ip_version"] == 6: + # TODO(ryu) handle IPv6 + continue + if not subnet["enable_dhcp"]: + # Skip if DHCP is disabled + continue + yield subnet['cidr'], fixed_ip["ip_address"], mac + + def _metadata_subnets(self, context, fixed_ips): + for fixed_ip in fixed_ips: + subnet = self._get_subnet(context, fixed_ip["subnet_id"]) + if subnet["ip_version"] == 6: + continue + yield subnet['cidr'], fixed_ip["ip_address"] + + def _initialize_port_chains(self, port, in_chain, out_chain, sg_ids): + + tenant_id = port["tenant_id"] + + position = 1 + # mac spoofing protection + self._add_chain_rule(in_chain, action='drop', + dl_src=port["mac_address"], inv_dl_src=True, + position=position) + + # ip spoofing protection + for fixed_ip in port["fixed_ips"]: + position += 1 + self._add_chain_rule(in_chain, action="drop", + src_addr=fixed_ip["ip_address"] + "/32", + inv_nw_src=True, dl_type=0x0800, # IPv4 + position=position) + + # conntrack + position += 1 + self._add_chain_rule(in_chain, action='accept', + match_forward_flow=True, + position=position) + + # Reset the position to process egress + position = 1 + + # Add rule for SGs + if sg_ids: + for sg_id in sg_ids: + chain_name = _sg_chain_names(sg_id)["ingress"] + chain = self.client.get_chain_by_name(tenant_id, chain_name) + self._add_chain_rule(out_chain, action='jump', + jump_chain_id=chain.get_id(), + jump_chain_name=chain_name, + position=position) + position += 1 + + # add reverse flow matching at the end + self._add_chain_rule(out_chain, action='accept', + match_return_flow=True, + position=position) + position += 1 + + # fall back DROP rule at the end except for ARP + self._add_chain_rule(out_chain, action='drop', + dl_type=0x0806, # ARP + inv_dl_type=True, position=position) + + def _bind_port_to_sgs(self, context, port, sg_ids): + self._process_port_create_security_group(context, port, sg_ids) + if sg_ids is not None: + for sg_id in sg_ids: + pg_name = _sg_port_group_name(sg_id) + self.client.add_port_to_port_group_by_name( + port["tenant_id"], pg_name, port["id"]) + + def _unbind_port_from_sgs(self, context, port_id): + self._delete_port_security_group_bindings(context, port_id) + self.client.remove_port_from_port_groups(port_id) + + def _create_accept_chain_rule(self, context, sg_rule, chain=None): + direction = sg_rule["direction"] + tenant_id = sg_rule["tenant_id"] + sg_id = sg_rule["security_group_id"] + chain_name = _sg_chain_names(sg_id)[direction] + + if chain is None: + chain = self.client.get_chain_by_name(tenant_id, chain_name) + + pg_id = None + if sg_rule["remote_group_id"] is not None: + pg_name = _sg_port_group_name(sg_id) + pg = self.client.get_port_group_by_name(tenant_id, pg_name) + pg_id = pg.get_id() + + props = {OS_SG_RULE_KEY: 
str(sg_rule["id"])} + + # Determine source or destination address by looking at direction + src_pg_id = dst_pg_id = None + src_addr = dst_addr = None + src_port_to = dst_port_to = None + src_port_from = dst_port_from = None + if direction == "egress": + dst_pg_id = pg_id + dst_addr = sg_rule["remote_ip_prefix"] + dst_port_from = sg_rule["port_range_min"] + dst_port_to = sg_rule["port_range_max"] + else: + src_pg_id = pg_id + src_addr = sg_rule["remote_ip_prefix"] + src_port_from = sg_rule["port_range_min"] + src_port_to = sg_rule["port_range_max"] + + return self._add_chain_rule( + chain, action='accept', port_group_src=src_pg_id, + port_group_dst=dst_pg_id, + src_addr=src_addr, src_port_from=src_port_from, + src_port_to=src_port_to, + dst_addr=dst_addr, dst_port_from=dst_port_from, + dst_port_to=dst_port_to, + nw_proto=net_util.get_protocol_value(sg_rule["protocol"]), + dl_type=net_util.get_ethertype_value(sg_rule["ethertype"]), + properties=props) + + def _remove_nat_rules(self, context, fip): + router = self.client.get_router(fip["router_id"]) + self.client.remove_static_route(self._get_provider_router(), + fip["floating_ip_address"]) + + chain_names = _nat_chain_names(router.get_id()) + for _type, name in chain_names.iteritems(): + self.client.remove_rules_by_property( + router.get_tenant_id(), name, + OS_FLOATING_IP_RULE_KEY, fip["id"]) + + def setup_rpc(self): + # RPC support + self.topic = topics.PLUGIN + self.conn = n_rpc.create_connection(new=True) + self.endpoints = [MidoRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def create_subnet(self, context, subnet): + """Create Neutron subnet. + + Creates a Neutron subnet and a DHCP entry in MidoNet bridge. + """ + LOG.debug(_("MidonetPluginV2.create_subnet called: subnet=%r"), subnet) + + s = subnet["subnet"] + net = super(MidonetPluginV2, self).get_network( + context, subnet['subnet']['network_id'], fields=None) + + session = context.session + with session.begin(subtransactions=True): + sn_entry = super(MidonetPluginV2, self).create_subnet(context, + subnet) + bridge = self.client.get_bridge(sn_entry['network_id']) + + gateway_ip = s['gateway_ip'] + cidr = s['cidr'] + if s['enable_dhcp']: + dns_nameservers = None + host_routes = None + if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED: + dns_nameservers = s['dns_nameservers'] + + if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED: + host_routes = s['host_routes'] + + self.client.create_dhcp(bridge, gateway_ip, cidr, + host_rts=host_routes, + dns_servers=dns_nameservers) + + # For external network, link the bridge to the provider router. + if net['router:external']: + self._link_bridge_to_gw_router( + bridge, self._get_provider_router(), gateway_ip, cidr) + + LOG.debug(_("MidonetPluginV2.create_subnet exiting: sn_entry=%r"), + sn_entry) + return sn_entry + + def delete_subnet(self, context, id): + """Delete Neutron subnet. + + Delete neutron network and its corresponding MidoNet bridge. 
+ """ + LOG.debug(_("MidonetPluginV2.delete_subnet called: id=%s"), id) + subnet = super(MidonetPluginV2, self).get_subnet(context, id, + fields=None) + net = super(MidonetPluginV2, self).get_network(context, + subnet['network_id'], + fields=None) + session = context.session + with session.begin(subtransactions=True): + + super(MidonetPluginV2, self).delete_subnet(context, id) + bridge = self.client.get_bridge(subnet['network_id']) + if subnet['enable_dhcp']: + self.client.delete_dhcp(bridge, subnet['cidr']) + + # If the network is external, clean up routes, links, ports + if net[ext_net.EXTERNAL]: + self._unlink_bridge_from_gw_router( + bridge, self._get_provider_router()) + + LOG.debug(_("MidonetPluginV2.delete_subnet exiting")) + + def create_network(self, context, network): + """Create Neutron network. + + Create a new Neutron network and its corresponding MidoNet bridge. + """ + LOG.debug(_('MidonetPluginV2.create_network called: network=%r'), + network) + net_data = network['network'] + tenant_id = self._get_tenant_id_for_create(context, net_data) + net_data['tenant_id'] = tenant_id + self._ensure_default_security_group(context, tenant_id) + + bridge = self.client.create_bridge(**net_data) + net_data['id'] = bridge.get_id() + + session = context.session + with session.begin(subtransactions=True): + net = super(MidonetPluginV2, self).create_network(context, network) + self._process_l3_create(context, net, net_data) + + LOG.debug(_("MidonetPluginV2.create_network exiting: net=%r"), net) + return net + + def update_network(self, context, id, network): + """Update Neutron network. + + Update an existing Neutron network and its corresponding MidoNet + bridge. + """ + LOG.debug(_("MidonetPluginV2.update_network called: id=%(id)r, " + "network=%(network)r"), {'id': id, 'network': network}) + session = context.session + with session.begin(subtransactions=True): + net = super(MidonetPluginV2, self).update_network( + context, id, network) + self._process_l3_update(context, net, network['network']) + self.client.update_bridge(id, **network['network']) + + LOG.debug(_("MidonetPluginV2.update_network exiting: net=%r"), net) + return net + + def get_network(self, context, id, fields=None): + """Get Neutron network. + + Retrieves a Neutron network and its corresponding MidoNet bridge. + """ + LOG.debug(_("MidonetPluginV2.get_network called: id=%(id)r, " + "fields=%(fields)r"), {'id': id, 'fields': fields}) + qnet = super(MidonetPluginV2, self).get_network(context, id, fields) + self.client.get_bridge(id) + + LOG.debug(_("MidonetPluginV2.get_network exiting: qnet=%r"), qnet) + return qnet + + def delete_network(self, context, id): + """Delete a network and its corresponding MidoNet bridge.""" + LOG.debug(_("MidonetPluginV2.delete_network called: id=%r"), id) + self.client.delete_bridge(id) + try: + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, id) + super(MidonetPluginV2, self).delete_network(context, id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to delete neutron db, while Midonet ' + 'bridge=%r had been deleted'), id) + + def create_port(self, context, port): + """Create a L2 port in Neutron/MidoNet.""" + LOG.debug(_("MidonetPluginV2.create_port called: port=%r"), port) + port_data = port['port'] + + # Create a bridge port in MidoNet and set the bridge port ID as the + # port ID in Neutron. 
+ bridge = self.client.get_bridge(port_data["network_id"]) + tenant_id = bridge.get_tenant_id() + asu = port_data.get("admin_state_up", True) + bridge_port = self.client.add_bridge_port(bridge, + admin_state_up=asu) + port_data["id"] = bridge_port.get_id() + + try: + session = context.session + with session.begin(subtransactions=True): + # Create a Neutron port + new_port = super(MidonetPluginV2, self).create_port(context, + port) + port_data.update(new_port) + self._ensure_default_security_group_on_port(context, + port) + if _is_vif_port(port_data): + # Bind security groups to the port + sg_ids = self._get_security_groups_on_port(context, port) + self._bind_port_to_sgs(context, new_port, sg_ids) + + # Create port chains + port_chains = {} + for d, name in _port_chain_names( + new_port["id"]).iteritems(): + port_chains[d] = self.client.create_chain(tenant_id, + name) + + self._initialize_port_chains(port_data, + port_chains['inbound'], + port_chains['outbound'], + sg_ids) + + # Update the port with the chain + self.client.update_port_chains( + bridge_port, port_chains["inbound"].get_id(), + port_chains["outbound"].get_id()) + + # DHCP mapping is only for VIF ports + for cidr, ip, mac in self._dhcp_mappings( + context, port_data["fixed_ips"], + port_data["mac_address"]): + self.client.add_dhcp_host(bridge, cidr, ip, mac) + + elif _is_dhcp_port(port_data): + # For DHCP port, add a metadata route + for cidr, ip in self._metadata_subnets( + context, port_data["fixed_ips"]): + self.client.add_dhcp_route_option(bridge, cidr, ip, + METADATA_DEFAULT_IP) + + self._process_portbindings_create_and_update(context, + port_data, new_port) + except Exception as ex: + # Try removing the MidoNet port before raising an exception. + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to create a port on network %(net_id)s: " + "%(err)s"), + {"net_id": port_data["network_id"], "err": ex}) + self.client.delete_port(bridge_port.get_id()) + + LOG.debug(_("MidonetPluginV2.create_port exiting: port=%r"), new_port) + return new_port + + def get_port(self, context, id, fields=None): + """Retrieve port.""" + LOG.debug(_("MidonetPluginV2.get_port called: id=%(id)s " + "fields=%(fields)r"), {'id': id, 'fields': fields}) + port = super(MidonetPluginV2, self).get_port(context, id, fields) + "Check if the port exists in MidoNet DB""" + try: + self.client.get_port(id) + except midonet_lib.MidonetResourceNotFound as exc: + LOG.error(_("There is no port with ID %(id)s in MidoNet."), + {"id": id}) + port['status'] = constants.PORT_STATUS_ERROR + raise exc + LOG.debug(_("MidonetPluginV2.get_port exiting: port=%r"), port) + return port + + def get_ports(self, context, filters=None, fields=None): + """List neutron ports and verify that they exist in MidoNet.""" + LOG.debug(_("MidonetPluginV2.get_ports called: filters=%(filters)s " + "fields=%(fields)r"), + {'filters': filters, 'fields': fields}) + ports = super(MidonetPluginV2, self).get_ports(context, filters, + fields) + return ports + + def delete_port(self, context, id, l3_port_check=True): + """Delete a neutron port and corresponding MidoNet bridge port.""" + LOG.debug(_("MidonetPluginV2.delete_port called: id=%(id)s " + "l3_port_check=%(l3_port_check)r"), + {'id': id, 'l3_port_check': l3_port_check}) + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. 
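Editor's note: create_port above distinguishes VIF ports, which get per-port chains, DHCP host entries and security-group bindings, from DHCP ports, which only receive an option-121 route so guests can reach the metadata service. Roughly, for a DHCP port with fixed IP 10.0.0.2 on 10.0.0.0/24 (addresses illustrative):

    bridge = client.get_bridge(network_id)
    client.add_dhcp_route_option(bridge, "10.0.0.0/24", "10.0.0.2",
                                 METADATA_DEFAULT_IP)  # 169.254.169.254/32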
+ if l3_port_check: + self.prevent_l3_port_deletion(context, id) + + self.disassociate_floatingips(context, id) + port = self.get_port(context, id) + device_id = port['device_id'] + # If this port is for router interface/gw, unlink and delete. + if _is_router_interface_port(port): + self._unlink_bridge_from_router(device_id, id) + elif _is_router_gw_port(port): + # Gateway removed + # Remove all the SNAT rules that are tagged. + router = self._get_router(context, device_id) + tenant_id = router["tenant_id"] + chain_names = _nat_chain_names(device_id) + for _type, name in chain_names.iteritems(): + self.client.remove_rules_by_property( + tenant_id, name, OS_TENANT_ROUTER_RULE_KEY, + SNAT_RULE) + # Remove the default routes and unlink + self._remove_router_gateway(port['device_id']) + + self.client.delete_port(id, delete_chains=True) + try: + for cidr, ip, mac in self._dhcp_mappings( + context, port["fixed_ips"], port["mac_address"]): + self.client.delete_dhcp_host(port["network_id"], cidr, ip, + mac) + except Exception: + LOG.error(_("Failed to delete DHCP mapping for port %(id)s"), + {"id": id}) + + super(MidonetPluginV2, self).delete_port(context, id) + + def update_port(self, context, id, port): + """Handle port update, including security groups and fixed IPs.""" + with context.session.begin(subtransactions=True): + + # Get the port and save the fixed IPs + old_port = self._get_port(context, id) + net_id = old_port["network_id"] + mac = old_port["mac_address"] + old_ips = old_port["fixed_ips"] + # update the port DB + p = super(MidonetPluginV2, self).update_port(context, id, port) + + if "admin_state_up" in port["port"]: + asu = port["port"]["admin_state_up"] + mido_port = self.client.update_port(id, admin_state_up=asu) + + # If we're changing the admin_state_up flag and the port is + # associated with a router, then we also need to update the + # peer port. + if _is_router_interface_port(p): + self.client.update_port(mido_port.get_peer_id(), + admin_state_up=asu) + + new_ips = p["fixed_ips"] + if new_ips: + bridge = self.client.get_bridge(net_id) + # If it's a DHCP port, add a route to reach the MD server + if _is_dhcp_port(p): + for cidr, ip in self._metadata_subnets( + context, new_ips): + self.client.add_dhcp_route_option( + bridge, cidr, ip, METADATA_DEFAULT_IP) + else: + # IPs have changed. Re-map the DHCP entries + for cidr, ip, mac in self._dhcp_mappings( + context, old_ips, mac): + self.client.remove_dhcp_host( + bridge, cidr, ip, mac) + + for cidr, ip, mac in self._dhcp_mappings( + context, new_ips, mac): + self.client.add_dhcp_host( + bridge, cidr, ip, mac) + + if (self._check_update_deletes_security_groups(port) or + self._check_update_has_security_groups(port)): + self._unbind_port_from_sgs(context, p["id"]) + sg_ids = self._get_security_groups_on_port(context, port) + self._bind_port_to_sgs(context, p, sg_ids) + + self._process_portbindings_create_and_update(context, + port['port'], + p) + return p + + def create_router(self, context, router): + """Handle router creation. + + When a new Neutron router is created, its corresponding MidoNet router + is also created. In MidoNet, this router is initialized with chains + for inbound and outbound traffic, which will be used to hold other + chains that include various rules, such as NAT. + + :param router: Router information provided to create a new router. 
+ """ + + # NOTE(dcahill): Similar to the NSX plugin, we completely override + # this method in order to be able to use the MidoNet ID as Neutron ID + # TODO(dcahill): Propose upstream patch for allowing + # 3rd parties to specify IDs as we do with l2 plugin + LOG.debug(_("MidonetPluginV2.create_router called: router=%(router)s"), + {"router": router}) + r = router['router'] + tenant_id = self._get_tenant_id_for_create(context, r) + r['tenant_id'] = tenant_id + mido_router = self.client.create_router(**r) + mido_router_id = mido_router.get_id() + + try: + has_gw_info = False + if EXTERNAL_GW_INFO in r: + has_gw_info = True + gw_info = r.pop(EXTERNAL_GW_INFO) + with context.session.begin(subtransactions=True): + # pre-generate id so it will be available when + # configuring external gw port + router_db = l3_db.Router(id=mido_router_id, + tenant_id=tenant_id, + name=r['name'], + admin_state_up=r['admin_state_up'], + status="ACTIVE") + context.session.add(router_db) + if has_gw_info: + self._update_router_gw_info(context, router_db['id'], + gw_info) + + router_data = self._make_router_dict(router_db) + + except Exception: + # Try removing the midonet router + with excutils.save_and_reraise_exception(): + self.client.delete_router(mido_router_id) + + # Create router chains + chain_names = _nat_chain_names(mido_router_id) + try: + self.client.add_router_chains(mido_router, + chain_names["pre-routing"], + chain_names["post-routing"]) + except Exception: + # Set the router status to Error + with context.session.begin(subtransactions=True): + r = self._get_router(context, router_data["id"]) + router_data['status'] = constants.NET_STATUS_ERROR + r['status'] = router_data['status'] + context.session.add(r) + + LOG.debug(_("MidonetPluginV2.create_router exiting: " + "router_data=%(router_data)s."), + {"router_data": router_data}) + return router_data + + def _set_router_gateway(self, id, gw_router, gw_ip): + """Set router uplink gateway + + :param ID: ID of the router + :param gw_router: gateway router to link to + :param gw_ip: gateway IP address + """ + LOG.debug(_("MidonetPluginV2.set_router_gateway called: id=%(id)s, " + "gw_router=%(gw_router)s, gw_ip=%(gw_ip)s"), + {'id': id, 'gw_router': gw_router, 'gw_ip': gw_ip}), + + router = self.client.get_router(id) + + # Create a port in the gw router + gw_port = self.client.add_router_port(gw_router, + port_address='169.254.255.1', + network_address='169.254.255.0', + network_length=30) + + # Create a port in the router + port = self.client.add_router_port(router, + port_address='169.254.255.2', + network_address='169.254.255.0', + network_length=30) + + # Link them + self.client.link(gw_port, port.get_id()) + + # Add a route for gw_ip to bring it down to the router + self.client.add_router_route(gw_router, type='Normal', + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr=gw_ip, + dst_network_length=32, + next_hop_port=gw_port.get_id(), + weight=100) + + # Add default route to uplink in the router + self.client.add_router_route(router, type='Normal', + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr='0.0.0.0', + dst_network_length=0, + next_hop_port=port.get_id(), + weight=100) + + def _remove_router_gateway(self, id): + """Clear router gateway + + :param ID: ID of the router + """ + LOG.debug(_("MidonetPluginV2.remove_router_gateway called: " + "id=%(id)s"), {'id': id}) + router = self.client.get_router(id) + + # delete the port that is connected to the gateway router + for p in router.get_ports(): + if 
p.get_port_address() == '169.254.255.2': + peer_port_id = p.get_peer_id() + if peer_port_id is not None: + self.client.unlink(p) + self.client.delete_port(peer_port_id) + + # delete default route + for r in router.get_routes(): + if (r.get_dst_network_addr() == '0.0.0.0' and + r.get_dst_network_length() == 0): + self.client.delete_route(r.get_id()) + + def update_router(self, context, id, router): + """Handle router updates.""" + LOG.debug(_("MidonetPluginV2.update_router called: id=%(id)s " + "router=%(router)r"), {"id": id, "router": router}) + + router_data = router["router"] + + # Check if the update included changes to the gateway. + gw_updated = l3_db.EXTERNAL_GW_INFO in router_data + with context.session.begin(subtransactions=True): + + # Update the Neutron DB + r = super(MidonetPluginV2, self).update_router(context, id, + router) + tenant_id = r["tenant_id"] + if gw_updated: + if (l3_db.EXTERNAL_GW_INFO in r and + r[l3_db.EXTERNAL_GW_INFO] is not None): + # Gateway created + gw_port_neutron = self._get_port( + context.elevated(), r["gw_port_id"]) + gw_ip = gw_port_neutron['fixed_ips'][0]['ip_address'] + + # First link routers and set up the routes + self._set_router_gateway(r["id"], + self._get_provider_router(), + gw_ip) + gw_port_midonet = self.client.get_link_port( + self._get_provider_router(), r["id"]) + + # Get the NAT chains and add dynamic SNAT rules. + chain_names = _nat_chain_names(r["id"]) + props = {OS_TENANT_ROUTER_RULE_KEY: SNAT_RULE} + self.client.add_dynamic_snat(tenant_id, + chain_names['pre-routing'], + chain_names['post-routing'], + gw_ip, + gw_port_midonet.get_id(), + **props) + + self.client.update_router(id, **router_data) + + LOG.debug(_("MidonetPluginV2.update_router exiting: router=%r"), r) + return r + + def delete_router(self, context, id): + """Handler for router deletion. + + Deleting a router on Neutron simply means deleting its corresponding + router in MidoNet. + + :param id: router ID to remove + """ + LOG.debug(_("MidonetPluginV2.delete_router called: id=%s"), id) + + self.client.delete_router_chains(id) + self.client.delete_router(id) + + super(MidonetPluginV2, self).delete_router(context, id) + + def _link_bridge_to_gw_router(self, bridge, gw_router, gw_ip, cidr): + """Link a bridge to the gateway router + + :param bridge: bridge + :param gw_router: gateway router to link to + :param gw_ip: IP address of gateway + :param cidr: network CIDR + """ + net_addr, net_len = net_util.net_addr(cidr) + + # create a port on the gateway router + gw_port = self.client.add_router_port(gw_router, port_address=gw_ip, + network_address=net_addr, + network_length=net_len) + + # create a bridge port, then link it to the router. + port = self.client.add_bridge_port(bridge) + self.client.link(gw_port, port.get_id()) + + # add a route for the subnet in the gateway router + self.client.add_router_route(gw_router, type='Normal', + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr=net_addr, + dst_network_length=net_len, + next_hop_port=gw_port.get_id(), + weight=100) + + def _unlink_bridge_from_gw_router(self, bridge, gw_router): + """Unlink a bridge from the gateway router + + :param bridge: bridge to unlink + :param gw_router: gateway router to unlink from + """ + # Delete routes and unlink the router and the bridge. 
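The gateway plumbing above tags, and later removes, SNAT rules by per-router chain names obtained from _nat_chain_names(). That helper sits near the top of the plugin file, outside this hunk; a plausible sketch, with the OS_PRE_ROUTING_/OS_POST_ROUTING_ name templates treated as an assumption

::

    def _nat_chain_names(router_id):
        # One MidoNet chain pair per Neutron router, keyed by direction so
        # callers can index chain_names['pre-routing'] (DNAT side) and
        # chain_names['post-routing'] (SNAT side).
        pre = "OS_PRE_ROUTING_%s" % router_id
        post = "OS_POST_ROUTING_%s" % router_id
        return {'pre-routing': pre, 'post-routing': post}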
+ routes = self.client.get_router_routes(gw_router.get_id()) + + bridge_ports_to_delete = [ + p for p in gw_router.get_peer_ports() + if p.get_device_id() == bridge.get_id()] + + for p in bridge.get_peer_ports(): + if p.get_device_id() == gw_router.get_id(): + # delete the routes going to the bridge + for r in routes: + if r.get_next_hop_port() == p.get_id(): + self.client.delete_route(r.get_id()) + self.client.unlink(p) + self.client.delete_port(p.get_id()) + + # delete bridge port + for port in bridge_ports_to_delete: + self.client.delete_port(port.get_id()) + + def _link_bridge_to_router(self, router, bridge_port, net_addr, net_len, + gw_ip, metadata_gw_ip): + router_port = self.client.add_router_port( + router, network_length=net_len, network_address=net_addr, + port_address=gw_ip, admin_state_up=bridge_port['admin_state_up']) + self.client.link(router_port, bridge_port['id']) + self.client.add_router_route(router, type='Normal', + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr=net_addr, + dst_network_length=net_len, + next_hop_port=router_port.get_id(), + weight=100) + + if metadata_gw_ip: + # Add a route for the metadata server. + # Not all VM images supports DHCP option 121. Add a route for the + # Metadata server in the router to forward the packet to the bridge + # that will send them to the Metadata Proxy. + md_net_addr, md_net_len = net_util.net_addr(METADATA_DEFAULT_IP) + self.client.add_router_route( + router, type='Normal', src_network_addr=net_addr, + src_network_length=net_len, + dst_network_addr=md_net_addr, + dst_network_length=md_net_len, + next_hop_port=router_port.get_id(), + next_hop_gateway=metadata_gw_ip) + + def _unlink_bridge_from_router(self, router_id, bridge_port_id): + """Unlink a bridge from a router.""" + + # Remove the routes to the port and unlink the port + bridge_port = self.client.get_port(bridge_port_id) + routes = self.client.get_router_routes(router_id) + self.client.delete_port_routes(routes, bridge_port.get_peer_id()) + self.client.unlink(bridge_port) + + def add_router_interface(self, context, router_id, interface_info): + """Handle router linking with network.""" + LOG.debug(_("MidonetPluginV2.add_router_interface called: " + "router_id=%(router_id)s " + "interface_info=%(interface_info)r"), + {'router_id': router_id, 'interface_info': interface_info}) + + with context.session.begin(subtransactions=True): + info = super(MidonetPluginV2, self).add_router_interface( + context, router_id, interface_info) + + try: + subnet = self._get_subnet(context, info["subnet_id"]) + cidr = subnet["cidr"] + net_addr, net_len = net_util.net_addr(cidr) + router = self.client.get_router(router_id) + + # Get the metadata GW IP + metadata_gw_ip = None + rport_qry = context.session.query(models_v2.Port) + dhcp_ports = rport_qry.filter_by( + network_id=subnet["network_id"], + device_owner=constants.DEVICE_OWNER_DHCP).all() + if dhcp_ports and dhcp_ports[0].fixed_ips: + metadata_gw_ip = dhcp_ports[0].fixed_ips[0].ip_address + else: + LOG.warn(_("DHCP agent is not working correctly. No port " + "to reach the Metadata server on this network")) + # Link the router and the bridge + port = super(MidonetPluginV2, self).get_port(context, + info["port_id"]) + self._link_bridge_to_router(router, port, net_addr, + net_len, subnet["gateway_ip"], + metadata_gw_ip) + except Exception: + LOG.error(_("Failed to create MidoNet resources to add router " + "interface. 
info=%(info)s, router_id=%(router_id)s"), + {"info": info, "router_id": router_id}) + with excutils.save_and_reraise_exception(): + with context.session.begin(subtransactions=True): + self.remove_router_interface(context, router_id, info) + + LOG.debug(_("MidonetPluginV2.add_router_interface exiting: " + "info=%r"), info) + return info + + def _assoc_fip(self, fip): + router = self.client.get_router(fip["router_id"]) + link_port = self.client.get_link_port( + self._get_provider_router(), router.get_id()) + self.client.add_router_route( + self._get_provider_router(), + src_network_addr='0.0.0.0', + src_network_length=0, + dst_network_addr=fip["floating_ip_address"], + dst_network_length=32, + next_hop_port=link_port.get_peer_id()) + props = {OS_FLOATING_IP_RULE_KEY: fip['id']} + tenant_id = router.get_tenant_id() + chain_names = _nat_chain_names(router.get_id()) + for chain_type, name in chain_names.items(): + src_ip, target_ip = _get_nat_ips(chain_type, fip) + if chain_type == 'pre-routing': + nat_type = 'dnat' + else: + nat_type = 'snat' + self.client.add_static_nat(tenant_id, name, src_ip, + target_ip, + link_port.get_id(), + nat_type, **props) + + def create_floatingip(self, context, floatingip): + session = context.session + with session.begin(subtransactions=True): + fip = super(MidonetPluginV2, self).create_floatingip( + context, floatingip) + if fip['port_id']: + self._assoc_fip(fip) + return fip + + def update_floatingip(self, context, id, floatingip): + """Handle floating IP association and disassociation.""" + LOG.debug(_("MidonetPluginV2.update_floatingip called: id=%(id)s " + "floatingip=%(floatingip)s "), + {'id': id, 'floatingip': floatingip}) + + session = context.session + with session.begin(subtransactions=True): + if floatingip['floatingip']['port_id']: + fip = super(MidonetPluginV2, self).update_floatingip( + context, id, floatingip) + + self._assoc_fip(fip) + + # disassociate floating IP + elif floatingip['floatingip']['port_id'] is None: + fip = super(MidonetPluginV2, self).get_floatingip(context, id) + self._remove_nat_rules(context, fip) + super(MidonetPluginV2, self).update_floatingip(context, id, + floatingip) + + LOG.debug(_("MidonetPluginV2.update_floating_ip exiting: fip=%s"), fip) + return fip + + def disassociate_floatingips(self, context, port_id): + """Disassociate floating IPs (if any) from this port.""" + try: + fip_qry = context.session.query(l3_db.FloatingIP) + fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) + for fip_db in fip_dbs: + self._remove_nat_rules(context, fip_db) + except sa_exc.NoResultFound: + pass + + super(MidonetPluginV2, self).disassociate_floatingips(context, port_id) + + def create_security_group(self, context, security_group, default_sg=False): + """Create security group. + + Create a new security group, including the default security group. + In MidoNet, this means creating a pair of chains, inbound and outbound, + as well as a new port group. 
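_assoc_fip() above derives the NAT source/target addresses for each chain direction from _get_nat_ips(), another module-level helper not shown in this hunk. A hedged sketch of its likely behaviour: the pre-routing (DNAT) chain translates the floating address to the fixed one, the post-routing (SNAT) chain does the reverse

::

    def _get_nat_ips(chain_type, fip):
        # pre-routing chain: destination floating IP -> fixed IP (DNAT).
        # post-routing chain: source fixed IP -> floating IP (SNAT).
        if chain_type == 'pre-routing':
            return fip["floating_ip_address"], fip["fixed_ip_address"]
        return fip["fixed_ip_address"], fip["floating_ip_address"]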
+ """ + LOG.debug(_("MidonetPluginV2.create_security_group called: " + "security_group=%(security_group)s " + "default_sg=%(default_sg)s "), + {'security_group': security_group, 'default_sg': default_sg}) + + sg = security_group.get('security_group') + tenant_id = self._get_tenant_id_for_create(context, sg) + if not default_sg: + self._ensure_default_security_group(context, tenant_id) + + # Create the Neutron sg first + sg = super(MidonetPluginV2, self).create_security_group( + context, security_group, default_sg) + + try: + # Process the MidoNet side + self.client.create_port_group(tenant_id, + _sg_port_group_name(sg["id"])) + chain_names = _sg_chain_names(sg["id"]) + chains = {} + for direction, chain_name in chain_names.iteritems(): + c = self.client.create_chain(tenant_id, chain_name) + chains[direction] = c + + # Create all the rules for this SG. Only accept rules are created + for r in sg['security_group_rules']: + self._create_accept_chain_rule(context, r, + chain=chains[r['direction']]) + except Exception: + LOG.error(_("Failed to create MidoNet resources for sg %(sg)r"), + {"sg": sg}) + with excutils.save_and_reraise_exception(): + with context.session.begin(subtransactions=True): + sg = self._get_security_group(context, sg["id"]) + context.session.delete(sg) + + LOG.debug(_("MidonetPluginV2.create_security_group exiting: sg=%r"), + sg) + return sg + + def delete_security_group(self, context, id): + """Delete chains for Neutron security group.""" + LOG.debug(_("MidonetPluginV2.delete_security_group called: id=%s"), id) + + with context.session.begin(subtransactions=True): + sg = super(MidonetPluginV2, self).get_security_group(context, id) + if not sg: + raise ext_sg.SecurityGroupNotFound(id=id) + + if sg["name"] == 'default' and not context.is_admin: + raise ext_sg.SecurityGroupCannotRemoveDefault() + + sg_id = sg['id'] + filters = {'security_group_id': [sg_id]} + if super(MidonetPluginV2, self)._get_port_security_group_bindings( + context, filters): + raise ext_sg.SecurityGroupInUse(id=sg_id) + + # Delete MidoNet Chains and portgroup for the SG + tenant_id = sg['tenant_id'] + self.client.delete_chains_by_names( + tenant_id, _sg_chain_names(sg["id"]).values()) + + self.client.delete_port_group_by_name( + tenant_id, _sg_port_group_name(sg["id"])) + + super(MidonetPluginV2, self).delete_security_group(context, id) + + def create_security_group_rule(self, context, security_group_rule): + """Create a security group rule + + Create a security group rule in the Neutron DB and corresponding + MidoNet resources in its data store. + """ + LOG.debug(_("MidonetPluginV2.create_security_group_rule called: " + "security_group_rule=%(security_group_rule)r"), + {'security_group_rule': security_group_rule}) + + with context.session.begin(subtransactions=True): + rule = super(MidonetPluginV2, self).create_security_group_rule( + context, security_group_rule) + + self._create_accept_chain_rule(context, rule) + + LOG.debug(_("MidonetPluginV2.create_security_group_rule exiting: " + "rule=%r"), rule) + return rule + + def delete_security_group_rule(self, context, sg_rule_id): + """Delete a security group rule + + Delete a security group rule from the Neutron DB and corresponding + MidoNet resources from its data store. 
+ """ + LOG.debug(_("MidonetPluginV2.delete_security_group_rule called: " + "sg_rule_id=%s"), sg_rule_id) + with context.session.begin(subtransactions=True): + rule = super(MidonetPluginV2, self).get_security_group_rule( + context, sg_rule_id) + + if not rule: + raise ext_sg.SecurityGroupRuleNotFound(id=sg_rule_id) + + sg = self._get_security_group(context, + rule["security_group_id"]) + chain_name = _sg_chain_names(sg["id"])[rule["direction"]] + self.client.remove_rules_by_property(rule["tenant_id"], chain_name, + OS_SG_RULE_KEY, + str(rule["id"])) + super(MidonetPluginV2, self).delete_security_group_rule( + context, sg_rule_id) + + def _add_chain_rule(self, chain, action, **kwargs): + + nw_proto = kwargs.get("nw_proto") + src_addr = kwargs.pop("src_addr", None) + dst_addr = kwargs.pop("dst_addr", None) + src_port_from = kwargs.pop("src_port_from", None) + src_port_to = kwargs.pop("src_port_to", None) + dst_port_from = kwargs.pop("dst_port_from", None) + dst_port_to = kwargs.pop("dst_port_to", None) + + # Convert to the keys and values that midonet client understands + if src_addr: + kwargs["nw_src_addr"], kwargs["nw_src_length"] = net_util.net_addr( + src_addr) + + if dst_addr: + kwargs["nw_dst_addr"], kwargs["nw_dst_length"] = net_util.net_addr( + dst_addr) + + kwargs["tp_src"] = {"start": src_port_from, "end": src_port_to} + + kwargs["tp_dst"] = {"start": dst_port_from, "end": dst_port_to} + + if nw_proto == 1: # ICMP + # Overwrite port fields regardless of the direction + kwargs["tp_src"] = {"start": src_port_from, "end": src_port_from} + kwargs["tp_dst"] = {"start": dst_port_to, "end": dst_port_to} + + return self.client.add_chain_rule(chain, action=action, **kwargs) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/db.py new file mode 100644 index 00000000..c1627584 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/db.py @@ -0,0 +1,200 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy.orm import exc + +from neutron.common import constants as n_const +from neutron.db import api as db_api +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import log +from neutron.openstack.common import uuidutils +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2 import models + +LOG = log.getLogger(__name__) + + +def add_network_segment(session, network_id, segment): + with session.begin(subtransactions=True): + record = models.NetworkSegment( + id=uuidutils.generate_uuid(), + network_id=network_id, + network_type=segment.get(api.NETWORK_TYPE), + physical_network=segment.get(api.PHYSICAL_NETWORK), + segmentation_id=segment.get(api.SEGMENTATION_ID) + ) + session.add(record) + LOG.info(_("Added segment %(id)s of type %(network_type)s for network" + " %(network_id)s"), + {'id': record.id, + 'network_type': record.network_type, + 'network_id': record.network_id}) + + +def get_network_segments(session, network_id): + with session.begin(subtransactions=True): + records = (session.query(models.NetworkSegment). + filter_by(network_id=network_id)) + return [{api.ID: record.id, + api.NETWORK_TYPE: record.network_type, + api.PHYSICAL_NETWORK: record.physical_network, + api.SEGMENTATION_ID: record.segmentation_id} + for record in records] + + +def ensure_port_binding(session, port_id): + with session.begin(subtransactions=True): + try: + record = (session.query(models.PortBinding). + filter_by(port_id=port_id). + one()) + except exc.NoResultFound: + record = models.PortBinding( + port_id=port_id, + vif_type=portbindings.VIF_TYPE_UNBOUND) + session.add(record) + return record + + +def ensure_dvr_port_binding(session, port_id, host, router_id=None): + # FIXME(armando-migliaccio): take care of LP #1335226 + with session.begin(subtransactions=True): + try: + record = (session.query(models.DVRPortBinding). + filter_by(port_id=port_id, host=host).one()) + except exc.NoResultFound: + record = models.DVRPortBinding( + port_id=port_id, + host=host, + router_id=router_id, + vif_type=portbindings.VIF_TYPE_UNBOUND, + vnic_type=portbindings.VNIC_NORMAL, + cap_port_filter=False, + status=n_const.PORT_STATUS_DOWN) + session.add(record) + return record + + +def delete_dvr_port_binding(session, port_id, host): + with session.begin(subtransactions=True): + session.query(models.DVRPortBinding).filter_by( + port_id=port_id, host=host).delete() + + +def get_port(session, port_id): + """Get port record for update within transcation.""" + + with session.begin(subtransactions=True): + try: + record = (session.query(models_v2.Port). + filter(models_v2.Port.id.startswith(port_id)). 
+ one()) + return record + except exc.NoResultFound: + return + except exc.MultipleResultsFound: + LOG.error(_("Multiple ports have port_id starting with %s"), + port_id) + return + + +def get_port_from_device_mac(device_mac): + LOG.debug(_("get_port_from_device_mac() called for mac %s"), device_mac) + session = db_api.get_session() + qry = session.query(models_v2.Port).filter_by(mac_address=device_mac) + return qry.first() + + +def get_port_and_sgs(port_id): + """Get port from database with security group info.""" + + LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id) + session = db_api.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + with session.begin(subtransactions=True): + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id.startswith(port_id)) + port_and_sgs = query.all() + if not port_and_sgs: + return + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict['security_groups'] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +def get_port_binding_host(port_id): + session = db_api.get_session() + with session.begin(subtransactions=True): + try: + query = (session.query(models.PortBinding). + filter(models.PortBinding.port_id.startswith(port_id)). + one()) + except exc.NoResultFound: + LOG.debug(_("No binding found for port %(port_id)s"), + {'port_id': port_id}) + return + return query.host + + +def generate_dvr_port_status(session, port_id): + # an OR'ed value of status assigned to parent port from the + # dvrportbinding bucket + query = session.query(models.DVRPortBinding) + bindings = query.filter(models.DVRPortBinding.port_id == port_id).all() + for bind in bindings: + if bind.status == n_const.PORT_STATUS_ACTIVE: + return n_const.PORT_STATUS_ACTIVE + for bind in bindings: + if bind.status == n_const.PORT_STATUS_DOWN: + return n_const.PORT_STATUS_DOWN + return n_const.PORT_STATUS_BUILD + + +def get_dvr_port_binding_by_host(port_id, host, session=None): + if not session: + session = db_api.get_session() + with session.begin(subtransactions=True): + binding = (session.query(models.DVRPortBinding). + filter(models.DVRPortBinding.port_id.startswith(port_id), + models.DVRPortBinding.host == host).first()) + if not binding: + LOG.debug("No binding for DVR port %(port_id)s with host " + "%(host)s", {'port_id': port_id, 'host': host}) + return binding + + +def get_dvr_port_bindings(port_id): + session = db_api.get_session() + with session.begin(subtransactions=True): + bindings = (session.query(models.DVRPortBinding). + filter(models.DVRPortBinding.port_id.startswith(port_id)). + all()) + if not bindings: + LOG.debug("No bindings for port %s", port_id) + return bindings diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/driver_api.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/driver_api.py new file mode 100644 index 00000000..17aecf53 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/driver_api.py @@ -0,0 +1,602 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import six + +# The following keys are used in the segment dictionaries passed via +# the driver API. These are defined separately from similar keys in +# neutron.extensions.providernet so that drivers don't need to change +# if/when providernet moves to the core API. +# +ID = 'id' +NETWORK_TYPE = 'network_type' +PHYSICAL_NETWORK = 'physical_network' +SEGMENTATION_ID = 'segmentation_id' + + +@six.add_metaclass(abc.ABCMeta) +class TypeDriver(object): + """Define stable abstract interface for ML2 type drivers. + + ML2 type drivers each support a specific network_type for provider + and/or tenant network segments. Type drivers must implement this + abstract interface, which defines the API by which the plugin uses + the driver to manage the persistent type-specific resource + allocation state associated with network segments of that type. + + Network segments are represented by segment dictionaries using the + NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined + above, corresponding to the provider attributes. Future revisions + of the TypeDriver API may add additional segment dictionary + keys. Attributes not applicable for a particular network_type may + either be excluded or stored as None. + """ + + @abc.abstractmethod + def get_type(self): + """Get driver's network type. + + :returns network_type value handled by this driver + """ + pass + + @abc.abstractmethod + def initialize(self): + """Perform driver initialization. + + Called after all drivers have been loaded and the database has + been initialized. No abstract methods defined below will be + called prior to this method being called. + """ + pass + + @abc.abstractmethod + def validate_provider_segment(self, segment): + """Validate attributes of a provider network segment. + + :param segment: segment dictionary using keys defined above + :raises: neutron.common.exceptions.InvalidInput if invalid + + Called outside transaction context to validate the provider + attributes for a provider network segment. Raise InvalidInput + if: + + - any required attribute is missing + - any prohibited or unrecognized attribute is present + - any attribute value is not valid + + The network_type attribute is present in segment, but + need not be validated. + """ + pass + + @abc.abstractmethod + def reserve_provider_segment(self, session, segment): + """Reserve resource associated with a provider network segment. + + :param session: database session + :param segment: segment dictionary using keys defined above + + Called inside transaction context on session to reserve the + type-specific resource for a provider network segment. The + segment dictionary passed in was returned by a previous + validate_provider_segment() call. + """ + pass + + @abc.abstractmethod + def allocate_tenant_segment(self, session): + """Allocate resource for a new tenant network segment. 
+ + :param session: database session + :returns: segment dictionary using keys defined above + + Called inside transaction context on session to allocate a new + tenant network, typically from a type-specific resource + pool. If successful, return a segment dictionary describing + the segment. If tenant network segment cannot be allocated + (i.e. tenant networks not supported or resource pool is + exhausted), return None. + """ + pass + + @abc.abstractmethod + def release_segment(self, session, segment): + """Release network segment. + + :param session: database session + :param segment: segment dictionary using keys defined above + + Called inside transaction context on session to release a + tenant or provider network's type-specific resource. Runtime + errors are not expected, but raising an exception will result + in rollback of the transaction. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class NetworkContext(object): + """Context passed to MechanismDrivers for changes to network resources. + + A NetworkContext instance wraps a network resource. It provides + helper methods for accessing other relevant information. Results + from expensive operations are cached so that other + MechanismDrivers can freely access the same information. + """ + + @abc.abstractproperty + def current(self): + """Return the current state of the network. + + Return the current state of the network, as defined by + NeutronPluginBaseV2.create_network and all extensions in the + ml2 plugin. + """ + pass + + @abc.abstractproperty + def original(self): + """Return the original state of the network. + + Return the original state of the network, prior to a call to + update_network. Method is only valid within calls to + update_network_precommit and update_network_postcommit. + """ + pass + + @abc.abstractproperty + def network_segments(self): + """Return the segments associated with this network resource.""" + pass + + +@six.add_metaclass(abc.ABCMeta) +class SubnetContext(object): + """Context passed to MechanismDrivers for changes to subnet resources. + + A SubnetContext instance wraps a subnet resource. It provides + helper methods for accessing other relevant information. Results + from expensive operations are cached so that other + MechanismDrivers can freely access the same information. + """ + + @abc.abstractproperty + def current(self): + """Return the current state of the subnet. + + Return the current state of the subnet, as defined by + NeutronPluginBaseV2.create_subnet and all extensions in the + ml2 plugin. + """ + pass + + @abc.abstractproperty + def original(self): + """Return the original state of the subnet. + + Return the original state of the subnet, prior to a call to + update_subnet. Method is only valid within calls to + update_subnet_precommit and update_subnet_postcommit. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class PortContext(object): + """Context passed to MechanismDrivers for changes to port resources. + + A PortContext instance wraps a port resource. It provides helper + methods for accessing other relevant information. Results from + expensive operations are cached so that other MechanismDrivers can + freely access the same information. + """ + + @abc.abstractproperty + def binding(self): + """Return the current state of binding for this port.""" + pass + + @abc.abstractproperty + def current(self): + """Return the current state of the port. + + Return the current state of the port, as defined by + NeutronPluginBaseV2.create_port and all extensions in the ml2 + plugin. 
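The TypeDriver contract above is small enough to satisfy with a stateless driver. As an illustration only (not part of this patch), a no-op driver loosely modelled on the built-in local type driver

::

    from neutron.common import exceptions as exc
    from neutron.plugins.ml2 import driver_api as api

    class NoopTypeDriver(api.TypeDriver):
        """Stateless example: every tenant network gets a bare segment."""

        def get_type(self):
            return 'noop'

        def initialize(self):
            pass

        def validate_provider_segment(self, segment):
            # Segments of this type carry no provider attributes at all.
            for key, value in segment.items():
                if value and key != api.NETWORK_TYPE:
                    msg = "%s prohibited for noop provider network" % key
                    raise exc.InvalidInput(error_message=msg)

        def reserve_provider_segment(self, session, segment):
            pass

        def allocate_tenant_segment(self, session):
            return {api.NETWORK_TYPE: 'noop'}

        def release_segment(self, session, segment):
            pass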
+ """ + pass + + @abc.abstractproperty + def original(self): + """Return the original state of the port. + + Return the original state of the port, prior to a call to + update_port. Method is only valid within calls to + update_port_precommit and update_port_postcommit. + """ + pass + + @abc.abstractproperty + def network(self): + """Return the NetworkContext associated with this port.""" + pass + + @abc.abstractproperty + def bound_segment(self): + """Return the currently bound segment dictionary.""" + pass + + @abc.abstractproperty + def original_bound_segment(self): + """Return the original bound segment dictionary. + + Return the original bound segment dictionary, prior to a call + to update_port. Method is only valid within calls to + update_port_precommit and update_port_postcommit. + """ + pass + + @abc.abstractproperty + def bound_driver(self): + """Return the currently bound mechanism driver name.""" + pass + + @abc.abstractproperty + def original_bound_driver(self): + """Return the original bound mechanism driver name. + + Return the original bound mechanism driver name, prior to a + call to update_port. Method is only valid within calls to + update_port_precommit and update_port_postcommit. + """ + pass + + @abc.abstractmethod + def host_agents(self, agent_type): + """Get agents of the specified type on port's host. + + :param agent_type: Agent type identifier + :returns: List of agents_db.Agent records + """ + pass + + @abc.abstractmethod + def set_binding(self, segment_id, vif_type, vif_details, + status=None): + """Set the binding for the port. + + :param segment_id: Network segment bound for the port. + :param vif_type: The VIF type for the bound port. + :param vif_details: Dictionary with details for VIF driver. + :param status: Port status to set if not None. + + Called by MechanismDriver.bind_port to indicate success and + specify binding details to use for port. The segment_id must + identify an item in network.network_segments. + """ + pass + + +@six.add_metaclass(abc.ABCMeta) +class MechanismDriver(object): + """Define stable abstract interface for ML2 mechanism drivers. + + A mechanism driver is called on the creation, update, and deletion + of networks and ports. For every event, there are two methods that + get called - one within the database transaction (method suffix of + _precommit), one right afterwards (method suffix of _postcommit). + + Exceptions raised by methods called inside the transaction can + rollback, but should not make any blocking calls (for example, + REST requests to an outside controller). Methods called after + transaction commits can make blocking external calls, though these + will block the entire process. Exceptions raised in calls after + the transaction commits may cause the associated resource to be + deleted. + + Because rollback outside of the transaction is not done in the + update network/port case, all data validation must be done within + methods that are part of the database transaction. + """ + + @abc.abstractmethod + def initialize(self): + """Perform driver initialization. + + Called after all drivers have been loaded and the database has + been initialized. No abstract methods defined below will be + called prior to this method being called. + """ + pass + + def create_network_precommit(self, context): + """Allocate resources for a new network. + + :param context: NetworkContext instance describing the new + network. + + Create a new network, allocating resources as necessary in the + database. 
Called inside transaction context on session. Call + cannot block. Raising an exception will result in a rollback + of the current transaction. + """ + pass + + def create_network_postcommit(self, context): + """Create a network. + + :param context: NetworkContext instance describing the new + network. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + """ + pass + + def update_network_precommit(self, context): + """Update resources of a network. + + :param context: NetworkContext instance describing the new + state of the network, as well as the original state prior + to the update_network call. + + Update values of a network, updating the associated resources + in the database. Called inside transaction context on session. + Raising an exception will result in rollback of the + transaction. + + update_network_precommit is called for all changes to the + network state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + pass + + def update_network_postcommit(self, context): + """Update a network. + + :param context: NetworkContext instance describing the new + state of the network, as well as the original state prior + to the update_network call. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + + update_network_postcommit is called for all changes to the + network state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + pass + + def delete_network_precommit(self, context): + """Delete resources for a network. + + :param context: NetworkContext instance describing the current + state of the network, prior to the call to delete it. + + Delete network resources previously allocated by this + mechanism driver for a network. Called inside transaction + context on session. Runtime errors are not expected, but + raising an exception will result in rollback of the + transaction. + """ + pass + + def delete_network_postcommit(self, context): + """Delete a network. + + :param context: NetworkContext instance describing the current + state of the network, prior to the call to delete it. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + """ + pass + + def create_subnet_precommit(self, context): + """Allocate resources for a new subnet. + + :param context: SubnetContext instance describing the new + subnet. + + Create a new subnet, allocating resources as necessary in the + database. Called inside transaction context on session. Call + cannot block. Raising an exception will result in a rollback + of the current transaction. + """ + pass + + def create_subnet_postcommit(self, context): + """Create a subnet. + + :param context: SubnetContext instance describing the new + subnet. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. 
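To make the precommit/postcommit split concrete, here is an illustrative (not shipped) driver skeleton: precommit methods do only database work inside the plugin transaction, while postcommit methods are free to call out to an external backend

::

    from neutron.openstack.common import log
    from neutron.plugins.ml2 import driver_api as api

    LOG = log.getLogger(__name__)

    class ExampleMechanismDriver(api.MechanismDriver):

        def initialize(self):
            pass

        def create_network_precommit(self, context):
            # Inside the plugin's DB transaction: record-keeping only, no
            # blocking calls. Raising here rolls the network back.
            LOG.debug("precommit for network %s", context.current['id'])

        def create_network_postcommit(self, context):
            # After the transaction commits: safe to make (blocking) calls
            # to an external controller. Raising here deletes the network.
            LOG.debug("postcommit for network %s", context.current['id'])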
+ """ + pass + + def update_subnet_precommit(self, context): + """Update resources of a subnet. + + :param context: SubnetContext instance describing the new + state of the subnet, as well as the original state prior + to the update_subnet call. + + Update values of a subnet, updating the associated resources + in the database. Called inside transaction context on session. + Raising an exception will result in rollback of the + transaction. + + update_subnet_precommit is called for all changes to the + subnet state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + pass + + def update_subnet_postcommit(self, context): + """Update a subnet. + + :param context: SubnetContext instance describing the new + state of the subnet, as well as the original state prior + to the update_subnet call. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + + update_subnet_postcommit is called for all changes to the + subnet state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + pass + + def delete_subnet_precommit(self, context): + """Delete resources for a subnet. + + :param context: SubnetContext instance describing the current + state of the subnet, prior to the call to delete it. + + Delete subnet resources previously allocated by this + mechanism driver for a subnet. Called inside transaction + context on session. Runtime errors are not expected, but + raising an exception will result in rollback of the + transaction. + """ + pass + + def delete_subnet_postcommit(self, context): + """Delete a subnet. + + :param context: SubnetContext instance describing the current + state of the subnet, prior to the call to delete it. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + """ + pass + + def create_port_precommit(self, context): + """Allocate resources for a new port. + + :param context: PortContext instance describing the port. + + Create a new port, allocating resources as necessary in the + database. Called inside transaction context on session. Call + cannot block. Raising an exception will result in a rollback + of the current transaction. + """ + pass + + def create_port_postcommit(self, context): + """Create a port. + + :param context: PortContext instance describing the port. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + result in the deletion of the resource. + """ + pass + + def update_port_precommit(self, context): + """Update resources of a port. + + :param context: PortContext instance describing the new + state of the port, as well as the original state prior + to the update_port call. + + Called inside transaction context on session to complete a + port update as defined by this mechanism driver. Raising an + exception will result in rollback of the transaction. + + update_port_precommit is called for all changes to the port + state. It is up to the mechanism driver to ignore state or + state changes that it does not know or care about. 
+ """ + pass + + def update_port_postcommit(self, context): + """Update a port. + + :param context: PortContext instance describing the new + state of the port, as well as the original state prior + to the update_port call. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + result in the deletion of the resource. + + update_port_postcommit is called for all changes to the port + state. It is up to the mechanism driver to ignore state or + state changes that it does not know or care about. + """ + pass + + def delete_port_precommit(self, context): + """Delete resources of a port. + + :param context: PortContext instance describing the current + state of the port, prior to the call to delete it. + + Called inside transaction context on session. Runtime errors + are not expected, but raising an exception will result in + rollback of the transaction. + """ + pass + + def delete_port_postcommit(self, context): + """Delete a port. + + :param context: PortContext instance describing the current + state of the port, prior to the call to delete it. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + """ + pass + + def bind_port(self, context): + """Attempt to bind a port. + + :param context: PortContext instance describing the port + + Called inside transaction context on session, prior to + create_port_precommit or update_port_precommit, to + attempt to establish a port binding. If the driver is able to + bind the port, it calls context.set_binding with the binding + details. + """ + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/driver_context.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/driver_context.py new file mode 100644 index 00000000..7ac58809 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/driver_context.py @@ -0,0 +1,142 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.openstack.common import jsonutils +from neutron.plugins.ml2 import db +from neutron.plugins.ml2 import driver_api as api + + +class MechanismDriverContext(object): + """MechanismDriver context base class.""" + def __init__(self, plugin, plugin_context): + self._plugin = plugin + # This temporarily creates a reference loop, but the + # lifetime of PortContext is limited to a single + # method call of the plugin. 
+ self._plugin_context = plugin_context + + +class NetworkContext(MechanismDriverContext, api.NetworkContext): + + def __init__(self, plugin, plugin_context, network, + original_network=None): + super(NetworkContext, self).__init__(plugin, plugin_context) + self._network = network + self._original_network = original_network + self._segments = db.get_network_segments(plugin_context.session, + network['id']) + + @property + def current(self): + return self._network + + @property + def original(self): + return self._original_network + + @property + def network_segments(self): + return self._segments + + +class SubnetContext(MechanismDriverContext, api.SubnetContext): + + def __init__(self, plugin, plugin_context, subnet, original_subnet=None): + super(SubnetContext, self).__init__(plugin, plugin_context) + self._subnet = subnet + self._original_subnet = original_subnet + + @property + def current(self): + return self._subnet + + @property + def original(self): + return self._original_subnet + + +class PortContext(MechanismDriverContext, api.PortContext): + + def __init__(self, plugin, plugin_context, port, network, + original_port=None, binding=None): + super(PortContext, self).__init__(plugin, plugin_context) + self._port = port + self._original_port = original_port + self._network_context = NetworkContext(plugin, plugin_context, + network) + if binding: + self._binding = binding + else: + self._binding = db.ensure_port_binding( + plugin_context.session, port['id']) + if original_port: + self._original_bound_segment_id = self._binding.segment + self._original_bound_driver = self._binding.driver + else: + self._original_bound_segment_id = None + self._original_bound_driver = None + self._new_port_status = None + + @property + def binding(self): + return self._binding + + @property + def current(self): + return self._port + + @property + def original(self): + return self._original_port + + @property + def network(self): + return self._network_context + + @property + def bound_segment(self): + id = self._binding.segment + if id: + for segment in self._network_context.network_segments: + if segment[api.ID] == id: + return segment + + @property + def original_bound_segment(self): + if self._original_bound_segment_id: + for segment in self._network_context.network_segments: + if segment[api.ID] == self._original_bound_segment_id: + return segment + + @property + def bound_driver(self): + return self._binding.driver + + @property + def original_bound_driver(self): + return self._original_bound_driver + + def host_agents(self, agent_type): + return self._plugin.get_agents(self._plugin_context, + filters={'agent_type': [agent_type], + 'host': [self._binding.host]}) + + def set_binding(self, segment_id, vif_type, vif_details, + status=None): + # TODO(rkukura) Verify binding allowed, segment in network + self._binding.segment = segment_id + self._binding.vif_type = vif_type + self._binding.vif_details = jsonutils.dumps(vif_details) + self._new_port_status = status diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/README.fslsdn b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/README.fslsdn new file mode 100644 index 00000000..09017284 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/README.fslsdn @@ -0,0 +1,102 @@ +===================================================== +Freescale SDN Mechanism Driver for Neutron ML2 plugin +===================================================== + +Introduction +============ + +Freescale SDN 
(FSL-SDN) Mechanism Driver is an add-on support for ML2 plugin +for Neutron. + +It supports the Cloud Resource Discovery (CRD) service by updating +Network, Subnet and Port Create/Update/Delete data into the CRD database. + +CRD service manages network nodes, virtual network appliances and openflow +controller based network applications. + +Basic work flow +--------------- + +:: + + +---------------------------------+ + | | + | Neutron Server | + | (with ML2 plugin) | + | | + | +-------------------------------+ + | | Freescale SDN | + | | Mechanism Driver | + +-+--------+----------------------+ + | + | ReST API + | + +----------+-------------+ + | CRD server | + +------------------------+ + + + +How does Freescale SDN Mechanism Driver work? +=========================================== + +- Freescale Mechanism driver handles the following postcommit operations. + - Network create/update/delete + - Subnet create/update/delete + - Port create/delete + +Sequence diagram : create_network +--------------------------------- + +:: + + create_network + { + neutron -> ML2_plugin + ML2_plugin -> FSL-SDN-MD + FSL-SDN-MD -> crd_service + FSL-SDN-MD <-- crd_service + ML2_plugin <-- FSL-SDN-MD + neutron <-- ML2_plugin + } + +- Supported network types by FSL OF Controller include vlan and vxlan. + +- Freescale SDN mechanism driver handles VM port binding within in the + mechanism driver (like ODL MD). + +- 'bind_port' function verifies the supported network types (vlan,vxlan) + and calls context.set_binding with binding details. + +- Flow management in OVS is handled by Freescale Openflow Controller. + + +How to use Freescale SDN Mechanism Driver? +========================================== + +Configuring ML2 Plugin +---------------------- + +In [ml2] section of /etc/neutron/plugins/ml2/ml2_conf.ini, +modify 'mechanism_drivers' attributes as: + +:: + + mechanism_drivers = fslsdn + +Configuring FSLSDN Mechanism Driver +----------------------------------- + +Update /etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini, as below. + +:: + + [ml2_fslsdn] + crd_auth_strategy = keystone + crd_url = http://127.0.0.1:9797 + crd_auth_url = http://127.0.0.1:5000/v2.0/ + crd_tenant_name = service + crd_password = <-service-password-> + crd_user_name = <-service-username-> + +CRD service must be running in the controller. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py new file mode 100644 index 00000000..015921df --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py @@ -0,0 +1,385 @@ +# Copyright 2014 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Author: +# Shiv Haris (shivharis@hotmail.com) + + +"""Implentation of Brocade ML2 Mechanism driver for ML2 Plugin.""" + +from oslo.config import cfg + +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.ml2 import driver_api +from neutron.plugins.ml2.drivers.brocade.db import models as brocade_db + +LOG = logging.getLogger(__name__) +MECHANISM_VERSION = 0.9 +NOS_DRIVER = 'neutron.plugins.ml2.drivers.brocade.nos.nosdriver.NOSdriver' + +ML2_BROCADE = [cfg.StrOpt('address', default='', + help=_('The address of the host to SSH to')), + cfg.StrOpt('username', default='admin', + help=_('The SSH username to use')), + cfg.StrOpt('password', default='password', secret=True, + help=_('The SSH password to use')), + cfg.StrOpt('physical_networks', default='', + help=_('Allowed physical networks')), + cfg.StrOpt('ostype', default='NOS', + help=_('Unused')) + ] + +cfg.CONF.register_opts(ML2_BROCADE, "ml2_brocade") + + +class BrocadeMechanism(driver_api.MechanismDriver): + """ML2 Mechanism driver for Brocade VDX switches. This is the upper + layer driver class that interfaces to lower layer (NETCONF) below. + + """ + + def __init__(self): + self._driver = None + self._physical_networks = None + self._switch = None + self.initialize() + + def initialize(self): + """Initilize of variables needed by this class.""" + + self._physical_networks = cfg.CONF.ml2_brocade.physical_networks + self.brocade_init() + + def brocade_init(self): + """Brocade specific initialization for this class.""" + + self._switch = {'address': cfg.CONF.ml2_brocade.address, + 'username': cfg.CONF.ml2_brocade.username, + 'password': cfg.CONF.ml2_brocade.password + } + self._driver = importutils.import_object(NOS_DRIVER) + + def create_network_precommit(self, mech_context): + """Create Network in the mechanism specific database table.""" + + network = mech_context.current + context = mech_context._plugin_context + tenant_id = network['tenant_id'] + network_id = network['id'] + + segments = mech_context.network_segments + # currently supports only one segment per network + segment = segments[0] + + network_type = segment['network_type'] + vlan_id = segment['segmentation_id'] + segment_id = segment['id'] + + if segment['physical_network'] not in self._physical_networks: + raise Exception( + _("Brocade Mechanism: failed to create network, " + "network cannot be created in the configured " + "physical network")) + + if network_type != 'vlan': + raise Exception( + _("Brocade Mechanism: failed to create network, " + "only network type vlan is supported")) + + try: + brocade_db.create_network(context, network_id, vlan_id, + segment_id, network_type, tenant_id) + except Exception: + LOG.exception( + _("Brocade Mechanism: failed to create network in db")) + raise Exception( + _("Brocade Mechanism: create_network_precommit failed")) + + LOG.info(_("create network (precommit): %(network_id)s " + "of network type = %(network_type)s " + "with vlan = %(vlan_id)s " + "for tenant %(tenant_id)s"), + {'network_id': network_id, + 'network_type': network_type, + 'vlan_id': vlan_id, + 'tenant_id': tenant_id}) + + def create_network_postcommit(self, mech_context): + """Create Network as a portprofile on the switch.""" + + LOG.debug(_("create_network_postcommit: called")) + + network = mech_context.current + # use network_id to get the network attributes + # ONLY depend on our db for getting back network attributes + # this is so we can replay postcommit from db + context = 
mech_context._plugin_context + + network_id = network['id'] + network = brocade_db.get_network(context, network_id) + network_type = network['network_type'] + tenant_id = network['tenant_id'] + vlan_id = network['vlan'] + + try: + self._driver.create_network(self._switch['address'], + self._switch['username'], + self._switch['password'], + vlan_id) + except Exception: + LOG.exception(_("Brocade NOS driver: failed in create network")) + brocade_db.delete_network(context, network_id) + raise Exception( + _("Brocade Mechanism: create_network_postcommmit failed")) + + LOG.info(_("created network (postcommit): %(network_id)s" + " of network type = %(network_type)s" + " with vlan = %(vlan_id)s" + " for tenant %(tenant_id)s"), + {'network_id': network_id, + 'network_type': network_type, + 'vlan_id': vlan_id, + 'tenant_id': tenant_id}) + + def delete_network_precommit(self, mech_context): + """Delete Network from the plugin specific database table.""" + + LOG.debug(_("delete_network_precommit: called")) + + network = mech_context.current + network_id = network['id'] + vlan_id = network['provider:segmentation_id'] + tenant_id = network['tenant_id'] + + context = mech_context._plugin_context + + try: + brocade_db.delete_network(context, network_id) + except Exception: + LOG.exception( + _("Brocade Mechanism: failed to delete network in db")) + raise Exception( + _("Brocade Mechanism: delete_network_precommit failed")) + + LOG.info(_("delete network (precommit): %(network_id)s" + " with vlan = %(vlan_id)s" + " for tenant %(tenant_id)s"), + {'network_id': network_id, + 'vlan_id': vlan_id, + 'tenant_id': tenant_id}) + + def delete_network_postcommit(self, mech_context): + """Delete network which translates to removng portprofile + from the switch. + """ + + LOG.debug(_("delete_network_postcommit: called")) + network = mech_context.current + network_id = network['id'] + vlan_id = network['provider:segmentation_id'] + tenant_id = network['tenant_id'] + + try: + self._driver.delete_network(self._switch['address'], + self._switch['username'], + self._switch['password'], + vlan_id) + except Exception: + LOG.exception(_("Brocade NOS driver: failed to delete network")) + raise Exception( + _("Brocade switch exception, " + "delete_network_postcommit failed")) + + LOG.info(_("delete network (postcommit): %(network_id)s" + " with vlan = %(vlan_id)s" + " for tenant %(tenant_id)s"), + {'network_id': network_id, + 'vlan_id': vlan_id, + 'tenant_id': tenant_id}) + + def update_network_precommit(self, mech_context): + """Noop now, it is left here for future.""" + pass + + def update_network_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + pass + + def create_port_precommit(self, mech_context): + """Create logical port on the switch (db update).""" + + LOG.debug(_("create_port_precommit: called")) + + port = mech_context.current + port_id = port['id'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + admin_state_up = port['admin_state_up'] + + context = mech_context._plugin_context + + network = brocade_db.get_network(context, network_id) + vlan_id = network['vlan'] + + try: + brocade_db.create_port(context, port_id, network_id, + None, + vlan_id, tenant_id, admin_state_up) + except Exception: + LOG.exception(_("Brocade Mechanism: failed to create port in db")) + raise Exception( + _("Brocade Mechanism: create_port_precommit failed")) + + def create_port_postcommit(self, mech_context): + """Associate the assigned MAC address to the portprofile.""" + + 
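create_port_postcommit(), whose body follows, pushes the port's MAC address to the switch in the dotted notation NOS expects, using the mac_reformat_62to34() static method defined at the end of this class. For example

::

    mac = BrocadeMechanism.mac_reformat_62to34("00:16:3e:5e:6c:00")
    assert mac == "0016.3e5e.6c00"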
LOG.debug(_("create_port_postcommit: called")) + + port = mech_context.current + port_id = port['id'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + + context = mech_context._plugin_context + + network = brocade_db.get_network(context, network_id) + vlan_id = network['vlan'] + + interface_mac = port['mac_address'] + + # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx + mac = self.mac_reformat_62to34(interface_mac) + try: + self._driver.associate_mac_to_network(self._switch['address'], + self._switch['username'], + self._switch['password'], + vlan_id, + mac) + except Exception: + LOG.exception( + _("Brocade NOS driver: failed to associate mac %s") + % interface_mac) + raise Exception( + _("Brocade switch exception: create_port_postcommit failed")) + + LOG.info( + _("created port (postcommit): port_id=%(port_id)s" + " network_id=%(network_id)s tenant_id=%(tenant_id)s"), + {'port_id': port_id, + 'network_id': network_id, 'tenant_id': tenant_id}) + + def delete_port_precommit(self, mech_context): + """Delete logical port on the switch (db update).""" + + LOG.debug(_("delete_port_precommit: called")) + port = mech_context.current + port_id = port['id'] + + context = mech_context._plugin_context + + try: + brocade_db.delete_port(context, port_id) + except Exception: + LOG.exception(_("Brocade Mechanism: failed to delete port in db")) + raise Exception( + _("Brocade Mechanism: delete_port_precommit failed")) + + def delete_port_postcommit(self, mech_context): + """Dissociate MAC address from the portprofile.""" + + LOG.debug(_("delete_port_postcommit: called")) + port = mech_context.current + port_id = port['id'] + network_id = port['network_id'] + tenant_id = port['tenant_id'] + + context = mech_context._plugin_context + + network = brocade_db.get_network(context, network_id) + vlan_id = network['vlan'] + + interface_mac = port['mac_address'] + + # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx + mac = self.mac_reformat_62to34(interface_mac) + try: + self._driver.dissociate_mac_from_network( + self._switch['address'], + self._switch['username'], + self._switch['password'], + vlan_id, + mac) + except Exception: + LOG.exception( + _("Brocade NOS driver: failed to dissociate MAC %s") % + interface_mac) + raise Exception( + _("Brocade switch exception, delete_port_postcommit failed")) + + LOG.info( + _("delete port (postcommit): port_id=%(port_id)s" + " network_id=%(network_id)s tenant_id=%(tenant_id)s"), + {'port_id': port_id, + 'network_id': network_id, 'tenant_id': tenant_id}) + + def update_port_precommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("update_port_precommit(self: called")) + + def update_port_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("update_port_postcommit: called")) + + def create_subnet_precommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("create_subnetwork_precommit: called")) + + def create_subnet_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("create_subnetwork_postcommit: called")) + + def delete_subnet_precommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("delete_subnetwork_precommit: called")) + + def delete_subnet_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("delete_subnetwork_postcommit: called")) + + def update_subnet_precommit(self, mech_context): + """Noop now, it is left here for 
future.""" + LOG.debug(_("update_subnet_precommit(self: called")) + + def update_subnet_postcommit(self, mech_context): + """Noop now, it is left here for future.""" + LOG.debug(_("update_subnet_postcommit: called")) + + @staticmethod + def mac_reformat_62to34(interface_mac): + """Transform MAC address format. + + Transforms from 6 groups of 2 hexadecimal numbers delimited by ":" + to 3 groups of 4 hexadecimals numbers delimited by ".". + + :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx + :type interface_mac: string + :returns: MAC address in the format xxxx.xxxx.xxxx + :rtype: string + """ + + mac = interface_mac.replace(":", "") + mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12] + return mac diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py new file mode 100644 index 00000000..f647370a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py @@ -0,0 +1,236 @@ +# Copyright 2014 Brocade Communications System, Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# Varma Bhupatiraju (vbhupati@brocade.com) +# Shiv Haris (shivharis@hotmail.com) + + +"""Brocade NOS Driver implements NETCONF over SSHv2 for +Neutron network life-cycle management. +""" + +from ncclient import manager + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.ml2.drivers.brocade.nos import nctemplates as template + + +LOG = logging.getLogger(__name__) +SSH_PORT = 22 + + +def nos_unknown_host_cb(host, fingerprint): + """An unknown host callback. + + Returns `True` if it finds the key acceptable, + and `False` if not. This default callback for NOS always returns 'True' + (i.e. trusts all hosts for now). + """ + return True + + +class NOSdriver(): + """NOS NETCONF interface driver for Neutron network. 
+ + Handles life-cycle management of Neutron network (leverages AMPP on NOS) + """ + + def __init__(self): + self.mgr = None + + def connect(self, host, username, password): + """Connect via SSH and initialize the NETCONF session.""" + + # Use the persisted NETCONF connection + if self.mgr and self.mgr.connected: + return self.mgr + + # check if someone forgot to edit the conf file with real values + if host == '': + raise Exception(_("Brocade Switch IP address is not set, " + "check config ml2_conf_brocade.ini file")) + + # Open new NETCONF connection + try: + self.mgr = manager.connect(host=host, port=SSH_PORT, + username=username, password=password, + unknown_host_cb=nos_unknown_host_cb) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Connect failed to switch")) + + LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"), + dict(host=host, ssh_port=SSH_PORT)) + return self.mgr + + def close_session(self): + """Close NETCONF session.""" + if self.mgr: + self.mgr.close_session() + self.mgr = None + + def create_network(self, host, username, password, net_id): + """Creates a new virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.create_vlan_interface(mgr, net_id) + self.create_port_profile(mgr, name) + self.create_vlan_profile_for_port_profile(mgr, name) + self.configure_l2_mode_for_vlan_profile(mgr, name) + self.configure_trunk_mode_for_vlan_profile(mgr, name) + self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id) + self.activate_port_profile(mgr, name) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error")) + self.close_session() + + def delete_network(self, host, username, password, net_id): + """Deletes a virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.deactivate_port_profile(mgr, name) + self.delete_port_profile(mgr, name) + self.delete_vlan_interface(mgr, net_id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error")) + self.close_session() + + def associate_mac_to_network(self, host, username, password, + net_id, mac): + """Associates a MAC address to virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.associate_mac_to_port_profile(mgr, name, mac) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error")) + self.close_session() + + def dissociate_mac_from_network(self, host, username, password, + net_id, mac): + """Dissociates a MAC address from virtual network.""" + + name = template.OS_PORT_PROFILE_NAME.format(id=net_id) + try: + mgr = self.connect(host, username, password) + self.dissociate_mac_from_port_profile(mgr, name, mac) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("NETCONF error")) + self.close_session() + + def create_vlan_interface(self, mgr, vlan_id): + """Configures a VLAN interface.""" + + confstr = template.CREATE_VLAN_INTERFACE.format(vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) + + def delete_vlan_interface(self, mgr, vlan_id): + """Deletes a VLAN interface.""" + + confstr = template.DELETE_VLAN_INTERFACE.format(vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) + + def get_port_profiles(self, mgr): + """Retrieves all port profiles.""" 
+ + filterstr = template.PORT_PROFILE_XPATH_FILTER + response = mgr.get_config(source='running', + filter=('xpath', filterstr)).data_xml + return response + + def get_port_profile(self, mgr, name): + """Retrieves a port profile.""" + + filterstr = template.PORT_PROFILE_NAME_XPATH_FILTER.format(name=name) + response = mgr.get_config(source='running', + filter=('xpath', filterstr)).data_xml + return response + + def create_port_profile(self, mgr, name): + """Creates a port profile.""" + + confstr = template.CREATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def delete_port_profile(self, mgr, name): + """Deletes a port profile.""" + + confstr = template.DELETE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def activate_port_profile(self, mgr, name): + """Activates a port profile.""" + + confstr = template.ACTIVATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def deactivate_port_profile(self, mgr, name): + """Deactivates a port profile.""" + + confstr = template.DEACTIVATE_PORT_PROFILE.format(name=name) + mgr.edit_config(target='running', config=confstr) + + def associate_mac_to_port_profile(self, mgr, name, mac_address): + """Associates a MAC address to a port profile.""" + + confstr = template.ASSOCIATE_MAC_TO_PORT_PROFILE.format( + name=name, mac_address=mac_address) + mgr.edit_config(target='running', config=confstr) + + def dissociate_mac_from_port_profile(self, mgr, name, mac_address): + """Dissociates a MAC address from a port profile.""" + + confstr = template.DISSOCIATE_MAC_FROM_PORT_PROFILE.format( + name=name, mac_address=mac_address) + mgr.edit_config(target='running', config=confstr) + + def create_vlan_profile_for_port_profile(self, mgr, name): + """Creates VLAN sub-profile for port profile.""" + + confstr = template.CREATE_VLAN_PROFILE_FOR_PORT_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_l2_mode_for_vlan_profile(self, mgr, name): + """Configures L2 mode for VLAN sub-profile.""" + + confstr = template.CONFIGURE_L2_MODE_FOR_VLAN_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_trunk_mode_for_vlan_profile(self, mgr, name): + """Configures trunk mode for VLAN sub-profile.""" + + confstr = template.CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE.format( + name=name) + mgr.edit_config(target='running', config=confstr) + + def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id): + """Configures allowed VLANs for VLAN sub-profile.""" + + confstr = template.CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE.format( + name=name, vlan_id=vlan_id) + mgr.edit_config(target='running', config=confstr) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py new file mode 100644 index 00000000..202e84c1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py @@ -0,0 +1,416 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Henry Gessau, Cisco Systems + +import collections +import time + +import requests +import requests.exceptions + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc + + +LOG = logging.getLogger(__name__) + +APIC_CODE_FORBIDDEN = str(requests.codes.forbidden) + + +# Info about a Managed Object's relative name (RN) and container. +class ManagedObjectName(collections.namedtuple( + 'MoPath', ['container', 'rn_fmt', 'can_create'])): + def __new__(cls, container, rn_fmt, can_create=True): + return super(ManagedObjectName, cls).__new__(cls, container, rn_fmt, + can_create) + + +class ManagedObjectClass(object): + + """Information about a Managed Object (MO) class. + + Constructs and keeps track of the distinguished name (DN) and relative + name (RN) of a managed object (MO) class. The DN is the RN of the MO + appended to the recursive RNs of its containers, i.e.: + DN = uni/container-RN/.../container-RN/object-RN + + Also keeps track of whether the MO can be created in the APIC, as some + MOs are read-only or used for specifying relationships. + """ + + supported_mos = { + 'fvTenant': ManagedObjectName(None, 'tn-%s'), + 'fvBD': ManagedObjectName('fvTenant', 'BD-%s'), + 'fvRsBd': ManagedObjectName('fvAEPg', 'rsbd'), + 'fvSubnet': ManagedObjectName('fvBD', 'subnet-[%s]'), + 'fvCtx': ManagedObjectName('fvTenant', 'ctx-%s'), + 'fvRsCtx': ManagedObjectName('fvBD', 'rsctx'), + 'fvAp': ManagedObjectName('fvTenant', 'ap-%s'), + 'fvAEPg': ManagedObjectName('fvAp', 'epg-%s'), + 'fvRsProv': ManagedObjectName('fvAEPg', 'rsprov-%s'), + 'fvRsCons': ManagedObjectName('fvAEPg', 'rscons-%s'), + 'fvRsConsIf': ManagedObjectName('fvAEPg', 'rsconsif-%s'), + 'fvRsDomAtt': ManagedObjectName('fvAEPg', 'rsdomAtt-[%s]'), + 'fvRsPathAtt': ManagedObjectName('fvAEPg', 'rspathAtt-[%s]'), + + 'vzBrCP': ManagedObjectName('fvTenant', 'brc-%s'), + 'vzSubj': ManagedObjectName('vzBrCP', 'subj-%s'), + 'vzFilter': ManagedObjectName('fvTenant', 'flt-%s'), + 'vzRsFiltAtt': ManagedObjectName('vzSubj', 'rsfiltAtt-%s'), + 'vzEntry': ManagedObjectName('vzFilter', 'e-%s'), + 'vzInTerm': ManagedObjectName('vzSubj', 'intmnl'), + 'vzRsFiltAtt__In': ManagedObjectName('vzInTerm', 'rsfiltAtt-%s'), + 'vzOutTerm': ManagedObjectName('vzSubj', 'outtmnl'), + 'vzRsFiltAtt__Out': ManagedObjectName('vzOutTerm', 'rsfiltAtt-%s'), + 'vzCPIf': ManagedObjectName('fvTenant', 'cif-%s'), + 'vzRsIf': ManagedObjectName('vzCPIf', 'rsif'), + + 'vmmProvP': ManagedObjectName(None, 'vmmp-%s', False), + 'vmmDomP': ManagedObjectName('vmmProvP', 'dom-%s'), + 'vmmEpPD': ManagedObjectName('vmmDomP', 'eppd-[%s]'), + + 'physDomP': ManagedObjectName(None, 'phys-%s'), + + 'infra': ManagedObjectName(None, 'infra'), + 'infraNodeP': ManagedObjectName('infra', 'nprof-%s'), + 'infraLeafS': ManagedObjectName('infraNodeP', 'leaves-%s-typ-%s'), + 'infraNodeBlk': ManagedObjectName('infraLeafS', 
'nodeblk-%s'), + 'infraRsAccPortP': ManagedObjectName('infraNodeP', 'rsaccPortP-[%s]'), + 'infraAccPortP': ManagedObjectName('infra', 'accportprof-%s'), + 'infraHPortS': ManagedObjectName('infraAccPortP', 'hports-%s-typ-%s'), + 'infraPortBlk': ManagedObjectName('infraHPortS', 'portblk-%s'), + 'infraRsAccBaseGrp': ManagedObjectName('infraHPortS', 'rsaccBaseGrp'), + 'infraFuncP': ManagedObjectName('infra', 'funcprof'), + 'infraAccPortGrp': ManagedObjectName('infraFuncP', 'accportgrp-%s'), + 'infraRsAttEntP': ManagedObjectName('infraAccPortGrp', 'rsattEntP'), + 'infraAttEntityP': ManagedObjectName('infra', 'attentp-%s'), + 'infraRsDomP': ManagedObjectName('infraAttEntityP', 'rsdomP-[%s]'), + 'infraRsVlanNs__phys': ManagedObjectName('physDomP', 'rsvlanNs'), + 'infraRsVlanNs__vmm': ManagedObjectName('vmmDomP', 'rsvlanNs'), + + 'fvnsVlanInstP': ManagedObjectName('infra', 'vlanns-%s-%s'), + 'fvnsEncapBlk__vlan': ManagedObjectName('fvnsVlanInstP', + 'from-%s-to-%s'), + 'fvnsVxlanInstP': ManagedObjectName('infra', 'vxlanns-%s'), + 'fvnsEncapBlk__vxlan': ManagedObjectName('fvnsVxlanInstP', + 'from-%s-to-%s'), + + # Read-only + 'fabricTopology': ManagedObjectName(None, 'topology', False), + 'fabricPod': ManagedObjectName('fabricTopology', 'pod-%s', False), + 'fabricPathEpCont': ManagedObjectName('fabricPod', 'paths-%s', False), + 'fabricPathEp': ManagedObjectName('fabricPathEpCont', 'pathep-%s', + False), + } + + # Note(Henry): The use of a mutable default argument _inst_cache is + # intentional. It persists for the life of MoClass to cache instances. + # noinspection PyDefaultArgument + def __new__(cls, mo_class, _inst_cache={}): + """Ensure we create only one instance per mo_class.""" + try: + return _inst_cache[mo_class] + except KeyError: + new_inst = super(ManagedObjectClass, cls).__new__(cls) + new_inst.__init__(mo_class) + _inst_cache[mo_class] = new_inst + return new_inst + + def __init__(self, mo_class): + self.klass = mo_class + self.klass_name = mo_class.split('__')[0] + mo = self.supported_mos[mo_class] + self.container = mo.container + self.rn_fmt = mo.rn_fmt + self.dn_fmt, self.args = self._dn_fmt() + self.arg_count = self.dn_fmt.count('%s') + rn_has_arg = self.rn_fmt.count('%s') + self.can_create = rn_has_arg and mo.can_create + + def _dn_fmt(self): + """Build the distinguished name format using container and RN. + + DN = uni/container-RN/.../container-RN/object-RN + + Also make a list of the required name arguments. + Note: Call this method only once at init. 
+ """ + arg = [self.klass] if '%s' in self.rn_fmt else [] + if self.container: + container = ManagedObjectClass(self.container) + dn_fmt = '%s/%s' % (container.dn_fmt, self.rn_fmt) + args = container.args + arg + return dn_fmt, args + return 'uni/%s' % self.rn_fmt, arg + + def dn(self, *args): + """Return the distinguished name for a managed object.""" + return self.dn_fmt % args + + +class ApicSession(object): + + """Manages a session with the APIC.""" + + def __init__(self, host, port, usr, pwd, ssl): + protocol = ssl and 'https' or 'http' + self.api_base = '%s://%s:%s/api' % (protocol, host, port) + self.session = requests.Session() + self.session_deadline = 0 + self.session_timeout = 0 + self.cookie = {} + + # Log in + self.authentication = None + self.username = None + self.password = None + if usr and pwd: + self.login(usr, pwd) + + @staticmethod + def _make_data(key, **attrs): + """Build the body for a msg out of a key and some attributes.""" + return json.dumps({key: {'attributes': attrs}}) + + def _api_url(self, api): + """Create the URL for a generic API.""" + return '%s/%s.json' % (self.api_base, api) + + def _mo_url(self, mo, *args): + """Create a URL for a MO lookup by DN.""" + dn = mo.dn(*args) + return '%s/mo/%s.json' % (self.api_base, dn) + + def _qry_url(self, mo): + """Create a URL for a query lookup by MO class.""" + return '%s/class/%s.json' % (self.api_base, mo.klass_name) + + def _check_session(self): + """Check that we are logged in and ensure the session is active.""" + if not self.authentication: + raise cexc.ApicSessionNotLoggedIn + if time.time() > self.session_deadline: + self.refresh() + + def _send(self, request, url, data=None, refreshed=None): + """Send a request and process the response.""" + if data is None: + response = request(url, cookies=self.cookie) + else: + response = request(url, data=data, cookies=self.cookie) + if response is None: + raise cexc.ApicHostNoResponse(url=url) + # Every request refreshes the timeout + self.session_deadline = time.time() + self.session_timeout + if data is None: + request_str = url + else: + request_str = '%s, data=%s' % (url, data) + LOG.debug(_("data = %s"), data) + # imdata is where the APIC returns the useful information + imdata = response.json().get('imdata') + LOG.debug(_("Response: %s"), imdata) + if response.status_code != requests.codes.ok: + try: + err_code = imdata[0]['error']['attributes']['code'] + err_text = imdata[0]['error']['attributes']['text'] + except (IndexError, KeyError): + err_code = '[code for APIC error not found]' + err_text = '[text for APIC error not found]' + # If invalid token then re-login and retry once + if (not refreshed and err_code == APIC_CODE_FORBIDDEN and + err_text.lower().startswith('token was invalid')): + self.login() + return self._send(request, url, data=data, refreshed=True) + raise cexc.ApicResponseNotOk(request=request_str, + status=response.status_code, + reason=response.reason, + err_text=err_text, err_code=err_code) + return imdata + + # REST requests + + def get_data(self, request): + """Retrieve generic data from the server.""" + self._check_session() + url = self._api_url(request) + return self._send(self.session.get, url) + + def get_mo(self, mo, *args): + """Retrieve a managed object by its distinguished name.""" + self._check_session() + url = self._mo_url(mo, *args) + '?query-target=self' + return self._send(self.session.get, url) + + def list_mo(self, mo): + """Retrieve the list of managed objects for a class.""" + self._check_session() + url = 
self._qry_url(mo) + return self._send(self.session.get, url) + + def post_data(self, request, data): + """Post generic data to the server.""" + self._check_session() + url = self._api_url(request) + return self._send(self.session.post, url, data=data) + + def post_mo(self, mo, *args, **kwargs): + """Post data for a managed object to the server.""" + self._check_session() + url = self._mo_url(mo, *args) + data = self._make_data(mo.klass_name, **kwargs) + return self._send(self.session.post, url, data=data) + + # Session management + + def _save_cookie(self, request, response): + """Save the session cookie and its expiration time.""" + imdata = response.json().get('imdata') + if response.status_code == requests.codes.ok: + attributes = imdata[0]['aaaLogin']['attributes'] + try: + self.cookie = {'APIC-Cookie': attributes['token']} + except KeyError: + raise cexc.ApicResponseNoCookie(request=request) + timeout = int(attributes['refreshTimeoutSeconds']) + LOG.debug(_("APIC session will expire in %d seconds"), timeout) + # Give ourselves a few seconds to refresh before timing out + self.session_timeout = timeout - 5 + self.session_deadline = time.time() + self.session_timeout + else: + attributes = imdata[0]['error']['attributes'] + return attributes + + def login(self, usr=None, pwd=None): + """Log in to controller. Save user name and authentication.""" + usr = usr or self.username + pwd = pwd or self.password + name_pwd = self._make_data('aaaUser', name=usr, pwd=pwd) + url = self._api_url('aaaLogin') + try: + response = self.session.post(url, data=name_pwd, timeout=10.0) + except requests.exceptions.Timeout: + raise cexc.ApicHostNoResponse(url=url) + attributes = self._save_cookie('aaaLogin', response) + if response.status_code == requests.codes.ok: + self.username = usr + self.password = pwd + self.authentication = attributes + else: + self.authentication = None + raise cexc.ApicResponseNotOk(request=url, + status=response.status_code, + reason=response.reason, + err_text=attributes['text'], + err_code=attributes['code']) + + def refresh(self): + """Called when a session has timed out or almost timed out.""" + url = self._api_url('aaaRefresh') + response = self.session.get(url, cookies=self.cookie) + attributes = self._save_cookie('aaaRefresh', response) + if response.status_code == requests.codes.ok: + # We refreshed before the session timed out. + self.authentication = attributes + else: + err_code = attributes['code'] + err_text = attributes['text'] + if (err_code == APIC_CODE_FORBIDDEN and + err_text.lower().startswith('token was invalid')): + # This means the token timed out, so log in again. 
+ LOG.debug(_("APIC session timed-out, logging in again.")) + self.login() + else: + self.authentication = None + raise cexc.ApicResponseNotOk(request=url, + status=response.status_code, + reason=response.reason, + err_text=err_text, + err_code=err_code) + + def logout(self): + """End session with controller.""" + if not self.username: + self.authentication = None + if self.authentication: + data = self._make_data('aaaUser', name=self.username) + self.post_data('aaaLogout', data=data) + self.authentication = None + + +class ManagedObjectAccess(object): + + """CRUD operations on APIC Managed Objects.""" + + def __init__(self, session, mo_class): + self.session = session + self.mo = ManagedObjectClass(mo_class) + + def _create_container(self, *args): + """Recursively create all container objects.""" + if self.mo.container: + container = ManagedObjectAccess(self.session, self.mo.container) + if container.mo.can_create: + container_args = args[0: container.mo.arg_count] + container._create_container(*container_args) + container.session.post_mo(container.mo, *container_args) + + def create(self, *args, **kwargs): + self._create_container(*args) + if self.mo.can_create and 'status' not in kwargs: + kwargs['status'] = 'created' + return self.session.post_mo(self.mo, *args, **kwargs) + + def _mo_attributes(self, obj_data): + if (self.mo.klass_name in obj_data and + 'attributes' in obj_data[self.mo.klass_name]): + return obj_data[self.mo.klass_name]['attributes'] + + def get(self, *args): + """Return a dict of the MO's attributes, or None.""" + imdata = self.session.get_mo(self.mo, *args) + if imdata: + return self._mo_attributes(imdata[0]) + + def list_all(self): + imdata = self.session.list_mo(self.mo) + return filter(None, [self._mo_attributes(obj) for obj in imdata]) + + def list_names(self): + return [obj['name'] for obj in self.list_all()] + + def update(self, *args, **kwargs): + return self.session.post_mo(self.mo, *args, **kwargs) + + def delete(self, *args): + return self.session.post_mo(self.mo, *args, status='deleted') + + +class RestClient(ApicSession): + + """APIC REST client for OpenStack Neutron.""" + + def __init__(self, host, port=80, usr=None, pwd=None, ssl=False): + """Establish a session with the APIC.""" + super(RestClient, self).__init__(host, port, usr, pwd, ssl) + + def __getattr__(self, mo_class): + """Add supported MOs as properties on demand.""" + if mo_class not in ManagedObjectClass.supported_mos: + raise cexc.ApicManagedObjectNotSupported(mo_class=mo_class) + self.__dict__[mo_class] = ManagedObjectAccess(self, mo_class) + return self.__dict__[mo_class] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py new file mode 100644 index 00000000..f86aa597 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py @@ -0,0 +1,559 @@ +# Copyright (c) 2014 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. + +import itertools +import uuid + +from oslo.config import cfg + +from neutron.openstack.common import excutils +from neutron.plugins.ml2.drivers.cisco.apic import apic_client +from neutron.plugins.ml2.drivers.cisco.apic import apic_model +from neutron.plugins.ml2.drivers.cisco.apic import config +from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc + +AP_NAME = 'openstack' +CONTEXT_ENFORCED = '1' +CONTEXT_UNENFORCED = '2' +CONTEXT_DEFAULT = 'default' +DN_KEY = 'dn' +PORT_DN_PATH = 'topology/pod-1/paths-%s/pathep-[eth%s]' +SCOPE_GLOBAL = 'global' +SCOPE_TENANT = 'tenant' +TENANT_COMMON = 'common' + + +def group_by_ranges(i): + """Group a list of numbers into tuples representing contiguous ranges.""" + for a, b in itertools.groupby(enumerate(sorted(i)), lambda (x, y): y - x): + b = list(b) + yield b[0][1], b[-1][1] + + +class APICManager(object): + """Class to manage APIC translations and workflow. + + This class manages translation from Neutron objects to APIC + managed objects and contains workflows to implement these + translations. + """ + def __init__(self): + self.db = apic_model.ApicDbModel() + + apic_conf = cfg.CONF.ml2_cisco_apic + self.switch_dict = config.create_switch_dictionary() + + # Connect to the the APIC + self.apic = apic_client.RestClient( + apic_conf.apic_host, + apic_conf.apic_port, + apic_conf.apic_username, + apic_conf.apic_password + ) + + self.port_profiles = {} + self.vmm_domain = None + self.phys_domain = None + self.vlan_ns = None + self.node_profiles = {} + self.entity_profile = None + self.function_profile = None + self.clear_node_profiles = apic_conf.apic_clear_node_profiles + + def ensure_infra_created_on_apic(self): + """Ensure the infrastructure is setup. 
+ + Loop over the switch dictionary from the config and + setup profiles for switches, modules and ports + """ + # Loop over switches + for switch in self.switch_dict: + # Create a node profile for this switch + self.ensure_node_profile_created_for_switch(switch) + + # Check if a port profile exists for this node + ppname = self.check_infra_port_profiles(switch) + + # Gather port ranges for this switch + modules = self.gather_infra_module_ports(switch) + + # Setup each module and port range + for module in modules: + profile = self.db.get_profile_for_module(switch, ppname, + module) + if not profile: + # Create host port selector for this module + hname = uuid.uuid4() + try: + self.apic.infraHPortS.create(ppname, hname, 'range') + # Add relation to the function profile + fpdn = self.function_profile[DN_KEY] + self.apic.infraRsAccBaseGrp.create(ppname, hname, + 'range', tDn=fpdn) + modules[module].sort() + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + self.apic.infraHPortS.delete(ppname, hname, + 'range') + else: + hname = profile.hpselc_id + + ranges = group_by_ranges(modules[module]) + # Add this module and ports to the profile + for prange in ranges: + # Check if this port block is already added to the profile + if not self.db.get_profile_for_module_and_ports( + switch, ppname, module, prange[0], prange[-1]): + # Create port block for this port range + pbname = uuid.uuid4() + self.apic.infraPortBlk.create(ppname, hname, 'range', + pbname, fromCard=module, + toCard=module, + fromPort=str(prange[0]), + toPort=str(prange[-1])) + # Add DB row + self.db.add_profile_for_module_and_ports( + switch, ppname, hname, module, + prange[0], prange[-1]) + + def check_infra_port_profiles(self, switch): + """Check and create infra port profiles for a node.""" + sprofile = self.db.get_port_profile_for_node(switch) + ppname = None + if not sprofile: + # Generate uuid for port profile name + ppname = uuid.uuid4() + try: + # Create port profile for this switch + pprofile = self.ensure_port_profile_created_on_apic(ppname) + # Add port profile to node profile + ppdn = pprofile[DN_KEY] + self.apic.infraRsAccPortP.create(switch, ppdn) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete port profile + self.apic.infraAccPortP.delete(ppname) + else: + ppname = sprofile.profile_id + + return ppname + + def gather_infra_module_ports(self, switch): + """Build modules and ports per module dictionary.""" + ports = self.switch_dict[switch] + # Gather common modules + modules = {} + for port in ports: + module, sw_port = port.split('/') + if module not in modules: + modules[module] = [] + modules[module].append(int(sw_port)) + + return modules + + def ensure_context_unenforced(self, tenant_id=TENANT_COMMON, + name=CONTEXT_DEFAULT): + """Set the specified tenant's context to unenforced.""" + ctx = self.apic.fvCtx.get(tenant_id, name) + if not ctx: + self.apic.fvCtx.create(tenant_id, name, + pcEnfPref=CONTEXT_UNENFORCED) + elif ctx['pcEnfPref'] != CONTEXT_UNENFORCED: + self.apic.fvCtx.update(tenant_id, name, + pcEnfPref=CONTEXT_UNENFORCED) + + def ensure_context_enforced(self, tenant_id=TENANT_COMMON, + name=CONTEXT_DEFAULT): + """Set the specified tenant's context to enforced.""" + ctx = self.apic.fvCtx.get(tenant_id, name) + if not ctx: + self.apic.fvCtx.create(tenant_id, name, pcEnfPref=CONTEXT_ENFORCED) + elif ctx['pcEnfPref'] != CONTEXT_ENFORCED: + self.apic.fvCtx.update(tenant_id, name, pcEnfPref=CONTEXT_ENFORCED) + + def 
ensure_entity_profile_created_on_apic(self, name): + """Create the infrastructure entity profile.""" + if self.clear_node_profiles: + self.apic.infraAttEntityP.delete(name) + self.entity_profile = self.apic.infraAttEntityP.get(name) + if not self.entity_profile: + try: + phys_dn = self.phys_domain[DN_KEY] + self.apic.infraAttEntityP.create(name) + # Attach phys domain to entity profile + self.apic.infraRsDomP.create(name, phys_dn) + self.entity_profile = self.apic.infraAttEntityP.get(name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the created entity profile + self.apic.infraAttEntityP.delete(name) + + def ensure_function_profile_created_on_apic(self, name): + """Create the infrastructure function profile.""" + if self.clear_node_profiles: + self.apic.infraAccPortGrp.delete(name) + self.function_profile = self.apic.infraAccPortGrp.get(name) + if not self.function_profile: + try: + self.apic.infraAccPortGrp.create(name) + # Attach entity profile to function profile + entp_dn = self.entity_profile[DN_KEY] + self.apic.infraRsAttEntP.create(name, tDn=entp_dn) + self.function_profile = self.apic.infraAccPortGrp.get(name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the created function profile + self.apic.infraAccPortGrp.delete(name) + + def ensure_node_profile_created_for_switch(self, switch_id): + """Creates a switch node profile. + + Create a node profile for a switch and add a switch + to the leaf node selector + """ + if self.clear_node_profiles: + self.apic.infraNodeP.delete(switch_id) + self.db.delete_profile_for_node(switch_id) + sobj = self.apic.infraNodeP.get(switch_id) + if not sobj: + try: + # Create Node profile + self.apic.infraNodeP.create(switch_id) + # Create leaf selector + lswitch_id = uuid.uuid4() + self.apic.infraLeafS.create(switch_id, lswitch_id, 'range') + # Add leaf nodes to the selector + name = uuid.uuid4() + self.apic.infraNodeBlk.create(switch_id, lswitch_id, 'range', + name, from_=switch_id, + to_=switch_id) + sobj = self.apic.infraNodeP.get(switch_id) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Remove the node profile + self.apic.infraNodeP.delete(switch_id) + + self.node_profiles[switch_id] = { + 'object': sobj + } + + def ensure_port_profile_created_on_apic(self, name): + """Create a port profile.""" + try: + self.apic.infraAccPortP.create(name) + return self.apic.infraAccPortP.get(name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + self.apic.infraAccPortP.delete(name) + + def ensure_vmm_domain_created_on_apic(self, vmm_name, + vlan_ns=None, vxlan_ns=None): + """Create Virtual Machine Manager domain. + + Creates the VMM domain on the APIC and adds a VLAN or VXLAN + namespace to that VMM domain. 
+ TODO (asomya): Add VXLAN support + """ + provider = 'VMware' + if self.clear_node_profiles: + self.apic.vmmDomP.delete(provider, vmm_name) + self.vmm_domain = self.apic.vmmDomP.get(provider, vmm_name) + if not self.vmm_domain: + try: + self.apic.vmmDomP.create(provider, vmm_name) + if vlan_ns: + vlan_ns_dn = vlan_ns[DN_KEY] + self.apic.infraRsVlanNs__vmm.create(provider, vmm_name, + tDn=vlan_ns_dn) + self.vmm_domain = self.apic.vmmDomP.get(provider, vmm_name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the VMM domain + self.apic.vmmDomP.delete(provider, vmm_name) + + def ensure_phys_domain_created_on_apic(self, phys_name, + vlan_ns=None): + """Create Virtual Machine Manager domain. + + Creates the VMM domain on the APIC and adds a VLAN or VXLAN + namespace to that VMM domain. + TODO (asomya): Add VXLAN support + """ + if self.clear_node_profiles: + self.apic.physDomP.delete(phys_name) + self.phys_domain = self.apic.physDomP.get(phys_name) + if not self.phys_domain: + try: + self.apic.physDomP.create(phys_name) + if vlan_ns: + vlan_ns_dn = vlan_ns[DN_KEY] + self.apic.infraRsVlanNs__phys.create(phys_name, + tDn=vlan_ns_dn) + self.phys_domain = self.apic.physDomP.get(phys_name) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the physical domain + self.apic.physDomP.delete(phys_name) + + def ensure_vlan_ns_created_on_apic(self, name, vlan_min, vlan_max): + """Creates a static VLAN namespace with the given vlan range.""" + ns_args = name, 'static' + if self.clear_node_profiles: + self.apic.fvnsVlanInstP.delete(name, 'dynamic') + self.apic.fvnsVlanInstP.delete(*ns_args) + self.vlan_ns = self.apic.fvnsVlanInstP.get(*ns_args) + if not self.vlan_ns: + try: + self.apic.fvnsVlanInstP.create(*ns_args) + vlan_min = 'vlan-' + vlan_min + vlan_max = 'vlan-' + vlan_max + ns_blk_args = name, 'static', vlan_min, vlan_max + vlan_encap = self.apic.fvnsEncapBlk__vlan.get(*ns_blk_args) + if not vlan_encap: + ns_kw_args = { + 'name': 'encap', + 'from': vlan_min, + 'to': vlan_max + } + self.apic.fvnsEncapBlk__vlan.create(*ns_blk_args, + **ns_kw_args) + self.vlan_ns = self.apic.fvnsVlanInstP.get(*ns_args) + return self.vlan_ns + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the vlan namespace + self.apic.fvnsVlanInstP.delete(*ns_args) + + def ensure_tenant_created_on_apic(self, tenant_id): + """Make sure a tenant exists on the APIC.""" + if not self.apic.fvTenant.get(tenant_id): + self.apic.fvTenant.create(tenant_id) + + def ensure_bd_created_on_apic(self, tenant_id, bd_id): + """Creates a Bridge Domain on the APIC.""" + if not self.apic.fvBD.get(tenant_id, bd_id): + try: + self.apic.fvBD.create(tenant_id, bd_id) + # Add default context to the BD + self.ensure_context_enforced() + self.apic.fvRsCtx.create(tenant_id, bd_id, + tnFvCtxName=CONTEXT_DEFAULT) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the bridge domain + self.apic.fvBD.delete(tenant_id, bd_id) + + def delete_bd_on_apic(self, tenant_id, bd_id): + """Deletes a Bridge Domain from the APIC.""" + self.apic.fvBD.delete(tenant_id, bd_id) + + def ensure_subnet_created_on_apic(self, tenant_id, bd_id, gw_ip): + """Creates a subnet on the APIC + + The gateway ip (gw_ip) should be specified as a CIDR + e.g. 
10.0.0.1/24 + """ + if not self.apic.fvSubnet.get(tenant_id, bd_id, gw_ip): + self.apic.fvSubnet.create(tenant_id, bd_id, gw_ip) + + def ensure_filter_created_on_apic(self, tenant_id, filter_id): + """Create a filter on the APIC.""" + if not self.apic.vzFilter.get(tenant_id, filter_id): + self.apic.vzFilter.create(tenant_id, filter_id) + + def ensure_epg_created_for_network(self, tenant_id, network_id, net_name): + """Creates an End Point Group on the APIC. + + Create a new EPG on the APIC for the network spcified. This information + is also tracked in the local DB and associate the bridge domain for the + network with the EPG created. + """ + # Check if an EPG is already present for this network + epg = self.db.get_epg_for_network(network_id) + if epg: + return epg + + # Create a new EPG on the APIC + epg_uid = '-'.join([str(net_name), str(uuid.uuid4())]) + try: + self.apic.fvAEPg.create(tenant_id, AP_NAME, epg_uid) + + # Add bd to EPG + bd = self.apic.fvBD.get(tenant_id, network_id) + bd_name = bd['name'] + + # Create fvRsBd + self.apic.fvRsBd.create(tenant_id, AP_NAME, epg_uid, + tnFvBDName=bd_name) + + # Add EPG to physical domain + phys_dn = self.phys_domain[DN_KEY] + self.apic.fvRsDomAtt.create(tenant_id, AP_NAME, epg_uid, phys_dn) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete the EPG + self.apic.fvAEPg.delete(tenant_id, AP_NAME, epg_uid) + + # Stick it in the DB + epg = self.db.write_epg_for_network(network_id, epg_uid) + + return epg + + def delete_epg_for_network(self, tenant_id, network_id): + """Deletes the EPG from the APIC and removes it from the DB.""" + # Check if an EPG is already present for this network + epg = self.db.get_epg_for_network(network_id) + if not epg: + return False + + # Delete this epg + self.apic.fvAEPg.delete(tenant_id, AP_NAME, epg.epg_id) + # Remove DB row + self.db.delete_epg(epg) + + def create_tenant_filter(self, tenant_id): + """Creates a tenant filter and a generic entry under it.""" + fuuid = uuid.uuid4() + try: + # Create a new tenant filter + self.apic.vzFilter.create(tenant_id, fuuid) + # Create a new entry + euuid = uuid.uuid4() + self.apic.vzEntry.create(tenant_id, fuuid, euuid) + return fuuid + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + self.apic.vzFilter.delete(tenant_id, fuuid) + + def set_contract_for_epg(self, tenant_id, epg_id, + contract_id, provider=False): + """Set the contract for an EPG. + + By default EPGs are consumers to a contract. Set provider flag + for a single EPG to act as a contract provider. + """ + if provider: + try: + self.apic.fvRsProv.create(tenant_id, AP_NAME, + epg_id, contract_id) + self.db.set_provider_contract(epg_id) + self.make_tenant_contract_global(tenant_id) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + self.make_tenant_contract_local(tenant_id) + self.apic.fvRsProv.delete(tenant_id, AP_NAME, + epg_id, contract_id) + else: + self.apic.fvRsCons.create(tenant_id, AP_NAME, epg_id, contract_id) + + def delete_contract_for_epg(self, tenant_id, epg_id, + contract_id, provider=False): + """Delete the contract for an End Point Group. + + Check if the EPG was a provider and attempt to grab another contract + consumer from the DB and set that as the new contract provider. 
+ """ + if provider: + self.apic.fvRsProv.delete(tenant_id, AP_NAME, epg_id, contract_id) + self.db.unset_provider_contract(epg_id) + # Pick out another EPG to set as contract provider + epg = self.db.get_an_epg(epg_id) + self.update_contract_for_epg(tenant_id, epg.epg_id, + contract_id, True) + else: + self.apic.fvRsCons.delete(tenant_id, AP_NAME, epg_id, contract_id) + + def update_contract_for_epg(self, tenant_id, epg_id, + contract_id, provider=False): + """Updates the contract for an End Point Group.""" + self.apic.fvRsCons.delete(tenant_id, AP_NAME, epg_id, contract_id) + self.set_contract_for_epg(tenant_id, epg_id, contract_id, provider) + + def create_tenant_contract(self, tenant_id): + """Creates a tenant contract. + + Create a tenant contract if one doesn't exist. Also create a + subject, filter and entry and set the filters to allow all + protocol traffic on all ports + """ + contract = self.db.get_contract_for_tenant(tenant_id) + if not contract: + cuuid = uuid.uuid4() + try: + # Create contract + self.apic.vzBrCP.create(tenant_id, cuuid, scope=SCOPE_TENANT) + acontract = self.apic.vzBrCP.get(tenant_id, cuuid) + # Create subject + suuid = uuid.uuid4() + self.apic.vzSubj.create(tenant_id, cuuid, suuid) + # Create filter and entry + tfilter = self.create_tenant_filter(tenant_id) + # Create interm and outterm + self.apic.vzInTerm.create(tenant_id, cuuid, suuid) + self.apic.vzRsFiltAtt__In.create(tenant_id, cuuid, + suuid, tfilter) + self.apic.vzOutTerm.create(tenant_id, cuuid, suuid) + self.apic.vzRsFiltAtt__Out.create(tenant_id, cuuid, + suuid, tfilter) + # Create contract interface + iuuid = uuid.uuid4() + self.apic.vzCPIf.create(tenant_id, iuuid) + self.apic.vzRsIf.create(tenant_id, iuuid, + tDn=acontract[DN_KEY]) + # Store contract in DB + contract = self.db.write_contract_for_tenant(tenant_id, + cuuid, tfilter) + except (cexc.ApicResponseNotOk, KeyError): + with excutils.save_and_reraise_exception(): + # Delete tenant contract + self.apic.vzBrCP.delete(tenant_id, cuuid) + + return contract + + def make_tenant_contract_global(self, tenant_id): + """Mark the tenant contract's scope to global.""" + contract = self.db.get_contract_for_tenant(tenant_id) + self.apic.vzBrCP.update(tenant_id, contract.contract_id, + scope=SCOPE_GLOBAL) + + def make_tenant_contract_local(self, tenant_id): + """Mark the tenant contract's scope to tenant.""" + contract = self.db.get_contract_for_tenant(tenant_id) + self.apic.vzBrCP.update(tenant_id, contract.contract_id, + scope=SCOPE_TENANT) + + def ensure_path_created_for_port(self, tenant_id, network_id, + host_id, encap, net_name): + """Create path attribute for an End Point Group.""" + encap = 'vlan-' + str(encap) + epg = self.ensure_epg_created_for_network(tenant_id, network_id, + net_name) + eid = epg.epg_id + + # Get attached switch and port for this host + host_config = config.get_switch_and_port_for_host(host_id) + if not host_config: + raise cexc.ApicHostNotConfigured(host=host_id) + switch, port = host_config + pdn = PORT_DN_PATH % (switch, port) + + # Check if exists + patt = self.apic.fvRsPathAtt.get(tenant_id, AP_NAME, eid, pdn) + if not patt: + self.apic.fvRsPathAtt.create(tenant_id, AP_NAME, eid, pdn, + encap=encap, mode="regular", + instrImedcy="immediate") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py new file mode 100644 index 00000000..a3c05d63 --- /dev/null +++ 
b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py @@ -0,0 +1,177 @@ +# Copyright (c) 2014 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. + +import sqlalchemy as sa + +from neutron.db import api as db_api +from neutron.db import model_base +from neutron.db import models_v2 + + +class NetworkEPG(model_base.BASEV2): + + """EPG's created on the apic per network.""" + + __tablename__ = 'cisco_ml2_apic_epgs' + + network_id = sa.Column(sa.String(255), nullable=False, primary_key=True) + epg_id = sa.Column(sa.String(64), nullable=False) + segmentation_id = sa.Column(sa.String(64), nullable=False) + provider = sa.Column(sa.Boolean, default=False, nullable=False) + + +class PortProfile(model_base.BASEV2): + + """Port profiles created on the APIC.""" + + __tablename__ = 'cisco_ml2_apic_port_profiles' + + node_id = sa.Column(sa.String(255), nullable=False, primary_key=True) + profile_id = sa.Column(sa.String(64), nullable=False) + hpselc_id = sa.Column(sa.String(64), nullable=False) + module = sa.Column(sa.String(10), nullable=False) + from_port = sa.Column(sa.Integer(), nullable=False) + to_port = sa.Column(sa.Integer(), nullable=False) + + +class TenantContract(model_base.BASEV2, models_v2.HasTenant): + + """Contracts (and Filters) created on the APIC.""" + + __tablename__ = 'cisco_ml2_apic_contracts' + + __table_args__ = (sa.PrimaryKeyConstraint('tenant_id'),) + contract_id = sa.Column(sa.String(64), nullable=False) + filter_id = sa.Column(sa.String(64), nullable=False) + + +class ApicDbModel(object): + + """DB Model to manage all APIC DB interactions.""" + + def __init__(self): + self.session = db_api.get_session() + + def get_port_profile_for_node(self, node_id): + """Returns a port profile for a switch if found in the DB.""" + return self.session.query(PortProfile).filter_by( + node_id=node_id).first() + + def get_profile_for_module_and_ports(self, node_id, profile_id, + module, from_port, to_port): + """Returns profile for module and ports. + + Grabs the profile row from the DB for the specified switch, + module (linecard) and from/to port combination. 
+ """ + return self.session.query(PortProfile).filter_by( + node_id=node_id, + module=module, + profile_id=profile_id, + from_port=from_port, + to_port=to_port).first() + + def get_profile_for_module(self, node_id, profile_id, module): + """Returns the first profile for a switch module from the DB.""" + return self.session.query(PortProfile).filter_by( + node_id=node_id, + profile_id=profile_id, + module=module).first() + + def add_profile_for_module_and_ports(self, node_id, profile_id, + hpselc_id, module, + from_port, to_port): + """Adds a profile for switch, module and port range.""" + row = PortProfile(node_id=node_id, profile_id=profile_id, + hpselc_id=hpselc_id, module=module, + from_port=from_port, to_port=to_port) + self.session.add(row) + self.session.flush() + + def get_provider_contract(self): + """Returns provider EPG from the DB if found.""" + return self.session.query(NetworkEPG).filter_by( + provider=True).first() + + def set_provider_contract(self, epg_id): + """Sets an EPG to be a contract provider.""" + epg = self.session.query(NetworkEPG).filter_by( + epg_id=epg_id).first() + if epg: + epg.provider = True + self.session.merge(epg) + self.session.flush() + + def unset_provider_contract(self, epg_id): + """Sets an EPG to be a contract consumer.""" + epg = self.session.query(NetworkEPG).filter_by( + epg_id=epg_id).first() + if epg: + epg.provider = False + self.session.merge(epg) + self.session.flush() + + def get_an_epg(self, exception): + """Returns an EPG from the DB that does not match the id specified.""" + return self.session.query(NetworkEPG).filter( + NetworkEPG.epg_id != exception).first() + + def get_epg_for_network(self, network_id): + """Returns an EPG for a give neutron network.""" + return self.session.query(NetworkEPG).filter_by( + network_id=network_id).first() + + def write_epg_for_network(self, network_id, epg_uid, segmentation_id='1'): + """Stores EPG details for a network. + + NOTE: Segmentation_id is just a placeholder currently, it will be + populated with a proper segment id once segmentation mgmt is + moved to the APIC. + """ + epg = NetworkEPG(network_id=network_id, epg_id=epg_uid, + segmentation_id=segmentation_id) + self.session.add(epg) + self.session.flush() + return epg + + def delete_epg(self, epg): + """Deletes an EPG from the DB.""" + self.session.delete(epg) + self.session.flush() + + def get_contract_for_tenant(self, tenant_id): + """Returns the specified tenant's contract.""" + return self.session.query(TenantContract).filter_by( + tenant_id=tenant_id).first() + + def write_contract_for_tenant(self, tenant_id, contract_id, filter_id): + """Stores a new contract for the given tenant.""" + contract = TenantContract(tenant_id=tenant_id, + contract_id=contract_id, + filter_id=filter_id) + self.session.add(contract) + self.session.flush() + + return contract + + def delete_profile_for_node(self, node_id): + """Deletes the port profile for a node.""" + profile = self.session.query(PortProfile).filter_by( + node_id=node_id).first() + if profile: + self.session.delete(profile) + self.session.flush() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/config.py new file mode 100644 index 00000000..c5c43f28 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/config.py @@ -0,0 +1,82 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. + +from oslo.config import cfg + + +apic_opts = [ + cfg.StrOpt('apic_host', + help=_("Host name or IP Address of the APIC controller")), + cfg.StrOpt('apic_username', + help=_("Username for the APIC controller")), + cfg.StrOpt('apic_password', + help=_("Password for the APIC controller"), secret=True), + cfg.StrOpt('apic_port', + help=_("Communication port for the APIC controller")), + cfg.StrOpt('apic_vmm_provider', default='VMware', + help=_("Name for the VMM domain provider")), + cfg.StrOpt('apic_vmm_domain', default='openstack', + help=_("Name for the VMM domain to be created for Openstack")), + cfg.StrOpt('apic_vlan_ns_name', default='openstack_ns', + help=_("Name for the vlan namespace to be used for openstack")), + cfg.StrOpt('apic_vlan_range', default='2:4093', + help=_("Range of VLAN's to be used for Openstack")), + cfg.StrOpt('apic_node_profile', default='openstack_profile', + help=_("Name of the node profile to be created")), + cfg.StrOpt('apic_entity_profile', default='openstack_entity', + help=_("Name of the entity profile to be created")), + cfg.StrOpt('apic_function_profile', default='openstack_function', + help=_("Name of the function profile to be created")), + cfg.BoolOpt('apic_clear_node_profiles', default=False, + help=_("Clear the node profiles on the APIC at startup " + "(mainly used for testing)")), +] + + +cfg.CONF.register_opts(apic_opts, "ml2_cisco_apic") + + +def get_switch_and_port_for_host(host_id): + for switch, connected in _switch_dict.items(): + for port, hosts in connected.items(): + if host_id in hosts: + return switch, port + + +_switch_dict = {} + + +def create_switch_dictionary(): + multi_parser = cfg.MultiConfigParser() + read_ok = multi_parser.read(cfg.CONF.config_file) + + if len(read_ok) != len(cfg.CONF.config_file): + raise cfg.Error(_("Some config files were not parsed properly")) + + for parsed_file in multi_parser.parsed: + for parsed_item in parsed_file.keys(): + if parsed_item.startswith('apic_switch'): + switch, switch_id = parsed_item.split(':') + if switch.lower() == 'apic_switch': + _switch_dict[switch_id] = {} + port_cfg = parsed_file[parsed_item].items() + for host_list, port in port_cfg: + hosts = host_list.split(',') + port = port[0] + _switch_dict[switch_id][port] = hosts + + return _switch_dict diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py new file mode 100644 index 00000000..b33abb17 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py @@ -0,0 +1,59 @@ +# Copyright (c) 2014 Cisco Systems +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Henry Gessau, Cisco Systems + +"""Exceptions used by Cisco APIC ML2 mechanism driver.""" + +from neutron.common import exceptions + + +class ApicHostNoResponse(exceptions.NotFound): + """No response from the APIC via the specified URL.""" + message = _("No response from APIC at %(url)s") + + +class ApicResponseNotOk(exceptions.NeutronException): + """A response from the APIC was not HTTP OK.""" + message = _("APIC responded with HTTP status %(status)s: %(reason)s, " + "Request: '%(request)s', " + "APIC error code %(err_code)s: %(err_text)s") + + +class ApicResponseNoCookie(exceptions.NeutronException): + """A response from the APIC did not contain an expected cookie.""" + message = _("APIC failed to provide cookie for %(request)s request") + + +class ApicSessionNotLoggedIn(exceptions.NotAuthorized): + """Attempted APIC operation while not logged in to APIC.""" + message = _("Authorized APIC session not established") + + +class ApicHostNotConfigured(exceptions.NotAuthorized): + """The switch and port for the specified host are not configured.""" + message = _("The switch and port for host '%(host)s' are not configured") + + +class ApicManagedObjectNotSupported(exceptions.NeutronException): + """Attempted to use an unsupported Managed Object.""" + message = _("Managed Object '%(mo_class)s' is not supported") + + +class ApicMultipleVlanRanges(exceptions.NeutronException): + """Multiple VLAN ranges specified.""" + message = _("Multiple VLAN ranges are not supported in the APIC plugin. " + "Please specify a single VLAN range. " + "Current config: '%(vlan_ranges)s'") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py new file mode 100644 index 00000000..d5297df6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py @@ -0,0 +1,150 @@ +# Copyright (c) 2014 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. 
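# --- Illustrative sketch (not part of the patch) ---------------------------
# The APIC exception classes defined in exceptions.py above follow Neutron's
# message-template convention: each class carries a `message` format string,
# and the base exception interpolates it with the keyword arguments supplied
# at raise time.  The minimal stand-in below reproduces that behaviour so the
# pattern can be tried outside a Neutron deployment; `FakeNeutronException`,
# the example subclass, and the sample host name are assumptions made purely
# for this illustration and do not appear in the patch.

class FakeNeutronException(Exception):
    """Minimal stand-in for neutron.common.exceptions.NeutronException."""
    message = "An unknown exception occurred."

    def __init__(self, **kwargs):
        # Neutron formats the class-level template with the supplied kwargs.
        super(FakeNeutronException, self).__init__(self.message % kwargs)


class ApicHostNotConfiguredExample(FakeNeutronException):
    message = "The switch and port for host '%(host)s' are not configured"


if __name__ == '__main__':
    try:
        raise ApicHostNotConfiguredExample(host='compute-42')
    except ApicHostNotConfiguredExample as exc:
        # Prints: The switch and port for host 'compute-42' are not configured
        print(exc)
# ---------------------------------------------------------------------------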
+ +import netaddr + +from oslo.config import cfg + +from neutron.extensions import portbindings +from neutron.openstack.common import log +from neutron.plugins.common import constants +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.cisco.apic import apic_manager +from neutron.plugins.ml2.drivers.cisco.apic import exceptions as apic_exc + + +LOG = log.getLogger(__name__) + + +class APICMechanismDriver(api.MechanismDriver): + + def initialize(self): + self.apic_manager = apic_manager.APICManager() + + # Create a Phys domain and VLAN namespace + # Get vlan ns name + ns_name = cfg.CONF.ml2_cisco_apic.apic_vlan_ns_name + + # Grab vlan ranges + if len(cfg.CONF.ml2_type_vlan.network_vlan_ranges) != 1: + raise apic_exc.ApicMultipleVlanRanges( + cfg.CONF.ml2_type_vlan.network_vlan_ranges) + vlan_ranges = cfg.CONF.ml2_type_vlan.network_vlan_ranges[0] + if ',' in vlan_ranges: + raise apic_exc.ApicMultipleVlanRanges(vlan_ranges) + (vlan_min, vlan_max) = vlan_ranges.split(':')[-2:] + + # Create VLAN namespace + vlan_ns = self.apic_manager.ensure_vlan_ns_created_on_apic(ns_name, + vlan_min, + vlan_max) + phys_name = cfg.CONF.ml2_cisco_apic.apic_vmm_domain + # Create Physical domain + self.apic_manager.ensure_phys_domain_created_on_apic(phys_name, + vlan_ns) + + # Create entity profile + ent_name = cfg.CONF.ml2_cisco_apic.apic_entity_profile + self.apic_manager.ensure_entity_profile_created_on_apic(ent_name) + + # Create function profile + func_name = cfg.CONF.ml2_cisco_apic.apic_function_profile + self.apic_manager.ensure_function_profile_created_on_apic(func_name) + + # Create infrastructure on apic + self.apic_manager.ensure_infra_created_on_apic() + + def _perform_port_operations(self, context): + # Get tenant details from port context + tenant_id = context.current['tenant_id'] + + # Get network + network = context.network.current['id'] + net_name = context.network.current['name'] + + # Get port + port = context.current + + # Get segmentation id + if not context.bound_segment: + LOG.debug(_("Port %s is not bound to a segment"), port) + return + seg = None + if (context.bound_segment.get(api.NETWORK_TYPE) in + [constants.TYPE_VLAN]): + seg = context.bound_segment.get(api.SEGMENTATION_ID) + + # Check if a compute port + if not port['device_owner'].startswith('compute'): + # Not a compute port, return + return + + host = port.get(portbindings.HOST_ID) + # Check host that the dhcp agent is running on + filters = {'device_owner': 'network:dhcp', + 'network_id': network} + dhcp_ports = context._plugin.get_ports(context._plugin_context, + filters=filters) + dhcp_hosts = [] + for dhcp_port in dhcp_ports: + dhcp_hosts.append(dhcp_port.get(portbindings.HOST_ID)) + + # Create a static path attachment for this host/epg/switchport combo + self.apic_manager.ensure_tenant_created_on_apic(tenant_id) + if dhcp_hosts: + for dhcp_host in dhcp_hosts: + self.apic_manager.ensure_path_created_for_port(tenant_id, + network, + dhcp_host, seg, + net_name) + if host not in dhcp_hosts: + self.apic_manager.ensure_path_created_for_port(tenant_id, network, + host, seg, net_name) + + def create_port_postcommit(self, context): + self._perform_port_operations(context) + + def update_port_postcommit(self, context): + self._perform_port_operations(context) + + def create_network_postcommit(self, context): + net_id = context.current['id'] + tenant_id = context.current['tenant_id'] + net_name = context.current['name'] + + self.apic_manager.ensure_bd_created_on_apic(tenant_id, net_id) + # Create EPG 
for this network + self.apic_manager.ensure_epg_created_for_network(tenant_id, net_id, + net_name) + + def delete_network_postcommit(self, context): + net_id = context.current['id'] + tenant_id = context.current['tenant_id'] + + self.apic_manager.delete_bd_on_apic(tenant_id, net_id) + self.apic_manager.delete_epg_for_network(tenant_id, net_id) + + def create_subnet_postcommit(self, context): + tenant_id = context.current['tenant_id'] + network_id = context.current['network_id'] + gateway_ip = context.current['gateway_ip'] + cidr = netaddr.IPNetwork(context.current['cidr']) + netmask = str(cidr.prefixlen) + gateway_ip = gateway_ip + '/' + netmask + + self.apic_manager.ensure_subnet_created_on_apic(tenant_id, network_id, + gateway_ip) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/config.py new file mode 100644 index 00000000..3be44308 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/config.py @@ -0,0 +1,65 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + + +ml2_cisco_opts = [ + cfg.StrOpt('vlan_name_prefix', default='q-', + help=_("VLAN Name prefix")), + cfg.BoolOpt('svi_round_robin', default=False, + help=_("Distribute SVI interfaces over all switches")), + cfg.StrOpt('managed_physical_network', + help=_("The physical network managed by the switches.")), +] + + +cfg.CONF.register_opts(ml2_cisco_opts, "ml2_cisco") + +# +# Format for ml2_conf_cisco.ini 'ml2_mech_cisco_nexus' is: +# {('', ''): '', ...} +# +# Example: +# {('1.1.1.1', 'username'): 'admin', +# ('1.1.1.1', 'password'): 'mySecretPassword', +# ('1.1.1.1', 'compute1'): '1/1', ...} +# + + +class ML2MechCiscoConfig(object): + """ML2 Mechanism Driver Cisco Configuration class.""" + nexus_dict = {} + + def __init__(self): + self._create_ml2_mech_device_cisco_dictionary() + + def _create_ml2_mech_device_cisco_dictionary(self): + """Create the ML2 device cisco dictionary. + + Read data from the ml2_conf_cisco.ini device supported sections. 
+ """ + multi_parser = cfg.MultiConfigParser() + read_ok = multi_parser.read(cfg.CONF.config_file) + + if len(read_ok) != len(cfg.CONF.config_file): + raise cfg.Error(_("Some config files were not parsed properly")) + + for parsed_file in multi_parser.parsed: + for parsed_item in parsed_file.keys(): + dev_id, sep, dev_ip = parsed_item.partition(':') + if dev_id.lower() == 'ml2_mech_cisco_nexus': + for dev_key, value in parsed_file[parsed_item].items(): + self.nexus_dict[dev_ip, dev_key] = value[0] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py new file mode 100644 index 00000000..8db75282 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py @@ -0,0 +1,219 @@ +# Copyright 2013 OpenStack Foundation +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +ML2 Mechanism Driver for Cisco Nexus platforms. +""" + +from oslo.config import cfg + +from neutron.common import constants as n_const +from neutron.extensions import portbindings +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.cisco.nexus import config as conf +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as excep +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 as nxos_db +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver + +LOG = logging.getLogger(__name__) + + +class CiscoNexusMechanismDriver(api.MechanismDriver): + + """Cisco Nexus ML2 Mechanism Driver.""" + + def initialize(self): + # Create ML2 device dictionary from ml2_conf.ini entries. + conf.ML2MechCiscoConfig() + + # Extract configuration parameters from the configuration file. 
+ self._nexus_switches = conf.ML2MechCiscoConfig.nexus_dict + LOG.debug(_("nexus_switches found = %s"), self._nexus_switches) + + self.driver = nexus_network_driver.CiscoNexusDriver() + + def _valid_network_segment(self, segment): + return (cfg.CONF.ml2_cisco.managed_physical_network is None or + cfg.CONF.ml2_cisco.managed_physical_network == + segment[api.PHYSICAL_NETWORK]) + + def _get_vlanid(self, segment): + if (segment and segment[api.NETWORK_TYPE] == p_const.TYPE_VLAN and + self._valid_network_segment(segment)): + return segment.get(api.SEGMENTATION_ID) + + def _is_deviceowner_compute(self, port): + return port['device_owner'].startswith('compute') + + def _is_status_active(self, port): + return port['status'] == n_const.PORT_STATUS_ACTIVE + + def _get_switch_info(self, host_id): + host_connections = [] + for switch_ip, attr in self._nexus_switches: + if str(attr) == str(host_id): + port_id = self._nexus_switches[switch_ip, attr] + if ':' in port_id: + intf_type, port = port_id.split(':') + else: + intf_type, port = 'ethernet', port_id + host_connections.append((switch_ip, intf_type, port)) + + if host_connections: + return host_connections + else: + raise excep.NexusComputeHostNotConfigured(host=host_id) + + def _configure_nxos_db(self, vlan_id, device_id, host_id): + """Create the nexus database entry. + + Called during update precommit port event. + """ + host_connections = self._get_switch_info(host_id) + for switch_ip, intf_type, nexus_port in host_connections: + port_id = '%s:%s' % (intf_type, nexus_port) + nxos_db.add_nexusport_binding(port_id, str(vlan_id), switch_ip, + device_id) + + def _configure_switch_entry(self, vlan_id, device_id, host_id): + """Create a nexus switch entry. + + if needed, create a VLAN in the appropriate switch/port and + configure the appropriate interfaces for this VLAN. + + Called during update postcommit port event. + """ + vlan_name = cfg.CONF.ml2_cisco.vlan_name_prefix + str(vlan_id) + host_connections = self._get_switch_info(host_id) + + for switch_ip, intf_type, nexus_port in host_connections: + # Check to see if this is the first binding to use this vlan on the + # switch/port. Configure switch accordingly. + bindings = nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) + if len(bindings) == 1: + LOG.debug(_("Nexus: create & trunk vlan %s"), vlan_name) + self.driver.create_and_trunk_vlan( + switch_ip, vlan_id, vlan_name, intf_type, nexus_port) + else: + LOG.debug(_("Nexus: trunk vlan %s"), vlan_name) + self.driver.enable_vlan_on_trunk_int(switch_ip, vlan_id, + intf_type, nexus_port) + + def _delete_nxos_db(self, vlan_id, device_id, host_id): + """Delete the nexus database entry. + + Called during delete precommit port event. + """ + try: + rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id) + for row in rows: + nxos_db.remove_nexusport_binding( + row.port_id, row.vlan_id, row.switch_ip, row.instance_id) + except excep.NexusPortBindingNotFound: + return + + def _delete_switch_entry(self, vlan_id, device_id, host_id): + """Delete the nexus switch entry. + + By accessing the current db entries determine if switch + configuration can be removed. + + Called during update postcommit port event. + """ + host_connections = self._get_switch_info(host_id) + for switch_ip, intf_type, nexus_port in host_connections: + # if there are no remaining db entries using this vlan on this + # nexus switch port then remove vlan from the switchport trunk. 
+ port_id = '%s:%s' % (intf_type, nexus_port) + try: + nxos_db.get_port_vlan_switch_binding(port_id, vlan_id, + switch_ip) + except excep.NexusPortBindingNotFound: + self.driver.disable_vlan_on_trunk_int(switch_ip, vlan_id, + intf_type, nexus_port) + + # if there are no remaining db entries using this vlan on this + # nexus switch then remove the vlan. + try: + nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) + except excep.NexusPortBindingNotFound: + self.driver.delete_vlan(switch_ip, vlan_id) + + def _is_vm_migration(self, context): + if not context.bound_segment and context.original_bound_segment: + return (context.current.get(portbindings.HOST_ID) != + context.original.get(portbindings.HOST_ID)) + + def _port_action(self, port, segment, func): + """Verify configuration and then process event.""" + device_id = port.get('device_id') + host_id = port.get(portbindings.HOST_ID) + vlan_id = self._get_vlanid(segment) + + if vlan_id and device_id and host_id: + func(vlan_id, device_id, host_id) + else: + fields = "vlan_id " if not vlan_id else "" + fields += "device_id " if not device_id else "" + fields += "host_id" if not host_id else "" + raise excep.NexusMissingRequiredFields(fields=fields) + + def update_port_precommit(self, context): + """Update port pre-database transaction commit event.""" + + # if VM migration is occurring then remove previous database entry + # else process update event. + if self._is_vm_migration(context): + self._port_action(context.original, + context.original_bound_segment, + self._delete_nxos_db) + else: + if (self._is_deviceowner_compute(context.current) and + self._is_status_active(context.current)): + self._port_action(context.current, + context.bound_segment, + self._configure_nxos_db) + + def update_port_postcommit(self, context): + """Update port non-database commit event.""" + + # if VM migration is occurring then remove previous nexus switch entry + # else process update event. + if self._is_vm_migration(context): + self._port_action(context.original, + context.original_bound_segment, + self._delete_switch_entry) + else: + if (self._is_deviceowner_compute(context.current) and + self._is_status_active(context.current)): + self._port_action(context.current, + context.bound_segment, + self._configure_switch_entry) + + def delete_port_precommit(self, context): + """Delete port pre-database commit event.""" + if self._is_deviceowner_compute(context.current): + self._port_action(context.current, + context.bound_segment, + self._delete_nxos_db) + + def delete_port_postcommit(self, context): + """Delete port non-database commit event.""" + if self._is_deviceowner_compute(context.current): + self._port_action(context.current, + context.bound_segment, + self._delete_switch_entry) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py new file mode 100644 index 00000000..081b0d0a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py @@ -0,0 +1,143 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import sqlalchemy.orm.exc as sa_exc + +import neutron.db.api as db +from neutron.openstack.common import log as logging +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_models_v2 + + +LOG = logging.getLogger(__name__) + + +def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Lists a nexusport binding.""" + LOG.debug(_("get_nexusport_binding() called")) + return _lookup_all_nexus_bindings(port_id=port_id, + vlan_id=vlan_id, + switch_ip=switch_ip, + instance_id=instance_id) + + +def get_nexusvlan_binding(vlan_id, switch_ip): + """Lists a vlan and switch binding.""" + LOG.debug(_("get_nexusvlan_binding() called")) + return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip) + + +def add_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Adds a nexusport binding.""" + LOG.debug(_("add_nexusport_binding() called")) + session = db.get_session() + binding = nexus_models_v2.NexusPortBinding(port_id=port_id, + vlan_id=vlan_id, + switch_ip=switch_ip, + instance_id=instance_id) + session.add(binding) + session.flush() + return binding + + +def remove_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): + """Removes a nexusport binding.""" + LOG.debug(_("remove_nexusport_binding() called")) + session = db.get_session() + binding = _lookup_all_nexus_bindings(session=session, + vlan_id=vlan_id, + switch_ip=switch_ip, + port_id=port_id, + instance_id=instance_id) + for bind in binding: + session.delete(bind) + session.flush() + return binding + + +def update_nexusport_binding(port_id, new_vlan_id): + """Updates nexusport binding.""" + if not new_vlan_id: + LOG.warning(_("update_nexusport_binding called with no vlan")) + return + LOG.debug(_("update_nexusport_binding called")) + session = db.get_session() + binding = _lookup_one_nexus_binding(session=session, port_id=port_id) + binding.vlan_id = new_vlan_id + session.merge(binding) + session.flush() + return binding + + +def get_nexusvm_bindings(vlan_id, instance_id): + """Lists nexusvm bindings.""" + LOG.debug(_("get_nexusvm_bindings() called")) + return _lookup_all_nexus_bindings(instance_id=instance_id, + vlan_id=vlan_id) + + +def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip): + """Lists nexusvm bindings.""" + LOG.debug(_("get_port_vlan_switch_binding() called")) + return _lookup_all_nexus_bindings(port_id=port_id, + switch_ip=switch_ip, + vlan_id=vlan_id) + + +def get_port_switch_bindings(port_id, switch_ip): + """List all vm/vlan bindings on a Nexus switch port.""" + LOG.debug(_("get_port_switch_bindings() called, " + "port:'%(port_id)s', switch:'%(switch_ip)s'"), + {'port_id': port_id, 'switch_ip': switch_ip}) + try: + return _lookup_all_nexus_bindings(port_id=port_id, + switch_ip=switch_ip) + except c_exc.NexusPortBindingNotFound: + pass + + +def _lookup_nexus_bindings(query_type, session=None, **bfilter): + """Look up 'query_type' Nexus bindings matching the filter. 
+ + :param query_type: 'all', 'one' or 'first' + :param session: db session + :param bfilter: filter for bindings query + :return: bindings if query gave a result, else + raise NexusPortBindingNotFound. + """ + if session is None: + session = db.get_session() + query_method = getattr(session.query( + nexus_models_v2.NexusPortBinding).filter_by(**bfilter), query_type) + try: + bindings = query_method() + if bindings: + return bindings + except sa_exc.NoResultFound: + pass + raise c_exc.NexusPortBindingNotFound(**bfilter) + + +def _lookup_all_nexus_bindings(session=None, **bfilter): + return _lookup_nexus_bindings('all', session, **bfilter) + + +def _lookup_one_nexus_binding(session=None, **bfilter): + return _lookup_nexus_bindings('one', session, **bfilter) + + +def _lookup_first_nexus_binding(session=None, **bfilter): + return _lookup_nexus_bindings('first', session, **bfilter) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py new file mode 100644 index 00000000..983678d1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py @@ -0,0 +1,171 @@ +# Copyright 2013 OpenStack Foundation +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Implements a Nexus-OS NETCONF over SSHv2 API Client +""" + +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.ml2.drivers.cisco.nexus import config as conf +from neutron.plugins.ml2.drivers.cisco.nexus import constants as const +from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as cexc +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 +from neutron.plugins.ml2.drivers.cisco.nexus import nexus_snippets as snipp + +LOG = logging.getLogger(__name__) + + +class CiscoNexusDriver(object): + """Nexus Driver Main Class.""" + def __init__(self): + self.ncclient = None + self.nexus_switches = conf.ML2MechCiscoConfig.nexus_dict + self.connections = {} + + def _import_ncclient(self): + """Import the NETCONF client (ncclient) module. + + The ncclient module is not installed as part of the normal Neutron + distributions. It is imported dynamically in this module so that + the import can be mocked, allowing unit testing without requiring + the installation of ncclient. + + """ + return importutils.import_module('ncclient.manager') + + def _edit_config(self, nexus_host, target='running', config='', + allowed_exc_strs=None): + """Modify switch config for a target config type. 
+ + :param nexus_host: IP address of switch to configure + :param target: Target config type + :param config: Configuration string in XML format + :param allowed_exc_strs: Exceptions which have any of these strings + as a subset of their exception message + (str(exception)) can be ignored + + :raises: NexusConfigFailed + + """ + if not allowed_exc_strs: + allowed_exc_strs = [] + mgr = self.nxos_connect(nexus_host) + try: + mgr.edit_config(target, config=config) + except Exception as e: + for exc_str in allowed_exc_strs: + if exc_str in str(e): + break + else: + # Raise a Neutron exception. Include a description of + # the original ncclient exception. + raise cexc.NexusConfigFailed(config=config, exc=e) + + def nxos_connect(self, nexus_host): + """Make SSH connection to the Nexus Switch.""" + if getattr(self.connections.get(nexus_host), 'connected', None): + return self.connections[nexus_host] + + if not self.ncclient: + self.ncclient = self._import_ncclient() + nexus_ssh_port = int(self.nexus_switches[nexus_host, 'ssh_port']) + nexus_user = self.nexus_switches[nexus_host, const.USERNAME] + nexus_password = self.nexus_switches[nexus_host, const.PASSWORD] + try: + man = self.ncclient.connect(host=nexus_host, + port=nexus_ssh_port, + username=nexus_user, + password=nexus_password) + self.connections[nexus_host] = man + except Exception as e: + # Raise a Neutron exception. Include a description of + # the original ncclient exception. + raise cexc.NexusConnectFailed(nexus_host=nexus_host, exc=e) + + return self.connections[nexus_host] + + def create_xml_snippet(self, customized_config): + """Create XML snippet. + + Creates the Proper XML structure for the Nexus Switch Configuration. + """ + conf_xml_snippet = snipp.EXEC_CONF_SNIPPET % (customized_config) + return conf_xml_snippet + + def create_vlan(self, nexus_host, vlanid, vlanname): + """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" + confstr = self.create_xml_snippet( + snipp.CMD_VLAN_CONF_SNIPPET % (vlanid, vlanname)) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + # Enable VLAN active and no-shutdown states. Some versions of + # Nexus switch do not allow state changes for the extended VLAN + # range (1006-4094), but these errors can be ignored (default + # values are appropriate). + for snippet in [snipp.CMD_VLAN_ACTIVE_SNIPPET, + snipp.CMD_VLAN_NO_SHUTDOWN_SNIPPET]: + try: + confstr = self.create_xml_snippet(snippet % vlanid) + self._edit_config( + nexus_host, + target='running', + config=confstr, + allowed_exc_strs=["Can't modify state for extended", + "Command is only allowed on VLAN"]) + except cexc.NexusConfigFailed: + with excutils.save_and_reraise_exception(): + self.delete_vlan(nexus_host, vlanid) + + def delete_vlan(self, nexus_host, vlanid): + """Delete a VLAN on Nexus Switch given the VLAN ID.""" + confstr = snipp.CMD_NO_VLAN_CONF_SNIPPET % vlanid + confstr = self.create_xml_snippet(confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def enable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type, + interface): + """Enable a VLAN on a trunk interface.""" + # If more than one VLAN is configured on this interface then + # include the 'add' keyword. 
+ if len(nexus_db_v2.get_port_switch_bindings( + '%s:%s' % (intf_type, interface), nexus_host)) == 1: + snippet = snipp.CMD_INT_VLAN_SNIPPET + else: + snippet = snipp.CMD_INT_VLAN_ADD_SNIPPET + confstr = snippet % (intf_type, interface, vlanid, intf_type) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def disable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type, + interface): + """Disable a VLAN on a trunk interface.""" + confstr = (snipp.CMD_NO_VLAN_INT_SNIPPET % + (intf_type, interface, vlanid, intf_type)) + confstr = self.create_xml_snippet(confstr) + LOG.debug(_("NexusDriver: %s"), confstr) + self._edit_config(nexus_host, target='running', config=confstr) + + def create_and_trunk_vlan(self, nexus_host, vlan_id, vlan_name, + intf_type, nexus_port): + """Create VLAN and trunk it on the specified ports.""" + self.create_vlan(nexus_host, vlan_id, vlan_name) + LOG.debug(_("NexusDriver created VLAN: %s"), vlan_id) + if nexus_port: + self.enable_vlan_on_trunk_int(nexus_host, vlan_id, intf_type, + nexus_port) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py new file mode 100644 index 00000000..fb38e419 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py @@ -0,0 +1,200 @@ +# Copyright 2013 OpenStack Foundation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +""" +Cisco Nexus-OS XML-based configuration snippets. +""" + +import logging + + +LOG = logging.getLogger(__name__) + + +# The following are standard strings, messages used to communicate with Nexus. 
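The CiscoNexusDriver above drives the switch by %-formatting the snippet templates in this module, wrapping the result in EXEC_CONF_SNIPPET via create_xml_snippet(), and pushing it over NETCONF/SSH with ncclient. A rough standalone sketch of that flow (not part of the patch; host and credentials are hypothetical, and the snippet constants are the ones defined below):

    from ncclient import manager

    from neutron.plugins.ml2.drivers.cisco.nexus import nexus_snippets as snipp

    # Rough equivalent of nxos_connect() + create_vlan(): open a
    # NETCONF-over-SSH session and push a VLAN definition to the
    # running configuration.
    confstr = snipp.EXEC_CONF_SNIPPET % (
        snipp.CMD_VLAN_CONF_SNIPPET % (1000, 'q-1000'))
    with manager.connect(host='10.0.0.5', port=22, username='admin',
                         password='secret', hostkey_verify=False) as conn:
        conn.edit_config(target='running', config=confstr)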
+EXEC_CONF_SNIPPET = """ + + + <__XML__MODE__exec_configure>%s + + + +""" + +CMD_VLAN_CONF_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + %s + + + + +""" + +CMD_VLAN_ACTIVE_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + active + + + + +""" + +CMD_VLAN_NO_SHUTDOWN_SNIPPET = """ + + + <__XML__PARAM_value>%s + <__XML__MODE_vlan> + + + + + + +""" + +CMD_NO_VLAN_CONF_SNIPPET = """ + + + + <__XML__PARAM_value>%s + + + +""" + +CMD_INT_VLAN_HEADER = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + """ + +CMD_VLAN_ID = """ + %s""" + +CMD_VLAN_ADD_ID = """ + %s + """ % CMD_VLAN_ID + +CMD_INT_VLAN_TRAILER = """ + + + + + + + +""" + +CMD_INT_VLAN_SNIPPET = (CMD_INT_VLAN_HEADER + + CMD_VLAN_ID + + CMD_INT_VLAN_TRAILER) + +CMD_INT_VLAN_ADD_SNIPPET = (CMD_INT_VLAN_HEADER + + CMD_VLAN_ADD_ID + + CMD_INT_VLAN_TRAILER) + +CMD_PORT_TRUNK = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + + + + + + + +""" + +CMD_NO_SWITCHPORT = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + + + + +""" + +CMD_NO_VLAN_INT_SNIPPET = """ + + <%s> + %s + <__XML__MODE_if-ethernet-switch> + + + + + + + %s + + + + + + + + +""" + +CMD_VLAN_SVI_SNIPPET = """ + + + %s + <__XML__MODE_vlan> + + + + +
+
%s
+
+
+ +
+
+""" + +CMD_NO_VLAN_SVI_SNIPPET = """ + + + + %s + + + +""" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/README b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/README new file mode 100644 index 00000000..46bb27e5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/README @@ -0,0 +1,41 @@ +Neutron ML2 l2 population Mechanism Drivers + +l2 population (l2pop) mechanism drivers implements the ML2 driver to improve +open source plugins overlay implementations (VXLAN with Linux bridge and +GRE/VXLAN with OVS). This mechanism driver is implemented in ML2 to propagate +the forwarding information among agents using a common RPC API. + +More informations could be found on the wiki page [1]. + +VXLAN Linux kernel: +------------------- +The VXLAN Linux kernel module provide all necessary functionalities to populate +the forwarding table and local ARP responder tables. This module appears on +release 3.7 of the vanilla Linux kernel in experimental: +- 3.8: first stable release, no edge replication (multicast necessary), +- 3.9: edge replication only for the broadcasted packets, +- 3.11: edge replication for broadcast, multicast and unknown packets. + +Note: Some distributions (like RHEL) have backported this module on precedent + kernel version. + +OpenvSwitch: +------------ +The OVS OpenFlow tables provide all of the necessary functionality to populate +the forwarding table and local ARP responder tables. +A wiki page describe how the flow tables did evolve on OVS agents: +- [2] without local ARP responder +- [3] with local ARP responder. /!\ This functionality is only available since + the development branch 2.1. It's possible + to disable (enable by default) it through + the flag 'arp_responder'. /!\ + + +Note: A difference persists between the LB and OVS agents when they are used + with the l2-pop mechanism driver (and local ARP responder available). The + LB agent will drop unknown unicast (VXLAN bridge mode), whereas the OVS + agent will flood it. + +[1] https://wiki.openstack.org/wiki/L2population_blueprint +[2] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic +[3] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic_with_local_ARP_responder \ No newline at end of file diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/db.py new file mode 100644 index 00000000..a69bd2e5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/db.py @@ -0,0 +1,113 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from sqlalchemy import sql + +from neutron.common import constants as const +from neutron.db import agents_db +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import models_v2 +from neutron.openstack.common import jsonutils +from neutron.openstack.common import timeutils +from neutron.plugins.ml2.drivers.l2pop import constants as l2_const +from neutron.plugins.ml2 import models as ml2_models + + +class L2populationDbMixin(base_db.CommonDbMixin): + + def get_agent_ip_by_host(self, session, agent_host): + agent = self.get_agent_by_host(session, agent_host) + if agent: + return self.get_agent_ip(agent) + + def get_agent_ip(self, agent): + configuration = jsonutils.loads(agent.configurations) + return configuration.get('tunneling_ip') + + def get_agent_uptime(self, agent): + return timeutils.delta_seconds(agent.started_at, + agent.heartbeat_timestamp) + + def get_agent_tunnel_types(self, agent): + configuration = jsonutils.loads(agent.configurations) + return configuration.get('tunnel_types') + + def get_agent_by_host(self, session, agent_host): + with session.begin(subtransactions=True): + query = session.query(agents_db.Agent) + query = query.filter(agents_db.Agent.host == agent_host, + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query.first() + + def get_network_ports(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.PortBinding, + agents_db.Agent) + query = query.join(agents_db.Agent, + agents_db.Agent.host == + ml2_models.PortBinding.host) + query = query.join(models_v2.Port) + query = query.filter(models_v2.Port.network_id == network_id, + models_v2.Port.admin_state_up == sql.true(), + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query + + def get_nondvr_network_ports(self, session, network_id): + query = self.get_network_ports(session, network_id) + return query.filter(models_v2.Port.device_owner != + const.DEVICE_OWNER_DVR_INTERFACE) + + def get_dvr_network_ports(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.DVRPortBinding, + agents_db.Agent) + query = query.join(agents_db.Agent, + agents_db.Agent.host == + ml2_models.DVRPortBinding.host) + query = query.join(models_v2.Port) + query = query.filter(models_v2.Port.network_id == network_id, + models_v2.Port.admin_state_up == sql.true(), + models_v2.Port.device_owner == + const.DEVICE_OWNER_DVR_INTERFACE, + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query + + def get_agent_network_active_port_count(self, session, agent_host, + network_id): + with session.begin(subtransactions=True): + query = session.query(models_v2.Port) + query1 = query.join(ml2_models.PortBinding) + query1 = query1.filter(models_v2.Port.network_id == network_id, + models_v2.Port.status == + const.PORT_STATUS_ACTIVE, + models_v2.Port.device_owner != + const.DEVICE_OWNER_DVR_INTERFACE, + ml2_models.PortBinding.host == agent_host) + query2 = query.join(ml2_models.DVRPortBinding) + query2 = query2.filter(models_v2.Port.network_id == network_id, + ml2_models.DVRPortBinding.status == + const.PORT_STATUS_ACTIVE, + models_v2.Port.device_owner == + const.DEVICE_OWNER_DVR_INTERFACE, + ml2_models.DVRPortBinding.host == + agent_host) + return (query1.count() + query2.count()) diff --git 
a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py new file mode 100644 index 00000000..147ee379 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py @@ -0,0 +1,302 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from oslo.config import cfg + +from neutron.common import constants as const +from neutron import context as n_context +from neutron.db import api as db_api +from neutron.openstack.common import log as logging +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.l2pop import config # noqa +from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db +from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc + +LOG = logging.getLogger(__name__) + + +class L2populationMechanismDriver(api.MechanismDriver, + l2pop_db.L2populationDbMixin): + + def __init__(self): + super(L2populationMechanismDriver, self).__init__() + self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() + + def initialize(self): + LOG.debug(_("Experimental L2 population driver")) + self.rpc_ctx = n_context.get_admin_context_without_session() + self.migrated_ports = {} + self.remove_fdb_entries = {} + + def _get_port_fdb_entries(self, port): + return [[port['mac_address'], port['device_owner'], + ip['ip_address']] for ip in port['fixed_ips']] + + def _get_agent_host(self, context, port): + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + agent_host = context.binding.host + else: + agent_host = port['binding:host_id'] + return agent_host + + def delete_port_precommit(self, context): + # TODO(matrohon): revisit once the original bound segment will be + # available in delete_port_postcommit. 
in delete_port_postcommit + # agent_active_ports will be equal to 0, and the _update_port_down + # won't need agent_active_ports_count_for_flooding anymore + port = context.current + agent_host = self._get_agent_host(context, port) + + if port['id'] not in self.remove_fdb_entries: + self.remove_fdb_entries[port['id']] = {} + + self.remove_fdb_entries[port['id']][agent_host] = ( + self._update_port_down(context, port, 1)) + + def delete_port_postcommit(self, context): + port = context.current + agent_host = self._get_agent_host(context, port) + + if port['id'] in self.remove_fdb_entries: + for agent_host in list(self.remove_fdb_entries[port['id']]): + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, + self.remove_fdb_entries[port['id']][agent_host]) + self.remove_fdb_entries[port['id']].pop(agent_host, 0) + self.remove_fdb_entries.pop(port['id'], 0) + + def _get_diff_ips(self, orig, port): + orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) + port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) + + # check if an ip has been added or removed + orig_chg_ips = orig_ips.difference(port_ips) + port_chg_ips = port_ips.difference(orig_ips) + + if orig_chg_ips or port_chg_ips: + return orig_chg_ips, port_chg_ips + + def _fixed_ips_changed(self, context, orig, port, diff_ips): + orig_ips, port_ips = diff_ips + + port_infos = self._get_port_infos(context, orig) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + orig_mac_ip = [[port['mac_address'], port['device_owner'], ip] + for ip in orig_ips] + port_mac_ip = [[port['mac_address'], port['device_owner'], ip] + for ip in port_ips] + + upd_fdb_entries = {port['network_id']: {agent_ip: {}}} + + ports = upd_fdb_entries[port['network_id']][agent_ip] + if orig_mac_ip: + ports['before'] = orig_mac_ip + + if port_mac_ip: + ports['after'] = port_mac_ip + + self.L2populationAgentNotify.update_fdb_entries( + self.rpc_ctx, {'chg_ip': upd_fdb_entries}) + + return True + + def update_port_postcommit(self, context): + port = context.current + orig = context.original + + diff_ips = self._get_diff_ips(orig, port) + if diff_ips: + self._fixed_ips_changed(context, orig, port, diff_ips) + # TODO(vivek): DVR may need more robust handling of binding:host_id key + if (port.get('binding:host_id') != orig.get('binding:host_id') + and port['status'] == const.PORT_STATUS_ACTIVE + and not self.migrated_ports.get(orig['id'])): + # The port has been migrated. 
We have to store the original + # binding to send appropriate fdb once the port will be set + # on the destination host + self.migrated_ports[orig['id']] = orig + elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + binding = context.binding + if binding.status == const.PORT_STATUS_ACTIVE: + self._update_port_up(context) + if binding.status == const.PORT_STATUS_DOWN: + agent_host = binding.host + fdb_entries = {agent_host: + self._update_port_down(context, port)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + elif port['status'] != orig['status']: + agent_host = port['binding:host_id'] + if port['status'] == const.PORT_STATUS_ACTIVE: + self._update_port_up(context) + elif port['status'] == const.PORT_STATUS_DOWN: + fdb_entries = {agent_host: self._update_port_down(context, + port)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + elif port['status'] == const.PORT_STATUS_BUILD: + orig = self.migrated_ports.pop(port['id'], None) + if orig: + # this port has been migrated : remove its entries from fdb + fdb_entries = {agent_host: self._update_port_down(context, + orig)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + + def _get_port_infos(self, context, port): + agent_host = self._get_agent_host(context, port) + if not agent_host: + return + + session = db_api.get_session() + agent = self.get_agent_by_host(session, agent_host) + if not agent: + return + + agent_ip = self.get_agent_ip(agent) + if not agent_ip: + LOG.warning(_("Unable to retrieve the agent ip, check the agent " + "configuration.")) + return + + segment = context.bound_segment + if not segment: + LOG.warning(_("Port %(port)s updated by agent %(agent)s " + "isn't bound to any segment"), + {'port': port['id'], 'agent': agent}) + return + + tunnel_types = self.get_agent_tunnel_types(agent) + if segment['network_type'] not in tunnel_types: + return + + fdb_entries = self._get_port_fdb_entries(port) + + return agent, agent_host, agent_ip, segment, fdb_entries + + def _update_port_up(self, context): + port_context = context.current + port_infos = self._get_port_infos(context, port_context) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + network_id = port_context['network_id'] + + session = db_api.get_session() + agent_active_ports = self.get_agent_network_active_port_count( + session, agent_host, network_id) + + other_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {agent_ip: []}}} + + if agent_active_ports == 1 or ( + self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time): + # First port activated on current agent in this network, + # we have to provide it with the whole list of fdb entries + agent_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {}}} + ports = agent_fdb_entries[network_id]['ports'] + + nondvr_network_ports = self.get_nondvr_network_ports(session, + network_id) + for network_port in nondvr_network_ports: + binding, agent = network_port + if agent.host == agent_host: + continue + + ip = self.get_agent_ip(agent) + if not ip: + LOG.debug(_("Unable to retrieve the agent ip, check " + "the agent %(agent_host)s configuration."), + {'agent_host': agent.host}) + continue + + agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) + agent_ports += 
self._get_port_fdb_entries(binding.port) + ports[ip] = agent_ports + + dvr_network_ports = self.get_dvr_network_ports(session, network_id) + for network_port in dvr_network_ports: + binding, agent = network_port + if agent.host == agent_host: + continue + + ip = self.get_agent_ip(agent) + if not ip: + LOG.debug(_("Unable to retrieve the agent ip, check " + "the agent %(agent_host)s configuration."), + {'agent_host': agent.host}) + continue + + agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) + ports[ip] = agent_ports + + # And notify other agents to add flooding entry + other_fdb_entries[network_id]['ports'][agent_ip].append( + const.FLOODING_ENTRY) + + if ports.keys(): + self.L2populationAgentNotify.add_fdb_entries( + self.rpc_ctx, agent_fdb_entries, agent_host) + + # Notify other agents to add fdb rule for current port + if port_context['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: + other_fdb_entries[network_id]['ports'][agent_ip] += ( + port_fdb_entries) + + self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, + other_fdb_entries) + + def _update_port_down(self, context, port_context, + agent_active_ports_count_for_flooding=0): + port_infos = self._get_port_infos(context, port_context) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + network_id = port_context['network_id'] + + session = db_api.get_session() + agent_active_ports = self.get_agent_network_active_port_count( + session, agent_host, network_id) + + other_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {agent_ip: []}}} + if agent_active_ports == agent_active_ports_count_for_flooding: + # Agent is removing its last activated port in this network, + # other agents needs to be notified to delete their flooding entry. + other_fdb_entries[network_id]['ports'][agent_ip].append( + const.FLOODING_ENTRY) + + # Notify other agents to remove fdb rule for current port + if port_context['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: + fdb_entries = self._get_port_fdb_entries(port_context) + other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries + + return other_fdb_entries diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/rpc.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/rpc.py new file mode 100644 index 00000000..7be7b8a6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/l2pop/rpc.py @@ -0,0 +1,86 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class L2populationAgentNotifyAPI(n_rpc.RpcProxy): + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic=topics.AGENT): + super(L2populationAgentNotifyAPI, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + + self.topic_l2pop_update = topics.get_topic_name(topic, + topics.L2POPULATION, + topics.UPDATE) + + def _notification_fanout(self, context, method, fdb_entries): + LOG.debug(_('Fanout notify l2population agents at %(topic)s ' + 'the message %(method)s with %(fdb_entries)s'), + {'topic': self.topic, + 'method': method, + 'fdb_entries': fdb_entries}) + + self.fanout_cast(context, + self.make_msg(method, fdb_entries=fdb_entries), + topic=self.topic_l2pop_update) + + def _notification_host(self, context, method, fdb_entries, host): + LOG.debug(_('Notify l2population agent %(host)s at %(topic)s the ' + 'message %(method)s with %(fdb_entries)s'), + {'host': host, + 'topic': self.topic, + 'method': method, + 'fdb_entries': fdb_entries}) + self.cast(context, + self.make_msg(method, fdb_entries=fdb_entries), + topic='%s.%s' % (self.topic_l2pop_update, host)) + + def add_fdb_entries(self, context, fdb_entries, host=None): + if fdb_entries: + if host: + self._notification_host(context, 'add_fdb_entries', + fdb_entries, host) + else: + self._notification_fanout(context, 'add_fdb_entries', + fdb_entries) + + def remove_fdb_entries(self, context, fdb_entries, host=None): + if fdb_entries: + if host: + self._notification_host(context, 'remove_fdb_entries', + fdb_entries, host) + else: + self._notification_fanout(context, 'remove_fdb_entries', + fdb_entries) + + def update_fdb_entries(self, context, fdb_entries, host=None): + if fdb_entries: + if host: + self._notification_host(context, 'update_fdb_entries', + fdb_entries, host) + else: + self._notification_fanout(context, 'update_fdb_entries', + fdb_entries) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_agent.py new file mode 100644 index 00000000..d0aad3ae --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_agent.py @@ -0,0 +1,149 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import six + +from neutron.extensions import portbindings +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class AgentMechanismDriverBase(api.MechanismDriver): + """Base class for drivers that attach to networks using an L2 agent. 
+ + The AgentMechanismDriverBase provides common code for mechanism + drivers that integrate the ml2 plugin with L2 agents. Port binding + with this driver requires the driver's associated agent to be + running on the port's host, and that agent to have connectivity to + at least one segment of the port's network. + + MechanismDrivers using this base class must pass the agent type to + __init__(), and must implement try_to_bind_segment_for_agent(). + """ + + def __init__(self, agent_type, + supported_vnic_types=[portbindings.VNIC_NORMAL]): + """Initialize base class for specific L2 agent type. + + :param agent_type: Constant identifying agent type in agents_db + :param supported_vnic_types: The binding:vnic_type values we can bind + """ + self.agent_type = agent_type + self.supported_vnic_types = supported_vnic_types + + def initialize(self): + pass + + def bind_port(self, context): + LOG.debug(_("Attempting to bind port %(port)s on " + "network %(network)s"), + {'port': context.current['id'], + 'network': context.network.current['id']}) + vnic_type = context.current.get(portbindings.VNIC_TYPE, + portbindings.VNIC_NORMAL) + if vnic_type not in self.supported_vnic_types: + LOG.debug(_("Refusing to bind due to unsupported vnic_type: %s"), + vnic_type) + return + for agent in context.host_agents(self.agent_type): + LOG.debug(_("Checking agent: %s"), agent) + if agent['alive']: + for segment in context.network.network_segments: + if self.try_to_bind_segment_for_agent(context, segment, + agent): + LOG.debug(_("Bound using segment: %s"), segment) + return + else: + LOG.warning(_("Attempting to bind with dead agent: %s"), + agent) + + @abc.abstractmethod + def try_to_bind_segment_for_agent(self, context, segment, agent): + """Try to bind with segment for agent. + + :param context: PortContext instance describing the port + :param segment: segment dictionary describing segment to bind + :param agent: agents_db entry describing agent to bind + :returns: True iff segment has been bound for agent + + Called inside transaction during bind_port() so that derived + MechanismDrivers can use agent_db data along with built-in + knowledge of the corresponding agent's capabilities to attempt + to bind to the specified network segment for the agent. + + If the segment can be bound for the agent, this function must + call context.set_binding() with appropriate values and then + return True. Otherwise, it must return False. + """ + + +@six.add_metaclass(abc.ABCMeta) +class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase): + """Base class for simple drivers using an L2 agent. + + The SimpleAgentMechanismDriverBase provides common code for + mechanism drivers that integrate the ml2 plugin with L2 agents, + where the binding:vif_type and binding:vif_details values are the + same for all bindings. Port binding with this driver requires the + driver's associated agent to be running on the port's host, and + that agent to have connectivity to at least one segment of the + port's network. + + MechanismDrivers using this base class must pass the agent type + and the values for binding:vif_type and binding:vif_details to + __init__(), and must implement check_segment_for_agent(). + """ + + def __init__(self, agent_type, vif_type, vif_details, + supported_vnic_types=[portbindings.VNIC_NORMAL]): + """Initialize base class for specific L2 agent type. 
+ + :param agent_type: Constant identifying agent type in agents_db + :param vif_type: Value for binding:vif_type when bound + :param vif_details: Dictionary with details for VIF driver when bound + :param supported_vnic_types: The binding:vnic_type values we can bind + """ + super(SimpleAgentMechanismDriverBase, self).__init__( + agent_type, supported_vnic_types) + self.vif_type = vif_type + self.vif_details = vif_details + + def try_to_bind_segment_for_agent(self, context, segment, agent): + if self.check_segment_for_agent(segment, agent): + context.set_binding(segment[api.ID], + self.vif_type, + self.vif_details) + return True + else: + return False + + @abc.abstractmethod + def check_segment_for_agent(self, segment, agent): + """Check if segment can be bound for agent. + + :param segment: segment dictionary describing segment to bind + :param agent: agents_db entry describing agent to bind + :returns: True iff segment can be bound for agent + + Called inside transaction during bind_port so that derived + MechanismDrivers can use agent_db data along with built-in + knowledge of the corresponding agent's capabilities to + determine whether or not the specified network segment can be + bound for the agent. + """ diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_arista/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_arista/config.py new file mode 100644 index 00000000..2f968c87 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_arista/config.py @@ -0,0 +1,70 @@ +# Copyright (c) 2013 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from oslo.config import cfg + +""" Arista ML2 Mechanism driver specific configuration knobs. + +Following are user configurable options for Arista ML2 Mechanism +driver. The eapi_username, eapi_password, and eapi_host are +required options. Region Name must be the same that is used by +Keystone service. This option is available to support multiple +OpenStack/Neutron controllers. +""" + +ARISTA_DRIVER_OPTS = [ + cfg.StrOpt('eapi_username', + default='', + help=_('Username for Arista EOS. This is required field. ' + 'If not set, all communications to Arista EOS' + 'will fail.')), + cfg.StrOpt('eapi_password', + default='', + secret=True, # do not expose value in the logs + help=_('Password for Arista EOS. This is required field. ' + 'If not set, all communications to Arista EOS ' + 'will fail.')), + cfg.StrOpt('eapi_host', + default='', + help=_('Arista EOS IP address. This is required field. ' + 'If not set, all communications to Arista EOS' + 'will fail.')), + cfg.BoolOpt('use_fqdn', + default=True, + help=_('Defines if hostnames are sent to Arista EOS as FQDNs ' + '("node1.domain.com") or as short names ("node1"). ' + 'This is optional. If not set, a value of "True" ' + 'is assumed.')), + cfg.IntOpt('sync_interval', + default=180, + help=_('Sync interval in seconds between Neutron plugin and ' + 'EOS. 
This interval defines how often the ' + 'synchronization is performed. This is an optional ' + 'field. If not set, a value of 180 seconds is ' + 'assumed.')), + cfg.StrOpt('region_name', + default='RegionOne', + help=_('Defines Region Name that is assigned to this OpenStack ' + 'Controller. This is useful when multiple ' + 'OpenStack/Neutron controllers are managing the same ' + 'Arista HW clusters. Note that this name must match ' + 'with the region name registered (or known) to keystone ' + 'service. Authentication with Keystone is performed by ' + 'EOS. This is optional. If not set, a value of ' + '"RegionOne" is assumed.')) +] + +cfg.CONF.register_opts(ARISTA_DRIVER_OPTS, "ml2_arista") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py new file mode 100644 index 00000000..c40b4a09 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py @@ -0,0 +1,128 @@ +# Copyright 2014 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Kevin Benton, Big Switch Networks, Inc. +import copy +import httplib + +import eventlet +from oslo.config import cfg + +from neutron import context as ctx +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import log +from neutron.plugins.bigswitch import config as pl_config +from neutron.plugins.bigswitch import plugin +from neutron.plugins.bigswitch import servermanager +from neutron.plugins.ml2 import driver_api as api + + +LOG = log.getLogger(__name__) + + +class BigSwitchMechanismDriver(plugin.NeutronRestProxyV2Base, + api.MechanismDriver): + + """Mechanism Driver for Big Switch Networks Controller. + + This driver relays the network create, update, delete + operations to the Big Switch Controller.
+ """ + + def initialize(self): + LOG.debug(_('Initializing driver')) + + # register plugin config opts + pl_config.register_config() + self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size) + # backend doesn't support bulk operations yet + self.native_bulk_support = False + + # init network ctrl connections + self.servers = servermanager.ServerPool() + self.servers.get_topo_function = self._get_all_data + self.servers.get_topo_function_args = {'get_ports': True, + 'get_floating_ips': False, + 'get_routers': False} + self.segmentation_types = ', '.join(cfg.CONF.ml2.type_drivers) + LOG.debug(_("Initialization done")) + + def create_network_postcommit(self, context): + # create network on the network controller + self._send_create_network(context.current) + + def update_network_postcommit(self, context): + # update network on the network controller + self._send_update_network(context.current) + + def delete_network_postcommit(self, context): + # delete network on the network controller + self._send_delete_network(context.current) + + def create_port_postcommit(self, context): + # create port on the network controller + port = self._prepare_port_for_controller(context) + if port: + self.async_port_create(port["network"]["tenant_id"], + port["network"]["id"], port) + + def update_port_postcommit(self, context): + # update port on the network controller + port = self._prepare_port_for_controller(context) + if port: + try: + self.servers.rest_update_port(port["network"]["tenant_id"], + port["network"]["id"], port) + except servermanager.RemoteRestError as e: + with excutils.save_and_reraise_exception() as ctxt: + if (cfg.CONF.RESTPROXY.auto_sync_on_failure and + e.status == httplib.NOT_FOUND and + servermanager.NXNETWORK in e.reason): + ctxt.reraise = False + LOG.error(_("Iconsistency with backend controller " + "triggering full synchronization.")) + topoargs = self.servers.get_topo_function_args + self._send_all_data( + send_ports=topoargs['get_ports'], + send_floating_ips=topoargs['get_floating_ips'], + send_routers=topoargs['get_routers'], + triggered_by_tenant=port["network"]["tenant_id"] + ) + + def delete_port_postcommit(self, context): + # delete port on the network controller + port = context.current + net = context.network.current + self.servers.rest_delete_port(net["tenant_id"], net["id"], port['id']) + + def _prepare_port_for_controller(self, context): + # make a copy so the context isn't changed for other drivers + port = copy.deepcopy(context.current) + net = context.network.current + port['network'] = net + port['bound_segment'] = context.bound_segment + actx = ctx.get_admin_context() + prepped_port = self._extend_port_dict_binding(actx, port) + prepped_port = self._map_state_and_status(prepped_port) + if (portbindings.HOST_ID not in prepped_port or + prepped_port[portbindings.HOST_ID] == ''): + LOG.warning(_("Ignoring port notification to controller because " + "of missing host ID.")) + # in ML2, controller doesn't care about ports without + # the host_id set + return False + return prepped_port diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mechanism_fslsdn.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mechanism_fslsdn.py new file mode 100644 index 00000000..514fd9b8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mechanism_fslsdn.py @@ -0,0 +1,288 @@ +# Copyright (c) 2014 Freescale Semiconductor +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# @author: Trinath Somanchi, Freescale, Inc + + +from neutronclient.v2_0 import client +from oslo.config import cfg + +from neutron.common import constants as n_const +from neutron.common import log +from neutron.extensions import portbindings +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.plugins.ml2 import driver_api as api + + +LOG = logging.getLogger(__name__) + +# CRD service options required for FSL SDN OS Mech Driver +ml2_fslsdn_opts = [ + cfg.StrOpt('crd_user_name', default='crd', + help=_("CRD service Username")), + cfg.StrOpt('crd_password', default='password', + secret='True', + help=_("CRD Service Password")), + cfg.StrOpt('crd_tenant_name', default='service', + help=_("CRD Tenant Name")), + cfg.StrOpt('crd_auth_url', + default='http://127.0.0.1:5000/v2.0/', + help=_("CRD Auth URL")), + cfg.StrOpt('crd_url', + default='http://127.0.0.1:9797', + help=_("URL for connecting to CRD service")), + cfg.IntOpt('crd_url_timeout', + default=30, + help=_("Timeout value for connecting to " + "CRD service in seconds")), + cfg.StrOpt('crd_region_name', + default='RegionOne', + help=_("Region name for connecting to " + "CRD Service in admin context")), + cfg.BoolOpt('crd_api_insecure', + default=False, + help=_("If set, ignore any SSL validation issues")), + cfg.StrOpt('crd_auth_strategy', + default='keystone', + help=_("Auth strategy for connecting to " + "neutron in admin context")), + cfg.StrOpt('crd_ca_certificates_file', + help=_("Location of ca certificates file to use for " + "CRD client requests.")), +] + +# Register the configuration option for crd service +# required for FSL SDN OS Mechanism driver +cfg.CONF.register_opts(ml2_fslsdn_opts, "ml2_fslsdn") + +# shortcut +FSLCONF = cfg.CONF.ml2_fslsdn + +SERVICE_TYPE = 'crd' + + +class FslsdnMechanismDriver(api.MechanismDriver): + + """Freescale SDN OS Mechanism Driver for ML2 Plugin.""" + + @log.log + def initialize(self): + """Initialize the Mechanism driver.""" + + self.vif_type = portbindings.VIF_TYPE_OVS + self.vif_details = {portbindings.CAP_PORT_FILTER: True} + LOG.info(_("Initializing CRD client... 
")) + crd_client_params = { + 'username': FSLCONF.crd_user_name, + 'tenant_name': FSLCONF.crd_tenant_name, + 'region_name': FSLCONF.crd_region_name, + 'password': FSLCONF.crd_password, + 'auth_url': FSLCONF.crd_auth_url, + 'auth_strategy': FSLCONF.crd_auth_strategy, + 'endpoint_url': FSLCONF.crd_url, + 'timeout': FSLCONF.crd_url_timeout, + 'insecure': FSLCONF.crd_api_insecure, + 'service_type': SERVICE_TYPE, + 'ca_cert': FSLCONF.crd_ca_certificates_file, + } + self._crdclient = client.Client(**crd_client_params) + + # Network Management + @staticmethod + @log.log + def _prepare_crd_network(network, segments): + """Helper function to create 'network' data.""" + + return {'network': + {'network_id': network['id'], + 'tenant_id': network['tenant_id'], + 'name': network['name'], + 'status': network['status'], + 'admin_state_up': network['admin_state_up'], + 'segments': segments, + }} + + def create_network_postcommit(self, context): + """Send create_network data to CRD service.""" + + network = context.current + segments = context.network_segments + body = self._prepare_crd_network(network, segments) + self._crdclient.create_network(body=body) + LOG.debug("create_network update sent to CRD Server: %s", body) + + def update_network_postcommit(self, context): + """Send update_network data to CRD service.""" + + network = context.current + segments = context.network_segments + body = self._prepare_crd_network(network, segments) + self._crdclient.update_network(network['id'], body=body) + LOG.debug("update_network update sent to CRD Server: %s", body) + + def delete_network_postcommit(self, context): + """Send delete_network data to CRD service.""" + + network = context.current + self._crdclient.delete_network(network['id']) + LOG.debug( + "delete_network update sent to CRD Server: %s", + network['id']) + + # Port Management + @staticmethod + def _prepare_crd_port(port): + """Helper function to prepare 'port' data.""" + + crd_subnet_id = '' + crd_ipaddress = '' + crd_sec_grps = '' + # Since CRD accepts one Fixed IP, + # so handle only one fixed IP per port. 
+ if len(port['fixed_ips']) > 1: + LOG.debug("More than one fixed IP exists - using first one.") + # check empty fixed_ips list, move on if one or more exists + if len(port['fixed_ips']) != 0: + crd_subnet_id = port['fixed_ips'][0]['subnet_id'] + crd_ipaddress = port['fixed_ips'][0]['ip_address'] + LOG.debug("Handling fixed IP {subnet_id:%(subnet)s, " + "ip_address:%(ip)s}", + {'subnet': crd_subnet_id, 'ip': crd_ipaddress}) + else: + LOG.debug("No fixed IPs found.") + if 'security_groups' in port: + crd_sec_grps = ','.join(port['security_groups']) + return {'port': + {'port_id': port['id'], + 'tenant_id': port['tenant_id'], + 'name': port['name'], + 'network_id': port['network_id'], + 'subnet_id': crd_subnet_id, + 'mac_address': port['mac_address'], + 'device_id': port['device_id'], + 'ip_address': crd_ipaddress, + 'admin_state_up': port['admin_state_up'], + 'status': port['status'], + 'device_owner': port['device_owner'], + 'security_groups': crd_sec_grps, + }} + + def create_port_postcommit(self, context): + """Send create_port data to CRD service.""" + + port = context.current + body = self._prepare_crd_port(port) + self._crdclient.create_port(body=body) + LOG.debug("create_port update sent to CRD Server: %s", body) + + def delete_port_postcommit(self, context): + """Send delete_port data to CRD service.""" + + port = context.current + self._crdclient.delete_port(port['id']) + LOG.debug("delete_port update sent to CRD Server: %s", port['id']) + + # Subnet Management + @staticmethod + @log.log + def _prepare_crd_subnet(subnet): + """Helper function to prepare 'subnet' data.""" + + crd_allocation_pools = '' + crd_dns_nameservers = '' + crd_host_routes = '' + # Handling Allocation IPs + if 'allocation_pools' in subnet: + a_pools = subnet['allocation_pools'] + crd_allocation_pools = ','.join(["%s-%s" % (p['start'], + p['end']) + for p in a_pools]) + # Handling Host Routes + if 'host_routes' in subnet: + crd_host_routes = ','.join(["%s-%s" % (r['destination'], + r['nexthop']) + for r in subnet['host_routes']]) + # Handling DNS Nameservers + if 'dns_nameservers' in subnet: + crd_dns_nameservers = ','.join(subnet['dns_nameservers']) + # return Subnet Data + return {'subnet': + {'subnet_id': subnet['id'], + 'tenant_id': subnet['tenant_id'], + 'name': subnet['name'], + 'network_id': subnet['network_id'], + 'ip_version': subnet['ip_version'], + 'cidr': subnet['cidr'], + 'gateway_ip': subnet['gateway_ip'], + 'dns_nameservers': crd_dns_nameservers, + 'allocation_pools': crd_allocation_pools, + 'host_routes': crd_host_routes, + }} + + def create_subnet_postcommit(self, context): + """Send create_subnet data to CRD service.""" + + subnet = context.current + body = self._prepare_crd_subnet(subnet) + self._crdclient.create_subnet(body=body) + LOG.debug("create_subnet update sent to CRD Server: %s", body) + + def update_subnet_postcommit(self, context): + """Send update_subnet data to CRD service.""" + + subnet = context.current + body = self._prepare_crd_subnet(subnet) + self._crdclient.update_subnet(subnet['id'], body=body) + LOG.debug("update_subnet update sent to CRD Server: %s", body) + + def delete_subnet_postcommit(self, context): + """Send delete_subnet data to CRD service.""" + + subnet = context.current + self._crdclient.delete_subnet(subnet['id']) + LOG.debug("delete_subnet update sent to CRD Server: %s", subnet['id']) + + def bind_port(self, context): + """Set porting binding data for use with nova.""" + + LOG.debug("Attempting to bind port %(port)s on " + "network %(network)s", + {'port': 
context.current['id'], + 'network': context.network.current['id']}) + # Prepared porting binding data + for segment in context.network.network_segments: + if self.check_segment(segment): + context.set_binding(segment[api.ID], + self.vif_type, + self.vif_details, + status=n_const.PORT_STATUS_ACTIVE) + LOG.debug("Bound using segment: %s", segment) + return + else: + LOG.debug("Refusing to bind port for segment ID %(id)s, " + "segment %(seg)s, phys net %(physnet)s, and " + "network type %(nettype)s", + {'id': segment[api.ID], + 'seg': segment[api.SEGMENTATION_ID], + 'physnet': segment[api.PHYSICAL_NETWORK], + 'nettype': segment[api.NETWORK_TYPE]}) + + @log.log + def check_segment(self, segment): + """Verify a segment is valid for the FSL SDN MechanismDriver.""" + + return segment[api.NETWORK_TYPE] in [constants.TYPE_VLAN, + constants.TYPE_VXLAN] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mechanism_odl.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mechanism_odl.py new file mode 100644 index 00000000..416e870d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mechanism_odl.py @@ -0,0 +1,374 @@ +# Copyright (c) 2013-2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Kyle Mestery, Cisco Systems, Inc. +# @author: Dave Tucker, Hewlett-Packard Development Company L.P. + +import time + +from oslo.config import cfg +import requests + +from neutron.common import constants as n_const +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.common import constants +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + +ODL_NETWORK = 'network' +ODL_NETWORKS = 'networks' +ODL_SUBNET = 'subnet' +ODL_SUBNETS = 'subnets' +ODL_PORT = 'port' +ODL_PORTS = 'ports' + +not_found_exception_map = {ODL_NETWORKS: n_exc.NetworkNotFound, + ODL_SUBNETS: n_exc.SubnetNotFound, + ODL_PORTS: n_exc.PortNotFound} + +odl_opts = [ + cfg.StrOpt('url', + help=_("HTTP URL of OpenDaylight REST interface.")), + cfg.StrOpt('username', + help=_("HTTP username for authentication")), + cfg.StrOpt('password', secret=True, + help=_("HTTP password for authentication")), + cfg.IntOpt('timeout', default=10, + help=_("HTTP timeout in seconds.")), + cfg.IntOpt('session_timeout', default=30, + help=_("Tomcat session timeout in minutes.")), +] + +cfg.CONF.register_opts(odl_opts, "ml2_odl") + + +def try_del(d, keys): + """Ignore key errors when deleting from a dictionary.""" + for key in keys: + try: + del d[key] + except KeyError: + pass + + +class OpendaylightAuthError(n_exc.NeutronException): + message = '%(msg)s' + + +class JsessionId(requests.auth.AuthBase): + + """Attaches the JSESSIONID and JSESSIONIDSSO cookies to an HTTP Request. 
+ + If the cookies are not available or when the session expires, a new + set of cookies are obtained. + """ + + def __init__(self, url, username, password): + """Initialization function for JsessionId.""" + + # NOTE(kmestery) The 'limit' paramater is intended to limit how much + # data is returned from ODL. This is not implemented in the Hydrogen + # release of OpenDaylight, but will be implemented in the Helium + # timeframe. Hydrogen will silently ignore this value. + self.url = str(url) + '/' + ODL_NETWORKS + '?limit=1' + self.username = username + self.password = password + self.auth_cookies = None + self.last_request = None + self.expired = None + self.session_timeout = cfg.CONF.ml2_odl.session_timeout * 60 + self.session_deadline = 0 + + def obtain_auth_cookies(self): + """Make a REST call to obtain cookies for ODL authenticiation.""" + + try: + r = requests.get(self.url, auth=(self.username, self.password)) + r.raise_for_status() + except requests.exceptions.HTTPError as e: + raise OpendaylightAuthError(msg=_("Failed to authenticate with " + "OpenDaylight: %s") % e) + except requests.exceptions.Timeout as e: + raise OpendaylightAuthError(msg=_("Authentication Timed" + " Out: %s") % e) + + jsessionid = r.cookies.get('JSESSIONID') + jsessionidsso = r.cookies.get('JSESSIONIDSSO') + if jsessionid and jsessionidsso: + self.auth_cookies = dict(JSESSIONID=jsessionid, + JSESSIONIDSSO=jsessionidsso) + + def __call__(self, r): + """Verify timestamp for Tomcat session timeout.""" + + if time.time() > self.session_deadline: + self.obtain_auth_cookies() + self.session_deadline = time.time() + self.session_timeout + r.prepare_cookies(self.auth_cookies) + return r + + +class OpenDaylightMechanismDriver(api.MechanismDriver): + + """Mechanism Driver for OpenDaylight. + + This driver was a port from the Tail-F NCS MechanismDriver. The API + exposed by ODL is slightly different from the API exposed by NCS, + but the general concepts are the same. + """ + auth = None + out_of_sync = True + + def initialize(self): + self.url = cfg.CONF.ml2_odl.url + self.timeout = cfg.CONF.ml2_odl.timeout + self.username = cfg.CONF.ml2_odl.username + self.password = cfg.CONF.ml2_odl.password + required_opts = ('url', 'username', 'password') + for opt in required_opts: + if not getattr(self, opt): + raise cfg.RequiredOptError(opt, 'ml2_odl') + self.auth = JsessionId(self.url, self.username, self.password) + self.vif_type = portbindings.VIF_TYPE_OVS + self.vif_details = {portbindings.CAP_PORT_FILTER: True} + + # Postcommit hooks are used to trigger synchronization. 
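+    # Each *_postcommit handler below simply delegates to synchronize():
+    # while the driver is marked out_of_sync it replays the full Neutron
+    # state via sync_full(), otherwise it pushes only the modified object
+    # via sync_object().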
+ + def create_network_postcommit(self, context): + self.synchronize('create', ODL_NETWORKS, context) + + def update_network_postcommit(self, context): + self.synchronize('update', ODL_NETWORKS, context) + + def delete_network_postcommit(self, context): + self.synchronize('delete', ODL_NETWORKS, context) + + def create_subnet_postcommit(self, context): + self.synchronize('create', ODL_SUBNETS, context) + + def update_subnet_postcommit(self, context): + self.synchronize('update', ODL_SUBNETS, context) + + def delete_subnet_postcommit(self, context): + self.synchronize('delete', ODL_SUBNETS, context) + + def create_port_postcommit(self, context): + self.synchronize('create', ODL_PORTS, context) + + def update_port_postcommit(self, context): + self.synchronize('update', ODL_PORTS, context) + + def delete_port_postcommit(self, context): + self.synchronize('delete', ODL_PORTS, context) + + def synchronize(self, operation, object_type, context): + """Synchronize ODL with Neutron following a configuration change.""" + if self.out_of_sync: + self.sync_full(context) + else: + self.sync_object(operation, object_type, context) + + def filter_create_network_attributes(self, network, context, dbcontext): + """Filter out network attributes not required for a create.""" + try_del(network, ['status', 'subnets']) + + def filter_create_subnet_attributes(self, subnet, context, dbcontext): + """Filter out subnet attributes not required for a create.""" + pass + + def filter_create_port_attributes(self, port, context, dbcontext): + """Filter out port attributes not required for a create.""" + self.add_security_groups(context, dbcontext, port) + # TODO(kmestery): Converting to uppercase due to ODL bug + # https://bugs.opendaylight.org/show_bug.cgi?id=477 + port['mac_address'] = port['mac_address'].upper() + try_del(port, ['status']) + + def sync_resources(self, resource_name, collection_name, resources, + context, dbcontext, attr_filter): + """Sync objects from Neutron over to OpenDaylight. + + This will handle syncing networks, subnets, and ports from Neutron to + OpenDaylight. It also filters out the requisite items which are not + valid for create API operations. + """ + to_be_synced = [] + for resource in resources: + try: + urlpath = collection_name + '/' + resource['id'] + self.sendjson('get', urlpath, None) + except requests.exceptions.HTTPError as e: + with excutils.save_and_reraise_exception() as ctx: + if e.response.status_code == 404: + attr_filter(resource, context, dbcontext) + to_be_synced.append(resource) + ctx.reraise = False + + key = resource_name if len(to_be_synced) == 1 else collection_name + + # 400 errors are returned if an object exists, which we ignore. + self.sendjson('post', collection_name, {key: to_be_synced}, [400]) + + @utils.synchronized('odl-sync-full') + def sync_full(self, context): + """Resync the entire database to ODL. + + Transition to the in-sync state on success. + Note: we only allow a single thead in here at a time. 
+ """ + if not self.out_of_sync: + return + dbcontext = context._plugin_context + networks = context._plugin.get_networks(dbcontext) + subnets = context._plugin.get_subnets(dbcontext) + ports = context._plugin.get_ports(dbcontext) + + self.sync_resources(ODL_NETWORK, ODL_NETWORKS, networks, + context, dbcontext, + self.filter_create_network_attributes) + self.sync_resources(ODL_SUBNET, ODL_SUBNETS, subnets, + context, dbcontext, + self.filter_create_subnet_attributes) + self.sync_resources(ODL_PORT, ODL_PORTS, ports, + context, dbcontext, + self.filter_create_port_attributes) + self.out_of_sync = False + + def filter_update_network_attributes(self, network, context, dbcontext): + """Filter out network attributes for an update operation.""" + try_del(network, ['id', 'status', 'subnets', 'tenant_id']) + + def filter_update_subnet_attributes(self, subnet, context, dbcontext): + """Filter out subnet attributes for an update operation.""" + try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', + 'allocation_pools', 'tenant_id']) + + def filter_update_port_attributes(self, port, context, dbcontext): + """Filter out port attributes for an update operation.""" + self.add_security_groups(context, dbcontext, port) + try_del(port, ['network_id', 'id', 'status', 'mac_address', + 'tenant_id', 'fixed_ips']) + + create_object_map = {ODL_NETWORKS: filter_create_network_attributes, + ODL_SUBNETS: filter_create_subnet_attributes, + ODL_PORTS: filter_create_port_attributes} + + update_object_map = {ODL_NETWORKS: filter_update_network_attributes, + ODL_SUBNETS: filter_update_subnet_attributes, + ODL_PORTS: filter_update_port_attributes} + + def sync_single_resource(self, operation, object_type, obj_id, + context, attr_filter_create, attr_filter_update): + """Sync over a single resource from Neutron to OpenDaylight. + + Handle syncing a single operation over to OpenDaylight, and correctly + filter attributes out which are not required for the requisite + operation (create or update) being handled. + """ + dbcontext = context._plugin_context + if operation == 'create': + urlpath = object_type + method = 'post' + else: + urlpath = object_type + '/' + obj_id + method = 'put' + + try: + obj_getter = getattr(context._plugin, 'get_%s' % object_type[:-1]) + resource = obj_getter(dbcontext, obj_id) + except not_found_exception_map[object_type]: + LOG.debug(_('%(object_type)s not found (%(obj_id)s)'), + {'object_type': object_type.capitalize(), + 'obj_id': obj_id}) + else: + if operation == 'create': + attr_filter_create(self, resource, context, dbcontext) + elif operation == 'update': + attr_filter_update(self, resource, context, dbcontext) + try: + # 400 errors are returned if an object exists, which we ignore. 
+ self.sendjson(method, urlpath, {object_type[:-1]: resource}, + [400]) + except Exception: + with excutils.save_and_reraise_exception(): + self.out_of_sync = True + + def sync_object(self, operation, object_type, context): + """Synchronize the single modified record to ODL.""" + obj_id = context.current['id'] + + self.sync_single_resource(operation, object_type, obj_id, context, + self.create_object_map[object_type], + self.update_object_map[object_type]) + + def add_security_groups(self, context, dbcontext, port): + """Populate the 'security_groups' field with entire records.""" + groups = [context._plugin.get_security_group(dbcontext, sg) + for sg in port['security_groups']] + port['security_groups'] = groups + + def sendjson(self, method, urlpath, obj, ignorecodes=[]): + """Send json to the OpenDaylight controller.""" + + headers = {'Content-Type': 'application/json'} + data = jsonutils.dumps(obj, indent=2) if obj else None + url = '/'.join([self.url, urlpath]) + LOG.debug(_('ODL-----> sending URL (%s) <-----ODL') % url) + LOG.debug(_('ODL-----> sending JSON (%s) <-----ODL') % obj) + r = requests.request(method, url=url, + headers=headers, data=data, + auth=self.auth, timeout=self.timeout) + + # ignorecodes contains a list of HTTP error codes to ignore. + if r.status_code in ignorecodes: + return + r.raise_for_status() + + def bind_port(self, context): + LOG.debug(_("Attempting to bind port %(port)s on " + "network %(network)s"), + {'port': context.current['id'], + 'network': context.network.current['id']}) + for segment in context.network.network_segments: + if self.check_segment(segment): + context.set_binding(segment[api.ID], + self.vif_type, + self.vif_details, + status=n_const.PORT_STATUS_ACTIVE) + LOG.debug(_("Bound using segment: %s"), segment) + return + else: + LOG.debug(_("Refusing to bind port for segment ID %(id)s, " + "segment %(seg)s, phys net %(physnet)s, and " + "network type %(nettype)s"), + {'id': segment[api.ID], + 'seg': segment[api.SEGMENTATION_ID], + 'physnet': segment[api.PHYSICAL_NETWORK], + 'nettype': segment[api.NETWORK_TYPE]}) + + def check_segment(self, segment): + """Verify a segment is valid for the OpenDaylight MechanismDriver. + + Verify the requested segment is supported by ODL and return True or + False to indicate this to callers. + """ + network_type = segment[api.NETWORK_TYPE] + return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE, + constants.TYPE_VXLAN, constants.TYPE_VLAN] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py new file mode 100644 index 00000000..97eb03a4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py @@ -0,0 +1,91 @@ +# Copyright (c) 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
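For reference, the REST round trip performed by the OpenDaylight driver's sendjson() above can be reduced to a small standalone helper. The following is an illustrative sketch only; the function name send_json, the bare auth argument, and the default ignore_codes value are assumptions for the example and are not part of the patch.

import json
import requests

def send_json(base_url, method, path, payload=None, auth=None,
              timeout=10, ignore_codes=(400,)):
    # Serialize the payload only when one is given, mirroring sendjson above.
    headers = {'Content-Type': 'application/json'}
    data = json.dumps(payload, indent=2) if payload else None
    url = '/'.join([base_url, path])
    r = requests.request(method, url=url, headers=headers, data=data,
                         auth=auth, timeout=timeout)
    # Selected status codes (e.g. 400 when the object already exists)
    # are ignored rather than raised.
    if r.status_code in ignore_codes:
        return None
    r.raise_for_status()
    return r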
+ +from oslo.config import cfg + +from neutron.common import constants +from neutron.extensions import portbindings +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import mech_agent +from neutron.plugins.ml2.drivers.mlnx import config # noqa + +LOG = log.getLogger(__name__) + + +class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): + """Attach to networks using Mellanox eSwitch L2 agent. + + The MellanoxMechanismDriver integrates the ml2 plugin with the + Mellanox eswitch L2 agent. Port binding with this driver requires the + Mellanox eswitch agent to be running on the port's host, and that agent + to have connectivity to at least one segment of the port's + network. + """ + + def __init__(self): + # REVISIT(irenab): update supported_vnic_types to contain + # only VNIC_DIRECT and VNIC_MACVTAP once its possible to specify + # vnic_type via nova API/GUI. Currently VNIC_NORMAL is included + # to enable VM creation via GUI. It should be noted, that if + # several MDs are capable to bing bind port on chosen host, the + # first listed MD will bind the port for VNIC_NORMAL. + super(MlnxMechanismDriver, self).__init__( + constants.AGENT_TYPE_MLNX, + cfg.CONF.ESWITCH.vnic_type, + {portbindings.CAP_PORT_FILTER: False}, + portbindings.VNIC_TYPES) + self.update_profile = cfg.CONF.ESWITCH.apply_profile_patch + + def check_segment_for_agent(self, segment, agent): + mappings = agent['configurations'].get('interface_mappings', {}) + LOG.debug(_("Checking segment: %(segment)s " + "for mappings: %(mappings)s "), + {'segment': segment, 'mappings': mappings}) + + network_type = segment[api.NETWORK_TYPE] + if network_type == 'local': + return True + elif network_type in ['flat', 'vlan']: + return segment[api.PHYSICAL_NETWORK] in mappings + else: + return False + + def try_to_bind_segment_for_agent(self, context, segment, agent): + if self.check_segment_for_agent(segment, agent): + vif_type = self._get_vif_type( + context.current[portbindings.VNIC_TYPE]) + if segment[api.NETWORK_TYPE] in ['flat', 'vlan']: + self.vif_details['physical_network'] = segment[ + 'physical_network'] + context.set_binding(segment[api.ID], + vif_type, + self.vif_details) + # REVISIT(irenab): Temporary solution till nova support + # will be merged for physical_network propagation + # via VIF object to VIFDriver (required by mlnx vif plugging). + if self.update_profile: + profile = {'physical_network': + segment['physical_network']} + context._binding.profile = jsonutils.dumps(profile) + + def _get_vif_type(self, requested_vnic_type): + if requested_vnic_type == portbindings.VNIC_MACVTAP: + return portbindings.VIF_TYPE_MLNX_DIRECT + elif requested_vnic_type == portbindings.VNIC_DIRECT: + return portbindings.VIF_TYPE_MLNX_HOSTDEV + return self.vif_type diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_gre.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_gre.py new file mode 100644 index 00000000..abd894bf --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_gre.py @@ -0,0 +1,190 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +from six import moves +import sqlalchemy as sa +from sqlalchemy.orm import exc as sa_exc + +from neutron.common import exceptions as exc +from neutron.db import api as db_api +from neutron.db import model_base +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_tunnel + +LOG = log.getLogger(__name__) + +gre_opts = [ + cfg.ListOpt('tunnel_id_ranges', + default=[], + help=_("Comma-separated list of : tuples " + "enumerating ranges of GRE tunnel IDs that are " + "available for tenant network allocation")) +] + +cfg.CONF.register_opts(gre_opts, "ml2_type_gre") + + +class GreAllocation(model_base.BASEV2): + + __tablename__ = 'ml2_gre_allocations' + + gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False) + + +class GreEndpoints(model_base.BASEV2): + """Represents tunnel endpoint in RPC mode.""" + __tablename__ = 'ml2_gre_endpoints' + + ip_address = sa.Column(sa.String(64), primary_key=True) + + def __repr__(self): + return "" % self.ip_address + + +class GreTypeDriver(type_tunnel.TunnelTypeDriver): + + def get_type(self): + return p_const.TYPE_GRE + + def initialize(self): + self.gre_id_ranges = [] + self._parse_tunnel_ranges( + cfg.CONF.ml2_type_gre.tunnel_id_ranges, + self.gre_id_ranges, + p_const.TYPE_GRE + ) + self._sync_gre_allocations() + + def reserve_provider_segment(self, session, segment): + segmentation_id = segment.get(api.SEGMENTATION_ID) + with session.begin(subtransactions=True): + try: + alloc = (session.query(GreAllocation). + filter_by(gre_id=segmentation_id). + with_lockmode('update'). + one()) + if alloc.allocated: + raise exc.TunnelIdInUse(tunnel_id=segmentation_id) + LOG.debug(_("Reserving specific gre tunnel %s from pool"), + segmentation_id) + alloc.allocated = True + except sa_exc.NoResultFound: + LOG.debug(_("Reserving specific gre tunnel %s outside pool"), + segmentation_id) + alloc = GreAllocation(gre_id=segmentation_id) + alloc.allocated = True + session.add(alloc) + + def allocate_tenant_segment(self, session): + with session.begin(subtransactions=True): + alloc = (session.query(GreAllocation). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if alloc: + LOG.debug(_("Allocating gre tunnel id %(gre_id)s"), + {'gre_id': alloc.gre_id}) + alloc.allocated = True + return {api.NETWORK_TYPE: p_const.TYPE_GRE, + api.PHYSICAL_NETWORK: None, + api.SEGMENTATION_ID: alloc.gre_id} + + def release_segment(self, session, segment): + gre_id = segment[api.SEGMENTATION_ID] + with session.begin(subtransactions=True): + try: + alloc = (session.query(GreAllocation). + filter_by(gre_id=gre_id). + with_lockmode('update'). 
+ one()) + alloc.allocated = False + for lo, hi in self.gre_id_ranges: + if lo <= gre_id <= hi: + LOG.debug(_("Releasing gre tunnel %s to pool"), + gre_id) + break + else: + session.delete(alloc) + LOG.debug(_("Releasing gre tunnel %s outside pool"), + gre_id) + except sa_exc.NoResultFound: + LOG.warning(_("gre_id %s not found"), gre_id) + + def _sync_gre_allocations(self): + """Synchronize gre_allocations table with configured tunnel ranges.""" + + # determine current configured allocatable gres + gre_ids = set() + for gre_id_range in self.gre_id_ranges: + tun_min, tun_max = gre_id_range + if tun_max + 1 - tun_min > 1000000: + LOG.error(_("Skipping unreasonable gre ID range " + "%(tun_min)s:%(tun_max)s"), + {'tun_min': tun_min, 'tun_max': tun_max}) + else: + gre_ids |= set(moves.xrange(tun_min, tun_max + 1)) + + session = db_api.get_session() + with session.begin(subtransactions=True): + # remove from table unallocated tunnels not currently allocatable + allocs = (session.query(GreAllocation).all()) + for alloc in allocs: + try: + # see if tunnel is allocatable + gre_ids.remove(alloc.gre_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing tunnel %s from pool"), + alloc.gre_id) + session.delete(alloc) + + # add missing allocatable tunnels to table + for gre_id in sorted(gre_ids): + alloc = GreAllocation(gre_id=gre_id) + session.add(alloc) + + def get_gre_allocation(self, session, gre_id): + return session.query(GreAllocation).filter_by(gre_id=gre_id).first() + + def get_endpoints(self): + """Get every gre endpoints from database.""" + + LOG.debug(_("get_gre_endpoints() called")) + session = db_api.get_session() + + with session.begin(subtransactions=True): + gre_endpoints = session.query(GreEndpoints) + return [{'ip_address': gre_endpoint.ip_address} + for gre_endpoint in gre_endpoints] + + def add_endpoint(self, ip): + LOG.debug(_("add_gre_endpoint() called for ip %s"), ip) + session = db_api.get_session() + with session.begin(subtransactions=True): + try: + gre_endpoint = (session.query(GreEndpoints). + filter_by(ip_address=ip).one()) + LOG.warning(_("Gre endpoint with ip %s already exists"), ip) + except sa_exc.NoResultFound: + gre_endpoint = GreEndpoints(ip_address=ip) + session.add(gre_endpoint) + return gre_endpoint diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_tunnel.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_tunnel.py new file mode 100644 index 00000000..fbc7110e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_tunnel.py @@ -0,0 +1,132 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
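To illustrate the range bookkeeping done by _parse_tunnel_ranges() and _sync_gre_allocations() in the GRE type driver above, the sketch below expands 'min:max' strings into the set of tunnel IDs the driver would treat as allocatable, skipping oversized ranges the same way the sync code does. The helper names are invented for this example and do not appear in the patch.

def parse_tunnel_ranges(tunnel_ranges):
    # Turn entries such as "1:1000" into (1, 1000) integer tuples.
    parsed = []
    for entry in tunnel_ranges:
        tun_min, tun_max = entry.strip().split(':')
        parsed.append((int(tun_min.strip()), int(tun_max.strip())))
    return parsed

def allocatable_tunnel_ids(ranges, max_span=1000000):
    # Expand each (lo, hi) range, ignoring unreasonably large spans
    # just as _sync_gre_allocations() does before populating the table.
    ids = set()
    for lo, hi in ranges:
        if hi + 1 - lo > max_span:
            continue
        ids |= set(range(lo, hi + 1))
    return ids

# Example: IDs 1-5 and 100-102 become the allocatable GRE tunnel IDs.
print(sorted(allocatable_tunnel_ids(parse_tunnel_ranges(["1:5", "100:102"]))))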
+import abc +import six + +from neutron.common import exceptions as exc +from neutron.common import topics +from neutron.openstack.common import log +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + +TUNNEL = 'tunnel' + + +@six.add_metaclass(abc.ABCMeta) +class TunnelTypeDriver(api.TypeDriver): + """Define stable abstract interface for ML2 type drivers. + + tunnel type networks rely on tunnel endpoints. This class defines abstract + methods to manage these endpoints. + """ + + @abc.abstractmethod + def add_endpoint(self, ip): + """Register the endpoint in the type_driver database. + + param ip: the ip of the endpoint + """ + pass + + @abc.abstractmethod + def get_endpoints(self): + """Get every endpoint managed by the type_driver + + :returns a list of dict [{id:endpoint_id, ip_address:endpoint_ip},..] + """ + pass + + def _parse_tunnel_ranges(self, tunnel_ranges, current_range, tunnel_type): + for entry in tunnel_ranges: + entry = entry.strip() + try: + tun_min, tun_max = entry.split(':') + tun_min = tun_min.strip() + tun_max = tun_max.strip() + current_range.append((int(tun_min), int(tun_max))) + except ValueError as ex: + LOG.error(_("Invalid tunnel ID range: '%(range)s' - %(e)s. " + "Agent terminated!"), + {'range': tunnel_ranges, 'e': ex}) + LOG.info(_("%(type)s ID ranges: %(range)s"), + {'type': tunnel_type, 'range': current_range}) + + def validate_provider_segment(self, segment): + physical_network = segment.get(api.PHYSICAL_NETWORK) + if physical_network: + msg = _("provider:physical_network specified for %s " + "network") % segment.get(api.NETWORK_TYPE) + raise exc.InvalidInput(error_message=msg) + + segmentation_id = segment.get(api.SEGMENTATION_ID) + if not segmentation_id: + msg = _("segmentation_id required for %s provider " + "network") % segment.get(api.NETWORK_TYPE) + raise exc.InvalidInput(error_message=msg) + + for key, value in segment.items(): + if value and key not in [api.NETWORK_TYPE, + api.SEGMENTATION_ID]: + msg = (_("%(key)s prohibited for %(tunnel)s provider network"), + {'key': key, 'tunnel': segment.get(api.NETWORK_TYPE)}) + raise exc.InvalidInput(error_message=msg) + + +class TunnelRpcCallbackMixin(object): + + def setup_tunnel_callback_mixin(self, notifier, type_manager): + self._notifier = notifier + self._type_manager = type_manager + + def tunnel_sync(self, rpc_context, **kwargs): + """Update new tunnel. + + Updates the database with the tunnel IP. All listening agents will also + be notified about the new tunnel IP. 
+ """ + tunnel_ip = kwargs.get('tunnel_ip') + tunnel_type = kwargs.get('tunnel_type') + if not tunnel_type: + msg = _("Network_type value needed by the ML2 plugin") + raise exc.InvalidInput(error_message=msg) + driver = self._type_manager.drivers.get(tunnel_type) + if driver: + tunnel = driver.obj.add_endpoint(tunnel_ip) + tunnels = driver.obj.get_endpoints() + entry = {'tunnels': tunnels} + # Notify all other listening agents + self._notifier.tunnel_update(rpc_context, tunnel.ip_address, + tunnel_type) + # Return the list of tunnels IP's to the agent + return entry + else: + msg = _("network_type value '%s' not supported") % tunnel_type + raise exc.InvalidInput(error_message=msg) + + +class TunnelAgentRpcApiMixin(object): + + def _get_tunnel_update_topic(self): + return topics.get_topic_name(self.topic, + TUNNEL, + topics.UPDATE) + + def tunnel_update(self, context, tunnel_ip, tunnel_type): + self.fanout_cast(context, + self.make_msg('tunnel_update', + tunnel_ip=tunnel_ip, + tunnel_type=tunnel_type), + topic=self._get_tunnel_update_topic()) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_vlan.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_vlan.py new file mode 100644 index 00000000..0159d571 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_vlan.py @@ -0,0 +1,267 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from oslo.config import cfg +from six import moves +import sqlalchemy as sa + +from neutron.common import constants as q_const +from neutron.common import exceptions as exc +from neutron.common import utils +from neutron.db import api as db_api +from neutron.db import model_base +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.ml2 import driver_api as api + +LOG = log.getLogger(__name__) + +vlan_opts = [ + cfg.ListOpt('network_vlan_ranges', + default=[], + help=_("List of :: or " + " specifying physical_network names " + "usable for VLAN provider and tenant networks, as " + "well as ranges of VLAN tags on each available for " + "allocation to tenant networks.")) +] + +cfg.CONF.register_opts(vlan_opts, "ml2_type_vlan") + + +class VlanAllocation(model_base.BASEV2): + """Represent allocation state of a vlan_id on a physical network. + + If allocated is False, the vlan_id on the physical_network is + available for allocation to a tenant network. If allocated is + True, the vlan_id on the physical_network is in use, either as a + tenant or provider network. + + When an allocation is released, if the vlan_id for the + physical_network is inside the pool described by + VlanTypeDriver.network_vlan_ranges, then allocated is set to + False. If it is outside the pool, the record is deleted. 
+ """ + + __tablename__ = 'ml2_vlan_allocations' + + physical_network = sa.Column(sa.String(64), nullable=False, + primary_key=True) + vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False) + + +class VlanTypeDriver(api.TypeDriver): + """Manage state for VLAN networks with ML2. + + The VlanTypeDriver implements the 'vlan' network_type. VLAN + network segments provide connectivity between VMs and other + devices using any connected IEEE 802.1Q conformant + physical_network segmented into virtual networks via IEEE 802.1Q + headers. Up to 4094 VLAN network segments can exist on each + available physical_network. + """ + + def __init__(self): + self._parse_network_vlan_ranges() + + def _parse_network_vlan_ranges(self): + try: + self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.ml2_type_vlan.network_vlan_ranges) + # TODO(rkukura): Validate that each physical_network name + # is neither empty nor too long. + except Exception: + LOG.exception(_("Failed to parse network_vlan_ranges. " + "Service terminated!")) + sys.exit(1) + LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) + + def _sync_vlan_allocations(self): + session = db_api.get_session() + with session.begin(subtransactions=True): + # get existing allocations for all physical networks + allocations = dict() + allocs = (session.query(VlanAllocation). + with_lockmode('update')) + for alloc in allocs: + if alloc.physical_network not in allocations: + allocations[alloc.physical_network] = set() + allocations[alloc.physical_network].add(alloc) + + # process vlan ranges for each configured physical network + for (physical_network, + vlan_ranges) in self.network_vlan_ranges.items(): + # determine current configured allocatable vlans for + # this physical network + vlan_ids = set() + for vlan_min, vlan_max in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_min, vlan_max + 1)) + + # remove from table unallocated vlans not currently + # allocatable + if physical_network in allocations: + for alloc in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(alloc.vlan_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing vlan %(vlan_id)s on " + "physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': + physical_network}) + session.delete(alloc) + del allocations[physical_network] + + # add missing allocatable vlans to table + for vlan_id in sorted(vlan_ids): + alloc = VlanAllocation(physical_network=physical_network, + vlan_id=vlan_id, + allocated=False) + session.add(alloc) + + # remove from table unallocated vlans for any unconfigured + # physical networks + for allocs in allocations.itervalues(): + for alloc in allocs: + if not alloc.allocated: + LOG.debug(_("Removing vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': + alloc.physical_network}) + session.delete(alloc) + + def get_type(self): + return p_const.TYPE_VLAN + + def initialize(self): + self._sync_vlan_allocations() + LOG.info(_("VlanTypeDriver initialization complete")) + + def validate_provider_segment(self, segment): + physical_network = segment.get(api.PHYSICAL_NETWORK) + if not physical_network: + msg = _("physical_network required for VLAN provider network") + raise 
exc.InvalidInput(error_message=msg) + if physical_network not in self.network_vlan_ranges: + msg = (_("physical_network '%s' unknown for VLAN provider network") + % physical_network) + raise exc.InvalidInput(error_message=msg) + + segmentation_id = segment.get(api.SEGMENTATION_ID) + if segmentation_id is None: + msg = _("segmentation_id required for VLAN provider network") + raise exc.InvalidInput(error_message=msg) + if not utils.is_valid_vlan_tag(segmentation_id): + msg = (_("segmentation_id out of range (%(min)s through " + "%(max)s)") % + {'min': q_const.MIN_VLAN_TAG, + 'max': q_const.MAX_VLAN_TAG}) + raise exc.InvalidInput(error_message=msg) + + for key, value in segment.items(): + if value and key not in [api.NETWORK_TYPE, + api.PHYSICAL_NETWORK, + api.SEGMENTATION_ID]: + msg = _("%s prohibited for VLAN provider network") % key + raise exc.InvalidInput(error_message=msg) + + def reserve_provider_segment(self, session, segment): + physical_network = segment[api.PHYSICAL_NETWORK] + vlan_id = segment[api.SEGMENTATION_ID] + with session.begin(subtransactions=True): + try: + alloc = (session.query(VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). + one()) + if alloc.allocated: + raise exc.VlanIdInUse(vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc.allocated = True + except sa.orm.exc.NoResultFound: + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc = VlanAllocation(physical_network=physical_network, + vlan_id=vlan_id, + allocated=True) + session.add(alloc) + + def allocate_tenant_segment(self, session): + with session.begin(subtransactions=True): + alloc = (session.query(VlanAllocation). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if alloc: + LOG.debug(_("Allocating vlan %(vlan_id)s on physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + alloc.allocated = True + return {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.PHYSICAL_NETWORK: alloc.physical_network, + api.SEGMENTATION_ID: alloc.vlan_id} + + def release_segment(self, session, segment): + physical_network = segment[api.PHYSICAL_NETWORK] + vlan_id = segment[api.SEGMENTATION_ID] + with session.begin(subtransactions=True): + try: + alloc = (session.query(VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). 
+ one()) + alloc.allocated = False + inside = False + for vlan_min, vlan_max in self.network_vlan_ranges.get( + physical_network, []): + if vlan_min <= vlan_id <= vlan_max: + inside = True + break + if not inside: + session.delete(alloc) + LOG.debug(_("Releasing vlan %(vlan_id)s on physical " + "network %(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + else: + LOG.debug(_("Releasing vlan %(vlan_id)s on physical " + "network %(physical_network)s to pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + except sa.orm.exc.NoResultFound: + LOG.warning(_("No vlan_id %(vlan_id)s found on physical " + "network %(physical_network)s"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_vxlan.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_vxlan.py new file mode 100644 index 00000000..2d39d5ff --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/drivers/type_vxlan.py @@ -0,0 +1,213 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Kyle Mestery, Cisco Systems, Inc. + +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy.orm import exc as sa_exc + +from neutron.common import exceptions as exc +from neutron.db import api as db_api +from neutron.db import model_base +from neutron.openstack.common import log +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_tunnel + +LOG = log.getLogger(__name__) + +VXLAN_UDP_PORT = 4789 +MAX_VXLAN_VNI = 16777215 + +vxlan_opts = [ + cfg.ListOpt('vni_ranges', + default=[], + help=_("Comma-separated list of : tuples " + "enumerating ranges of VXLAN VNI IDs that are " + "available for tenant network allocation")), + cfg.StrOpt('vxlan_group', + help=_("Multicast group for VXLAN. 
If unset, disables VXLAN " + "multicast mode.")), +] + +cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan") + + +class VxlanAllocation(model_base.BASEV2): + + __tablename__ = 'ml2_vxlan_allocations' + + vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False) + + +class VxlanEndpoints(model_base.BASEV2): + """Represents tunnel endpoint in RPC mode.""" + __tablename__ = 'ml2_vxlan_endpoints' + + ip_address = sa.Column(sa.String(64), primary_key=True) + udp_port = sa.Column(sa.Integer, primary_key=True, nullable=False, + autoincrement=False) + + def __repr__(self): + return "" % self.ip_address + + +class VxlanTypeDriver(type_tunnel.TunnelTypeDriver): + + def get_type(self): + return p_const.TYPE_VXLAN + + def initialize(self): + self.vxlan_vni_ranges = [] + self._parse_tunnel_ranges( + cfg.CONF.ml2_type_vxlan.vni_ranges, + self.vxlan_vni_ranges, + p_const.TYPE_VXLAN + ) + self._sync_vxlan_allocations() + + def reserve_provider_segment(self, session, segment): + segmentation_id = segment.get(api.SEGMENTATION_ID) + with session.begin(subtransactions=True): + try: + alloc = (session.query(VxlanAllocation). + filter_by(vxlan_vni=segmentation_id). + with_lockmode('update'). + one()) + if alloc.allocated: + raise exc.TunnelIdInUse(tunnel_id=segmentation_id) + LOG.debug(_("Reserving specific vxlan tunnel %s from pool"), + segmentation_id) + alloc.allocated = True + except sa_exc.NoResultFound: + LOG.debug(_("Reserving specific vxlan tunnel %s outside pool"), + segmentation_id) + alloc = VxlanAllocation(vxlan_vni=segmentation_id) + alloc.allocated = True + session.add(alloc) + + def allocate_tenant_segment(self, session): + with session.begin(subtransactions=True): + alloc = (session.query(VxlanAllocation). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if alloc: + LOG.debug(_("Allocating vxlan tunnel vni %(vxlan_vni)s"), + {'vxlan_vni': alloc.vxlan_vni}) + alloc.allocated = True + return {api.NETWORK_TYPE: p_const.TYPE_VXLAN, + api.PHYSICAL_NETWORK: None, + api.SEGMENTATION_ID: alloc.vxlan_vni} + + def release_segment(self, session, segment): + vxlan_vni = segment[api.SEGMENTATION_ID] + with session.begin(subtransactions=True): + try: + alloc = (session.query(VxlanAllocation). + filter_by(vxlan_vni=vxlan_vni). + with_lockmode('update'). + one()) + alloc.allocated = False + for low, high in self.vxlan_vni_ranges: + if low <= vxlan_vni <= high: + LOG.debug(_("Releasing vxlan tunnel %s to pool"), + vxlan_vni) + break + else: + session.delete(alloc) + LOG.debug(_("Releasing vxlan tunnel %s outside pool"), + vxlan_vni) + except sa_exc.NoResultFound: + LOG.warning(_("vxlan_vni %s not found"), vxlan_vni) + + def _sync_vxlan_allocations(self): + """ + Synchronize vxlan_allocations table with configured tunnel ranges. + """ + + # determine current configured allocatable vnis + vxlan_vnis = set() + for tun_min, tun_max in self.vxlan_vni_ranges: + if tun_max + 1 - tun_min > MAX_VXLAN_VNI: + LOG.error(_("Skipping unreasonable VXLAN VNI range " + "%(tun_min)s:%(tun_max)s"), + {'tun_min': tun_min, 'tun_max': tun_max}) + else: + vxlan_vnis |= set(xrange(tun_min, tun_max + 1)) + + session = db_api.get_session() + with session.begin(subtransactions=True): + # remove from table unallocated tunnels not currently allocatable + # fetch results as list via all() because we'll be iterating + # through them twice + allocs = (session.query(VxlanAllocation). 
+ with_lockmode("update").all()) + # collect all vnis present in db + existing_vnis = set(alloc.vxlan_vni for alloc in allocs) + # collect those vnis that needs to be deleted from db + vnis_to_remove = [alloc.vxlan_vni for alloc in allocs + if (alloc.vxlan_vni not in vxlan_vnis and + not alloc.allocated)] + # Immediately delete vnis in chunks. This leaves no work for + # flush at the end of transaction + bulk_size = 100 + chunked_vnis = (vnis_to_remove[i:i + bulk_size] for i in + range(0, len(vnis_to_remove), bulk_size)) + for vni_list in chunked_vnis: + session.query(VxlanAllocation).filter( + VxlanAllocation.vxlan_vni.in_(vni_list)).delete( + synchronize_session=False) + # collect vnis that need to be added + vnis = list(vxlan_vnis - existing_vnis) + chunked_vnis = (vnis[i:i + bulk_size] for i in + range(0, len(vnis), bulk_size)) + for vni_list in chunked_vnis: + bulk = [{'vxlan_vni': vni, 'allocated': False} + for vni in vni_list] + session.execute(VxlanAllocation.__table__.insert(), bulk) + + def get_vxlan_allocation(self, session, vxlan_vni): + with session.begin(subtransactions=True): + return session.query(VxlanAllocation).filter_by( + vxlan_vni=vxlan_vni).first() + + def get_endpoints(self): + """Get every vxlan endpoints from database.""" + + LOG.debug(_("get_vxlan_endpoints() called")) + session = db_api.get_session() + + with session.begin(subtransactions=True): + vxlan_endpoints = session.query(VxlanEndpoints) + return [{'ip_address': vxlan_endpoint.ip_address, + 'udp_port': vxlan_endpoint.udp_port} + for vxlan_endpoint in vxlan_endpoints] + + def add_endpoint(self, ip, udp_port=VXLAN_UDP_PORT): + LOG.debug(_("add_vxlan_endpoint() called for ip %s"), ip) + session = db_api.get_session() + with session.begin(subtransactions=True): + try: + vxlan_endpoint = (session.query(VxlanEndpoints). + filter_by(ip_address=ip). + with_lockmode('update').one()) + except sa_exc.NoResultFound: + vxlan_endpoint = VxlanEndpoints(ip_address=ip, + udp_port=udp_port) + session.add(vxlan_endpoint) + return vxlan_endpoint diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/managers.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/managers.py new file mode 100644 index 00000000..13df6732 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/managers.py @@ -0,0 +1,480 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
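The VXLAN sync above batches its deletes and inserts in slices of 100 rows so that no work is left for the flush at the end of the transaction. The generator pattern it relies on can be sketched on its own as follows; chunked and the sample data are illustrative names for this example, not part of the patch.

def chunked(items, size=100):
    # Yield successive fixed-size slices of a list, as the VNI sync does
    # before issuing each bulk DELETE or INSERT statement.
    for i in range(0, len(items), size):
        yield items[i:i + size]

vnis_to_remove = list(range(1, 251))
for batch in chunked(vnis_to_remove):
    # Each batch would feed one bulk statement.
    print(len(batch))   # prints 100, 100, 50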
+ +from oslo.config import cfg +import stevedore + +from neutron.common import exceptions as exc +from neutron.extensions import portbindings +from neutron.openstack.common import log +from neutron.plugins.ml2.common import exceptions as ml2_exc +from neutron.plugins.ml2 import driver_api as api + + +LOG = log.getLogger(__name__) + + +class TypeManager(stevedore.named.NamedExtensionManager): + """Manage network segment types using drivers.""" + + def __init__(self): + # Mapping from type name to DriverManager + self.drivers = {} + + LOG.info(_("Configured type driver names: %s"), + cfg.CONF.ml2.type_drivers) + super(TypeManager, self).__init__('neutron.ml2.type_drivers', + cfg.CONF.ml2.type_drivers, + invoke_on_load=True) + LOG.info(_("Loaded type driver names: %s"), self.names()) + self._register_types() + self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types) + + def _register_types(self): + for ext in self: + network_type = ext.obj.get_type() + if network_type in self.drivers: + LOG.error(_("Type driver '%(new_driver)s' ignored because type" + " driver '%(old_driver)s' is already registered" + " for type '%(type)s'"), + {'new_driver': ext.name, + 'old_driver': self.drivers[network_type].name, + 'type': network_type}) + else: + self.drivers[network_type] = ext + LOG.info(_("Registered types: %s"), self.drivers.keys()) + + def _check_tenant_network_types(self, types): + self.tenant_network_types = [] + for network_type in types: + if network_type in self.drivers: + self.tenant_network_types.append(network_type) + else: + msg = _("No type driver for tenant network_type: %s. " + "Service terminated!") % network_type + LOG.error(msg) + raise SystemExit(1) + LOG.info(_("Tenant network_types: %s"), self.tenant_network_types) + + def initialize(self): + for network_type, driver in self.drivers.iteritems(): + LOG.info(_("Initializing driver for type '%s'"), network_type) + driver.obj.initialize() + + def validate_provider_segment(self, segment): + network_type = segment[api.NETWORK_TYPE] + driver = self.drivers.get(network_type) + if driver: + driver.obj.validate_provider_segment(segment) + else: + msg = _("network_type value '%s' not supported") % network_type + raise exc.InvalidInput(error_message=msg) + + def reserve_provider_segment(self, session, segment): + network_type = segment.get(api.NETWORK_TYPE) + driver = self.drivers.get(network_type) + driver.obj.reserve_provider_segment(session, segment) + + def allocate_tenant_segment(self, session): + for network_type in self.tenant_network_types: + driver = self.drivers.get(network_type) + segment = driver.obj.allocate_tenant_segment(session) + if segment: + return segment + raise exc.NoNetworkAvailable() + + def release_segment(self, session, segment): + network_type = segment.get(api.NETWORK_TYPE) + driver = self.drivers.get(network_type) + # ML2 may have been reconfigured since the segment was created, + # so a driver may no longer exist for this network_type. + # REVISIT: network_type-specific db entries may become orphaned + # if a network is deleted and the driver isn't available to release + # the segment. This may be fixed with explicit foreign-key references + # or consistency checks on driver initialization. 
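+        # For example (illustrative scenario): if 'vxlan' is removed from
+        # the configured type_drivers while VXLAN networks still exist,
+        # deleting such a network reaches this point with no driver, so the
+        # matching ml2_vxlan_allocations row stays marked allocated and its
+        # VNI is never returned to the pool.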
+ if not driver: + LOG.error(_("Failed to release segment '%s' because " + "network type is not supported."), segment) + return + driver.obj.release_segment(session, segment) + + +class MechanismManager(stevedore.named.NamedExtensionManager): + """Manage networking mechanisms using drivers.""" + + def __init__(self): + # Registered mechanism drivers, keyed by name. + self.mech_drivers = {} + # Ordered list of mechanism drivers, defining + # the order in which the drivers are called. + self.ordered_mech_drivers = [] + + LOG.info(_("Configured mechanism driver names: %s"), + cfg.CONF.ml2.mechanism_drivers) + super(MechanismManager, self).__init__('neutron.ml2.mechanism_drivers', + cfg.CONF.ml2.mechanism_drivers, + invoke_on_load=True, + name_order=True) + LOG.info(_("Loaded mechanism driver names: %s"), self.names()) + self._register_mechanisms() + + def _register_mechanisms(self): + """Register all mechanism drivers. + + This method should only be called once in the MechanismManager + constructor. + """ + for ext in self: + self.mech_drivers[ext.name] = ext + self.ordered_mech_drivers.append(ext) + LOG.info(_("Registered mechanism drivers: %s"), + [driver.name for driver in self.ordered_mech_drivers]) + + def initialize(self): + # For ML2 to support bulk operations, each driver must support them + self.native_bulk_support = True + for driver in self.ordered_mech_drivers: + LOG.info(_("Initializing mechanism driver '%s'"), driver.name) + driver.obj.initialize() + self.native_bulk_support &= getattr(driver.obj, + 'native_bulk_support', True) + + def _call_on_drivers(self, method_name, context, + continue_on_failure=False): + """Helper method for calling a method across all mechanism drivers. + + :param method_name: name of the method to call + :param context: context parameter to pass to each method call + :param continue_on_failure: whether or not to continue to call + all mechanism drivers once one has raised an exception + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver call fails. + """ + error = False + for driver in self.ordered_mech_drivers: + try: + getattr(driver.obj, method_name)(context) + except Exception: + LOG.exception( + _("Mechanism driver '%(name)s' failed in %(method)s"), + {'name': driver.name, 'method': method_name} + ) + error = True + if not continue_on_failure: + break + if error: + raise ml2_exc.MechanismDriverError( + method=method_name + ) + + def create_network_precommit(self, context): + """Notify all mechanism drivers during network creation. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver create_network_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("create_network_precommit", context) + + def create_network_postcommit(self, context): + """Notify all mechanism drivers after network creation. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver create_network_postcommit call fails. + + Called after the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propagated + to the caller, where the network will be deleted, triggering + any required cleanup. There is no guarantee that all mechanism + drivers are called in this case. 
+ """ + self._call_on_drivers("create_network_postcommit", context) + + def update_network_precommit(self, context): + """Notify all mechanism drivers during network update. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver update_network_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("update_network_precommit", context) + + def update_network_postcommit(self, context): + """Notify all mechanism drivers after network update. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver update_network_postcommit call fails. + + Called after the database transaction. If any mechanism driver + raises an error, then the error is logged but we continue to + call every other mechanism driver. A MechanismDriverError is + then reraised at the end to notify the caller of a failure. + """ + self._call_on_drivers("update_network_postcommit", context, + continue_on_failure=True) + + def delete_network_precommit(self, context): + """Notify all mechanism drivers during network deletion. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver delete_network_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("delete_network_precommit", context) + + def delete_network_postcommit(self, context): + """Notify all mechanism drivers after network deletion. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver delete_network_postcommit call fails. + + Called after the database transaction. If any mechanism driver + raises an error, then the error is logged but we continue to + call every other mechanism driver. A MechanismDriverError is + then reraised at the end to notify the caller of a failure. In + general we expect the caller to ignore the error, as the + network resource has already been deleted from the database + and it doesn't make sense to undo the action by recreating the + network. + """ + self._call_on_drivers("delete_network_postcommit", context, + continue_on_failure=True) + + def create_subnet_precommit(self, context): + """Notify all mechanism drivers during subnet creation. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver create_subnet_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("create_subnet_precommit", context) + + def create_subnet_postcommit(self, context): + """Notify all mechanism drivers after subnet creation. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver create_subnet_postcommit call fails. + + Called after the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propagated + to the caller, where the subnet will be deleted, triggering + any required cleanup. 
There is no guarantee that all mechanism + drivers are called in this case. + """ + self._call_on_drivers("create_subnet_postcommit", context) + + def update_subnet_precommit(self, context): + """Notify all mechanism drivers during subnet update. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver update_subnet_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("update_subnet_precommit", context) + + def update_subnet_postcommit(self, context): + """Notify all mechanism drivers after subnet update. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver update_subnet_postcommit call fails. + + Called after the database transaction. If any mechanism driver + raises an error, then the error is logged but we continue to + call every other mechanism driver. A MechanismDriverError is + then reraised at the end to notify the caller of a failure. + """ + self._call_on_drivers("update_subnet_postcommit", context, + continue_on_failure=True) + + def delete_subnet_precommit(self, context): + """Notify all mechanism drivers during subnet deletion. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver delete_subnet_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("delete_subnet_precommit", context) + + def delete_subnet_postcommit(self, context): + """Notify all mechanism drivers after subnet deletion. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver delete_subnet_postcommit call fails. + + Called after the database transaction. If any mechanism driver + raises an error, then the error is logged but we continue to + call every other mechanism driver. A MechanismDriverError is + then reraised at the end to notify the caller of a failure. In + general we expect the caller to ignore the error, as the + subnet resource has already been deleted from the database + and it doesn't make sense to undo the action by recreating the + subnet. + """ + self._call_on_drivers("delete_subnet_postcommit", context, + continue_on_failure=True) + + def create_port_precommit(self, context): + """Notify all mechanism drivers during port creation. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver create_port_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("create_port_precommit", context) + + def create_port_postcommit(self, context): + """Notify all mechanism drivers of port creation. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver create_port_postcommit call fails. + + Called after the database transaction. Errors raised by + mechanism drivers are left to propagate to the caller, where + the port will be deleted, triggering any required + cleanup. 
There is no guarantee that all mechanism drivers are + called in this case. + """ + self._call_on_drivers("create_port_postcommit", context) + + def update_port_precommit(self, context): + """Notify all mechanism drivers during port update. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver update_port_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("update_port_precommit", context) + + def update_port_postcommit(self, context): + """Notify all mechanism drivers after port update. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver update_port_postcommit call fails. + + Called after the database transaction. If any mechanism driver + raises an error, then the error is logged but we continue to + call every other mechanism driver. A MechanismDriverError is + then reraised at the end to notify the caller of a failure. + """ + self._call_on_drivers("update_port_postcommit", context, + continue_on_failure=True) + + def delete_port_precommit(self, context): + """Notify all mechanism drivers during port deletion. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver delete_port_precommit call fails. + + Called within the database transaction. If a mechanism driver + raises an exception, then a MechanismDriverError is propogated + to the caller, triggering a rollback. There is no guarantee + that all mechanism drivers are called in this case. + """ + self._call_on_drivers("delete_port_precommit", context) + + def delete_port_postcommit(self, context): + """Notify all mechanism drivers after port deletion. + + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver delete_port_postcommit call fails. + + Called after the database transaction. If any mechanism driver + raises an error, then the error is logged but we continue to + call every other mechanism driver. A MechanismDriverError is + then reraised at the end to notify the caller of a failure. In + general we expect the caller to ignore the error, as the + port resource has already been deleted from the database + and it doesn't make sense to undo the action by recreating the + port. + """ + self._call_on_drivers("delete_port_postcommit", context, + continue_on_failure=True) + + def bind_port(self, context): + """Attempt to bind a port using registered mechanism drivers. + + :param context: PortContext instance describing the port + + Called inside transaction context on session, prior to + create_port_precommit or update_port_precommit, to + attempt to establish a port binding. 
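+
+        On a successful bind the binding records the driver that bound
+        the port and the bound segment; if no mechanism driver binds the
+        port, the vif_type is set to VIF_TYPE_BINDING_FAILED.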
+ """ + binding = context._binding + LOG.debug(_("Attempting to bind port %(port)s on host %(host)s " + "for vnic_type %(vnic_type)s with profile %(profile)s"), + {'port': context._port['id'], + 'host': binding.host, + 'vnic_type': binding.vnic_type, + 'profile': binding.profile}) + for driver in self.ordered_mech_drivers: + try: + driver.obj.bind_port(context) + if binding.segment: + binding.driver = driver.name + LOG.debug(_("Bound port: %(port)s, host: %(host)s, " + "vnic_type: %(vnic_type)s, " + "profile: %(profile)s" + "driver: %(driver)s, vif_type: %(vif_type)s, " + "vif_details: %(vif_details)s, " + "segment: %(segment)s"), + {'port': context._port['id'], + 'host': binding.host, + 'vnic_type': binding.vnic_type, + 'profile': binding.profile, + 'driver': binding.driver, + 'vif_type': binding.vif_type, + 'vif_details': binding.vif_details, + 'segment': binding.segment}) + return + except Exception: + LOG.exception(_("Mechanism driver %s failed in " + "bind_port"), + driver.name) + binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED + LOG.warning(_("Failed to bind port %(port)s on host %(host)s"), + {'port': context._port['id'], + 'host': binding.host}) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/models.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/models.py new file mode 100644 index 00000000..a77b705d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/models.py @@ -0,0 +1,111 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import portbindings + +BINDING_PROFILE_LEN = 4095 + + +class NetworkSegment(model_base.BASEV2, models_v2.HasId): + """Represent persistent state of a network segment. + + A network segment is a portion of a neutron network with a + specific physical realization. A neutron network can consist of + one or more segments. + """ + + __tablename__ = 'ml2_network_segments' + + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + nullable=False) + network_type = sa.Column(sa.String(32), nullable=False) + physical_network = sa.Column(sa.String(64)) + segmentation_id = sa.Column(sa.Integer) + + +class PortBinding(model_base.BASEV2): + """Represent binding-related state of a port. + + A port binding stores the port attributes required for the + portbindings extension, as well as internal ml2 state such as + which MechanismDriver and which segment are used by the port + binding. 
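+
+    There is at most one binding row per port (port_id alone is the
+    primary key); contrast DVRPortBinding below, which keeps one row per
+    (port_id, host) pair so the same distributed router port can carry a
+    binding on each host where it is used.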
+ """ + + __tablename__ = 'ml2_port_bindings' + + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + host = sa.Column(sa.String(255), nullable=False, default='') + vnic_type = sa.Column(sa.String(64), nullable=False, + default=portbindings.VNIC_NORMAL) + profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, + default='') + vif_type = sa.Column(sa.String(64), nullable=False) + vif_details = sa.Column(sa.String(4095), nullable=False, default='') + driver = sa.Column(sa.String(64)) + segment = sa.Column(sa.String(36), + sa.ForeignKey('ml2_network_segments.id', + ondelete="SET NULL")) + + # Add a relationship to the Port model in order to instruct SQLAlchemy to + # eagerly load port bindings + port = orm.relationship( + models_v2.Port, + backref=orm.backref("port_binding", + lazy='joined', uselist=False, + cascade='delete')) + + +class DVRPortBinding(model_base.BASEV2): + """Represent binding-related state of a DVR port. + + Port binding for all the ports associated to a DVR identified by router_id. + """ + + __tablename__ = 'ml2_dvr_port_bindings' + + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + host = sa.Column(sa.String(255), nullable=False, primary_key=True) + router_id = sa.Column(sa.String(36), nullable=True) + vif_type = sa.Column(sa.String(64), nullable=False) + vif_details = sa.Column(sa.String(4095), nullable=False, default='') + vnic_type = sa.Column(sa.String(64), nullable=False, + default=portbindings.VNIC_NORMAL) + profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, + default='') + cap_port_filter = sa.Column(sa.Boolean, nullable=False) + driver = sa.Column(sa.String(64)) + segment = sa.Column(sa.String(36), + sa.ForeignKey('ml2_network_segments.id', + ondelete="SET NULL")) + status = sa.Column(sa.String(16), nullable=False) + + # Add a relationship to the Port model in order to instruct SQLAlchemy to + # eagerly load port bindings + port = orm.relationship( + models_v2.Port, + backref=orm.backref("dvr_port_binding", + lazy='joined', uselist=False, + cascade='delete')) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/plugin.py new file mode 100644 index 00000000..04bfc7e1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/plugin.py @@ -0,0 +1,956 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
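+
+# For illustration only: Ml2Plugin is enabled as the Neutron core plugin in
+# neutron.conf, e.g. (a standard option; the value simply names the class
+# defined in this module):
+#
+#   [DEFAULT]
+#   core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+#
+# Its __init__() then builds the TypeManager and MechanismManager from the
+# [ml2] options and loads the network scheduler named by
+# network_scheduler_driver.
+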
+import contextlib + +from oslo.config import cfg +from oslo.db import exception as os_db_exception +from sqlalchemy import exc as sql_exc +from sqlalchemy.orm import exc as sa_exc + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as const +from neutron.common import exceptions as exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dvr_mac_db +from neutron.db import external_net_db +from neutron.db import extradhcpopt_db +from neutron.db import l3_dvrscheduler_db +from neutron.db import models_v2 +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import multiprovidernet as mpnet +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log +from neutron.plugins.common import constants as service_constants +from neutron.plugins.ml2.common import exceptions as ml2_exc +from neutron.plugins.ml2 import config # noqa +from neutron.plugins.ml2 import db +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2 import driver_context +from neutron.plugins.ml2 import managers +from neutron.plugins.ml2 import models +from neutron.plugins.ml2 import rpc + +LOG = log.getLogger(__name__) + +# REVISIT(rkukura): Move this and other network_type constants to +# providernet.py? +TYPE_MULTI_SEGMENT = 'multi-segment' + + +class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, + dvr_mac_db.DVRDbMixin, + external_net_db.External_net_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + addr_pair_db.AllowedAddressPairsMixin, + extradhcpopt_db.ExtraDhcpOptMixin, + l3_dvrscheduler_db.L3_DVRsch_db_mixin): + + """Implement the Neutron L2 abstractions using modules. + + Ml2Plugin is a Neutron plugin based on separately extensible sets + of network types and mechanisms for connecting to networks of + those types. The network types and mechanisms are implemented as + drivers loaded via Python entry points. Networks can be made up of + multiple segments (not yet fully implemented). + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. 
Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + # List of supported extensions + _supported_extension_aliases = ["provider", "external-net", "binding", + "quotas", "security-group", "agent", + "dhcp_agent_scheduler", + "multi-provider", "allowed-address-pairs", + "extra_dhcp_opt"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + # First load drivers, then initialize DB, then initialize drivers + self.type_manager = managers.TypeManager() + self.mechanism_manager = managers.MechanismManager() + super(Ml2Plugin, self).__init__() + self.type_manager.initialize() + self.mechanism_manager.initialize() + # bulk support depends on the underlying drivers + self.__native_bulk_support = self.mechanism_manager.native_bulk_support + + self._setup_rpc() + + # REVISIT(rkukura): Use stevedore for these? + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + + LOG.info(_("Modular L2 Plugin initialization complete")) + + def _setup_rpc(self): + self.notifier = rpc.AgentNotifierApi(topics.AGENT) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + + def start_rpc_listeners(self): + self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager), + agents_db.AgentExtRpcCallback()] + self.topic = topics.PLUGIN + self.conn = n_rpc.create_connection(new=True) + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + return self.conn.consume_in_threads() + + def _process_provider_segment(self, segment): + network_type = self._get_attribute(segment, provider.NETWORK_TYPE) + physical_network = self._get_attribute(segment, + provider.PHYSICAL_NETWORK) + segmentation_id = self._get_attribute(segment, + provider.SEGMENTATION_ID) + + if attributes.is_attr_set(network_type): + segment = {api.NETWORK_TYPE: network_type, + api.PHYSICAL_NETWORK: physical_network, + api.SEGMENTATION_ID: segmentation_id} + self.type_manager.validate_provider_segment(segment) + return segment + + msg = _("network_type required") + raise exc.InvalidInput(error_message=msg) + + def _process_provider_create(self, network): + segments = [] + + if any(attributes.is_attr_set(network.get(f)) + for f in (provider.NETWORK_TYPE, provider.PHYSICAL_NETWORK, + provider.SEGMENTATION_ID)): + # Verify that multiprovider and provider attributes are not set + # at the same time. 
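+            # For illustration, the two mutually exclusive request shapes
+            # distinguished here look roughly like:
+            #   provider form:      {'name': 'net1',
+            #                        provider.NETWORK_TYPE: 'vxlan',
+            #                        provider.SEGMENTATION_ID: 1001, ...}
+            #   multiprovider form: {'name': 'net1',
+            #                        mpnet.SEGMENTS: [{...}, {...}]}
+            # Supplying both at once raises the exception just below.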
+ if attributes.is_attr_set(network.get(mpnet.SEGMENTS)): + raise mpnet.SegmentsSetInConjunctionWithProviders() + + network_type = self._get_attribute(network, provider.NETWORK_TYPE) + physical_network = self._get_attribute(network, + provider.PHYSICAL_NETWORK) + segmentation_id = self._get_attribute(network, + provider.SEGMENTATION_ID) + segments = [{provider.NETWORK_TYPE: network_type, + provider.PHYSICAL_NETWORK: physical_network, + provider.SEGMENTATION_ID: segmentation_id}] + elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)): + segments = network[mpnet.SEGMENTS] + else: + return + + return [self._process_provider_segment(s) for s in segments] + + def _get_attribute(self, attrs, key): + value = attrs.get(key) + if value is attributes.ATTR_NOT_SPECIFIED: + value = None + return value + + def _extend_network_dict_provider(self, context, network): + id = network['id'] + segments = db.get_network_segments(context.session, id) + if not segments: + LOG.error(_("Network %s has no segments"), id) + network[provider.NETWORK_TYPE] = None + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + elif len(segments) > 1: + network[mpnet.SEGMENTS] = [ + {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE], + provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK], + provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]} + for segment in segments] + else: + segment = segments[0] + network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE] + network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK] + network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID] + + def _filter_nets_provider(self, context, nets, filters): + # TODO(rkukura): Implement filtering. + return nets + + def _process_port_binding(self, mech_context, context, attrs): + binding = mech_context._binding + port = mech_context.current + self._update_port_dict_binding(port, binding) + + host = attrs and attrs.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + + vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) + vnic_type_set = attributes.is_attr_set(vnic_type) + + # CLI can't send {}, so treat None as {} + profile = attrs and attrs.get(portbindings.PROFILE) + profile_set = profile is not None and \ + profile is not attributes.ATTR_NOT_SPECIFIED + if profile_set and not profile: + profile = {} + + if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: + if (not host_set and not vnic_type_set and not profile_set and + binding.segment): + return False + self._delete_port_binding(mech_context) + + # Return True only if an agent notification is needed. + # This will happen if a new host, vnic_type, or profile was specified + # that differs from the current one. Note that host_set is True + # even if the host is an empty string + ret_value = ((host_set and binding.get('host') != host) or + (vnic_type_set and + binding.get('vnic_type') != vnic_type) or + (profile_set and self._get_profile(binding) != profile)) + + if host_set: + binding.host = host + port[portbindings.HOST_ID] = host + if "compute:" in port['device_owner']: + self.dvr_update_router_addvm(context, port) + + if vnic_type_set: + binding.vnic_type = vnic_type + port[portbindings.VNIC_TYPE] = vnic_type + + if profile_set: + binding.profile = jsonutils.dumps(profile) + if len(binding.profile) > models.BINDING_PROFILE_LEN: + msg = _("binding:profile value too large") + raise exc.InvalidInput(error_message=msg) + port[portbindings.PROFILE] = profile + + # To try to [re]bind if host is non-empty. 
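+        # MechanismManager.bind_port() walks the ordered mechanism drivers;
+        # the first driver that establishes a binding has its name and the
+        # bound segment recorded on the binding, otherwise vif_type is set
+        # to VIF_TYPE_BINDING_FAILED.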
+ if binding.host: + self.mechanism_manager.bind_port(mech_context) + self._update_port_dict_binding(port, binding) + + # Update the port status if requested by the bound driver. + if binding.segment and mech_context._new_port_status: + # REVISIT(rkukura): This function is currently called + # inside a transaction with the port either newly + # created or locked for update. After the fix for bug + # 1276391 is merged, this will no longer be true, and + # the port status update will need to be handled in + # the transaction that commits the new binding. + port_db = db.get_port(mech_context._plugin_context.session, + port['id']) + port_db.status = mech_context._new_port_status + port['status'] = mech_context._new_port_status + + return ret_value + + def _process_dvr_port_binding(self, mech_context, context, attrs): + binding = mech_context.binding + + host = attrs and attrs.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + + if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: + if (not host_set and binding.segment and + self.mechanism_manager. + validate_port_binding(mech_context)): + return False + self.mechanism_manager.unbind_port(mech_context) + + if host_set: + binding.host = host + + if binding.host: + self.mechanism_manager.bind_port(mech_context) + + return True + + def _update_port_dict_binding(self, port, binding): + port[portbindings.HOST_ID] = binding.host + port[portbindings.VNIC_TYPE] = binding.vnic_type + port[portbindings.PROFILE] = self._get_profile(binding) + port[portbindings.VIF_TYPE] = binding.vif_type + port[portbindings.VIF_DETAILS] = self._get_vif_details(binding) + + def _get_vif_details(self, binding): + if binding.vif_details: + try: + return jsonutils.loads(binding.vif_details) + except Exception: + LOG.error(_("Serialized vif_details DB value '%(value)s' " + "for port %(port)s is invalid"), + {'value': binding.vif_details, + 'port': binding.port_id}) + return {} + + def _get_profile(self, binding): + if binding.profile: + try: + return jsonutils.loads(binding.profile) + except Exception: + LOG.error(_("Serialized profile DB value '%(value)s' for " + "port %(port)s is invalid"), + {'value': binding.profile, + 'port': binding.port_id}) + return {} + + def _delete_port_binding(self, mech_context): + binding = mech_context._binding + binding.vif_type = portbindings.VIF_TYPE_UNBOUND + binding.vif_details = '' + binding.driver = None + binding.segment = None + port = mech_context.current + self._update_port_dict_binding(port, binding) + + def _check_and_delete_dvr_port_binding(self, mech_context, context): + dvr_binding = mech_context.binding + if (not dvr_binding.router_id and + dvr_binding.status == const.PORT_STATUS_DOWN): + with context.session.begin(subtransactions=True): + LOG.debug("DVR: Deleting dvr binding for port %s", + dvr_binding.port_id) + context.session.delete(dvr_binding) + + def _ml2_extend_port_dict_binding(self, port_res, port_db): + # None when called during unit tests for other plugins. + if port_db.port_binding: + self._update_port_dict_binding(port_res, port_db.port_binding) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, ['_ml2_extend_port_dict_binding']) + + # Note - The following hook methods have "ml2" in their names so + # that they are not called twice during unit tests due to global + # registration of hooks in portbindings_db.py used by other + # plugins. 
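+    #
+    # For illustration (the host name is an example), the hooks below make
+    # a query such as
+    #     plugin.get_ports(context,
+    #                      filters={portbindings.HOST_ID: ['compute-1']})
+    # outer-join ml2_port_bindings and filter on
+    # models.PortBinding.host.in_(['compute-1']).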
+ + def _ml2_port_model_hook(self, context, original_model, query): + query = query.outerjoin(models.PortBinding, + (original_model.id == + models.PortBinding.port_id)) + return query + + def _ml2_port_result_filter_hook(self, query, filters): + values = filters and filters.get(portbindings.HOST_ID, []) + if not values: + return query + return query.filter(models.PortBinding.host.in_(values)) + + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, + "ml2_port_bindings", + '_ml2_port_model_hook', + None, + '_ml2_port_result_filter_hook') + + def _notify_port_updated(self, mech_context): + port = mech_context._port + segment = mech_context.bound_segment + if not segment: + # REVISIT(rkukura): This should notify agent to unplug port + network = mech_context.network.current + LOG.warning(_("In _notify_port_updated(), no bound segment for " + "port %(port_id)s on network %(network_id)s"), + {'port_id': port['id'], + 'network_id': network['id']}) + return + self.notifier.port_update(mech_context._plugin_context, port, + segment[api.NETWORK_TYPE], + segment[api.SEGMENTATION_ID], + segment[api.PHYSICAL_NETWORK]) + + # TODO(apech): Need to override bulk operations + + def create_network(self, context, network): + net_data = network['network'] + segments = self._process_provider_create(net_data) + tenant_id = self._get_tenant_id_for_create(context, net_data) + + session = context.session + with session.begin(subtransactions=True): + self._ensure_default_security_group(context, tenant_id) + result = super(Ml2Plugin, self).create_network(context, network) + network_id = result['id'] + self._process_l3_create(context, result, net_data) + # REVISIT(rkukura): Consider moving all segment management + # to TypeManager. + if segments: + for segment in segments: + self.type_manager.reserve_provider_segment(session, + segment) + db.add_network_segment(session, network_id, segment) + else: + segment = self.type_manager.allocate_tenant_segment(session) + db.add_network_segment(session, network_id, segment) + self._extend_network_dict_provider(context, result) + mech_context = driver_context.NetworkContext(self, context, + result) + self.mechanism_manager.create_network_precommit(mech_context) + + try: + self.mechanism_manager.create_network_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_network_postcommit " + "failed, deleting network '%s'"), result['id']) + self.delete_network(context, result['id']) + return result + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + original_network = super(Ml2Plugin, self).get_network(context, id) + updated_network = super(Ml2Plugin, self).update_network(context, + id, + network) + self._process_l3_update(context, updated_network, + network['network']) + self._extend_network_dict_provider(context, updated_network) + mech_context = driver_context.NetworkContext( + self, context, updated_network, + original_network=original_network) + self.mechanism_manager.update_network_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_network, potentially + # by re-calling update_network with the previous attributes. For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. 
+ self.mechanism_manager.update_network_postcommit(mech_context) + return updated_network + + def get_network(self, context, id, fields=None): + session = context.session + with session.begin(subtransactions=True): + result = super(Ml2Plugin, self).get_network(context, id, None) + self._extend_network_dict_provider(context, result) + + return self._fields(result, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(Ml2Plugin, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + for net in nets: + self._extend_network_dict_provider(context, net) + + nets = self._filter_nets_provider(context, nets, filters) + nets = self._filter_nets_l3(context, nets, filters) + + return [self._fields(net, fields) for net in nets] + + def delete_network(self, context, id): + # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network() + # function is not used because it auto-deletes ports and + # subnets from the DB without invoking the derived class's + # delete_port() or delete_subnet(), preventing mechanism + # drivers from being called. This approach should be revisited + # when the API layer is reworked during icehouse. + + LOG.debug(_("Deleting network %s"), id) + session = context.session + while True: + try: + with session.begin(subtransactions=True): + self._process_l3_delete(context, id) + + # Get ports to auto-delete. + ports = (session.query(models_v2.Port). + enable_eagerloads(False). + filter_by(network_id=id). + with_lockmode('update').all()) + LOG.debug(_("Ports to auto-delete: %s"), ports) + only_auto_del = all(p.device_owner + in db_base_plugin_v2. + AUTO_DELETE_PORT_OWNERS + for p in ports) + if not only_auto_del: + LOG.debug(_("Tenant-owned ports exist")) + raise exc.NetworkInUse(net_id=id) + + # Get subnets to auto-delete. + subnets = (session.query(models_v2.Subnet). + enable_eagerloads(False). + filter_by(network_id=id). + with_lockmode('update').all()) + LOG.debug(_("Subnets to auto-delete: %s"), subnets) + + if not (ports or subnets): + network = self.get_network(context, id) + mech_context = driver_context.NetworkContext(self, + context, + network) + self.mechanism_manager.delete_network_precommit( + mech_context) + + record = self._get_network(context, id) + LOG.debug(_("Deleting network record %s"), record) + session.delete(record) + + for segment in mech_context.network_segments: + self.type_manager.release_segment(session, segment) + + # The segment records are deleted via cascade from the + # network record, so explicit removal is not necessary. 
+ LOG.debug(_("Committing transaction")) + break + except os_db_exception.DBError as e: + with excutils.save_and_reraise_exception() as ctxt: + if isinstance(e.inner_exception, sql_exc.IntegrityError): + ctxt.reraise = False + msg = _("A concurrent port creation has occurred") + LOG.warning(msg) + continue + + for port in ports: + try: + self.delete_port(context, port.id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception auto-deleting port %s"), + port.id) + + for subnet in subnets: + try: + self.delete_subnet(context, subnet.id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception auto-deleting subnet %s"), + subnet.id) + + try: + self.mechanism_manager.delete_network_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the network. Ideally we'd notify the caller of + # the fact that an error occurred. + LOG.error(_("mechanism_manager.delete_network_postcommit failed")) + self.notifier.network_delete(context, id) + + def create_subnet(self, context, subnet): + session = context.session + with session.begin(subtransactions=True): + result = super(Ml2Plugin, self).create_subnet(context, subnet) + mech_context = driver_context.SubnetContext(self, context, result) + self.mechanism_manager.create_subnet_precommit(mech_context) + + try: + self.mechanism_manager.create_subnet_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_subnet_postcommit " + "failed, deleting subnet '%s'"), result['id']) + self.delete_subnet(context, result['id']) + return result + + def update_subnet(self, context, id, subnet): + session = context.session + with session.begin(subtransactions=True): + original_subnet = super(Ml2Plugin, self).get_subnet(context, id) + updated_subnet = super(Ml2Plugin, self).update_subnet( + context, id, subnet) + mech_context = driver_context.SubnetContext( + self, context, updated_subnet, original_subnet=original_subnet) + self.mechanism_manager.update_subnet_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_subnet, potentially + # by re-calling update_subnet with the previous attributes. For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. + self.mechanism_manager.update_subnet_postcommit(mech_context) + return updated_subnet + + def delete_subnet(self, context, id): + # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet() + # function is not used because it deallocates the subnet's addresses + # from ports in the DB without invoking the derived class's + # update_port(), preventing mechanism drivers from being called. + # This approach should be revisited when the API layer is reworked + # during icehouse. + + LOG.debug(_("Deleting subnet %s"), id) + session = context.session + while True: + with session.begin(subtransactions=True): + subnet = self.get_subnet(context, id) + # Get ports to auto-deallocate + allocated = (session.query(models_v2.IPAllocation). + filter_by(subnet_id=id). + join(models_v2.Port). + filter_by(network_id=subnet['network_id']). + with_lockmode('update').all()) + LOG.debug(_("Ports to auto-deallocate: %s"), allocated) + only_auto_del = all(not a.port_id or + a.ports.device_owner in db_base_plugin_v2. 
+ AUTO_DELETE_PORT_OWNERS + for a in allocated) + if not only_auto_del: + LOG.debug(_("Tenant-owned ports exist")) + raise exc.SubnetInUse(subnet_id=id) + + if not allocated: + mech_context = driver_context.SubnetContext(self, context, + subnet) + self.mechanism_manager.delete_subnet_precommit( + mech_context) + + LOG.debug(_("Deleting subnet record")) + record = self._get_subnet(context, id) + session.delete(record) + + LOG.debug(_("Committing transaction")) + break + + for a in allocated: + if a.port_id: + # calling update_port() for each allocation to remove the + # IP from the port and call the MechanismDrivers + data = {'port': + {'fixed_ips': [{'subnet_id': ip.subnet_id, + 'ip_address': ip.ip_address} + for ip in a.ports.fixed_ips + if ip.subnet_id != id]}} + try: + self.update_port(context, a.port_id, data) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception deleting fixed_ip from " + "port %s"), a.port_id) + session.delete(a) + + try: + self.mechanism_manager.delete_subnet_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the subnet. Ideally we'd notify the caller of + # the fact that an error occurred. + LOG.error(_("mechanism_manager.delete_subnet_postcommit failed")) + + def create_port(self, context, port): + attrs = port['port'] + attrs['status'] = const.PORT_STATUS_DOWN + + session = context.session + mech_context = None + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + result = super(Ml2Plugin, self).create_port(context, port) + self._process_port_create_security_group(context, result, sgids) + network = self.get_network(context, result['network_id']) + if (attrs['device_owner'] != + const.DEVICE_OWNER_DVR_INTERFACE): + # for DVR ports late binding happens via L3-Agent + mech_context = driver_context.PortContext(self, context, + result, + network) + self._process_port_binding(mech_context, context, attrs) + + result[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, result, + attrs.get(addr_pair.ADDRESS_PAIRS))) + self._process_port_create_extra_dhcp_opts(context, result, + dhcp_opts) + #self.l3_agent_notify_for_vmarp_table(context, result['id'], 'add') + if mech_context: + self.mechanism_manager.create_port_precommit(mech_context) + + try: + if mech_context: + self.mechanism_manager.create_port_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_port_postcommit " + "failed, deleting port '%s'"), result['id']) + self.delete_port(context, result['id']) + self.notify_security_groups_member_updated(context, result) + return result + + def update_port(self, context, id, port): + attrs = port['port'] + need_port_update_notify = False + + session = context.session + with session.begin(subtransactions=True): + try: + port_db = (session.query(models_v2.Port). + enable_eagerloads(False). 
+ filter_by(id=id).with_lockmode('update').one()) + except sa_exc.NoResultFound: + raise exc.PortNotFound(port_id=id) + original_port = self._make_port_dict(port_db) + updated_port = super(Ml2Plugin, self).update_port(context, id, + port) + if addr_pair.ADDRESS_PAIRS in port['port']: + need_port_update_notify |= ( + self.update_address_pairs_on_port(context, id, port, + original_port, + updated_port)) + need_port_update_notify |= self.update_security_group_on_port( + context, id, port, original_port, updated_port) + network = self.get_network(context, original_port['network_id']) + need_port_update_notify |= self._update_extra_dhcp_opts_on_port( + context, id, port, updated_port) + mech_context = driver_context.PortContext( + self, context, updated_port, network, + original_port=original_port) + need_port_update_notify |= self._process_port_binding( + mech_context, context, attrs) + self.mechanism_manager.update_port_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_port, potentially + # by re-calling update_port with the previous attributes. For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. + self.mechanism_manager.update_port_postcommit(mech_context) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + self._notify_port_updated(mech_context) + + return updated_port + + def update_dvr_port_binding(self, context, id, port): + attrs = port['port'] + need_port_update_notify = False + + host = attrs and attrs.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + + if not host_set: + LOG.error(_("No Host supplied to bind DVR Port %s"), id) + return + + binding = db.get_dvr_port_binding_by_host(port_id=id, host=host, + session=None) + if ((not binding) or + (binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED)): + session = context.session + with session.begin(subtransactions=True): + if (not binding): + binding = db.ensure_dvr_port_binding(session, id, + host, + attrs['device_id']) + orig_port = super(Ml2Plugin, self).get_port(context, id) + network = self.get_network(context, orig_port['network_id']) + mech_context = driver_context.PortContext(self, + context, orig_port, network, + original_port=orig_port, binding=binding) + need_port_update_notify |= self._process_dvr_port_binding( + mech_context, context, attrs) + + def delete_port(self, context, id, l3_port_check=True): + LOG.debug(_("Deleting port %s"), id) + l3plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if l3plugin and l3_port_check: + l3plugin.prevent_l3_port_deletion(context, id) + + session = context.session + mech_context = None + # REVISIT: Serialize this operation with a semaphore to prevent + # undesired eventlet yields leading to 'lock wait timeout' errors + with contextlib.nested(lockutils.lock('db-access'), + session.begin(subtransactions=True)): + try: + port_db = (session.query(models_v2.Port). + enable_eagerloads(False). 
+ filter_by(id=id).with_lockmode('update').one()) + except sa_exc.NoResultFound: + # the port existed when l3plugin.prevent_l3_port_deletion + # was called but now is already gone + LOG.debug(_("The port '%s' was deleted"), id) + return + port = self._make_port_dict(port_db) + + network = self.get_network(context, port['network_id']) + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + bindings = db.get_dvr_port_bindings(id) + for bind in bindings: + mech_context = driver_context.PortContext(self, context, + port, network, + binding=bind) + self.mechanism_manager.delete_port_precommit(mech_context) + LOG.debug("Calling base delete_port %s for DVR", id) + super(Ml2Plugin, self).delete_port(context, id) + else: + mech_context = driver_context.PortContext(self, context, port, + network) + if "compute:" in port['device_owner']: + self.dvr_deletens_ifnovm(context, id) + self.mechanism_manager.delete_port_precommit(mech_context) + self._delete_port_security_group_bindings(context, id) + LOG.debug(_("Calling base delete_port")) + if l3plugin: + l3plugin.disassociate_floatingips(context, id) + super(Ml2Plugin, self).delete_port(context, id) + + try: + # for both normal and DVR Interface ports, only one invocation of + # delete_port_postcommit + if mech_context: + self.mechanism_manager.delete_port_postcommit(mech_context) + else: + LOG.error(_("Unable to invoke delete_port_postcommit," + " mech_context NULL for port %s"), id) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the port. Ideally we'd notify the caller of the + # fact that an error occurred. + LOG.error(_("mechanism_manager.delete_port_postcommit failed for" + " port %s"), id) + self.notify_security_groups_member_updated(context, port) + + def _generate_dvr_port_status(self, session, port_id): + # an OR'ed value of status assigned to parent port from the + # dvrportbinding bucket + query = session.query(models.DVRPortBinding) + bindings = query.filter(models.DVRPortBinding.port_id == port_id).all() + for bind in bindings: + if bind.status == const.PORT_STATUS_ACTIVE: + return const.PORT_STATUS_ACTIVE + for bind in bindings: + if bind.status == const.PORT_STATUS_DOWN: + return const.PORT_STATUS_DOWN + return const.PORT_STATUS_BUILD + + def update_port_status(self, context, port_id, status, host=None): + updated = False + session = context.session + # REVISIT: Serialize this operation with a semaphore to prevent + # undesired eventlet yields leading to 'lock wait timeout' errors + with contextlib.nested(lockutils.lock('db-access'), + session.begin(subtransactions=True)): + port = db.get_port(session, port_id) + if not port: + LOG.warning(_("Port %(port)s updated up by agent not found"), + {'port': port_id}) + return False + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + binding = db.get_dvr_port_binding_by_host(port_id=port['id'], + host=host, + session=session) + if not binding: + LOG.error(_("Binding info for port %s not found"), + port_id) + return False + binding['status'] = status + binding.update(binding) + + # binding already updated + with contextlib.nested(lockutils.lock('db-access'), + session.begin(subtransactions=True)): + port = db.get_port(session, port_id) + if not port: + LOG.warning(_("Port %(port)s updated up by agent not found"), + {'port': port_id}) + return False + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + original_port = self._make_port_dict(port) + network = self.get_network(context, + 
original_port['network_id']) + port.status = self._generate_dvr_port_status(session, + port['id']) + updated_port = self._make_port_dict(port) + mech_context = (driver_context.PortContext( + self, context, updated_port, network, + original_port=original_port, + binding=binding)) + self.mechanism_manager.update_port_precommit(mech_context) + updated = True + elif port.status != status: + original_port = self._make_port_dict(port) + port.status = status + updated_port = self._make_port_dict(port) + network = self.get_network(context, + original_port['network_id']) + mech_context = driver_context.PortContext( + self, context, updated_port, network, + original_port=original_port) + self.mechanism_manager.update_port_precommit(mech_context) + updated = True + + if updated: + self.mechanism_manager.update_port_postcommit(mech_context) + + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + self._check_and_delete_dvr_port_binding(mech_context, context) + + return True + + def get_bindinghost_by_portid(self, port_id): + return db.get_port_binding_host(port_id) + + def get_dvr_port_binding_by_host(self, context, port_id, host): + session = context.session + return db.get_dvr_port_binding_by_host(port_id, host, + session) + + def port_bound_to_host(self, context, port_id, host): + try: + port = self.get_port(context, port_id) + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + bindings = db.get_dvr_port_bindings(port_id) + for b in bindings: + if (b.host == host): + #LOG.debug(_("Binding with host %s exists for port %s") + # % (host, port_id)) + return True + LOG.debug(_("No Binding exists for port %s"), port_id) + return False + else: + port_host = db.get_port_binding_host(port_id) + return (port_host == host) + except exc.PortNotFound: + LOG.debug(_("Port not found %s"), port_id) + return False diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/rpc.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/rpc.py new file mode 100644 index 00000000..080e0629 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ml2/rpc.py @@ -0,0 +1,276 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc import dvr_rpc +from neutron.common import constants as q_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import api as db_api +from neutron.db import dhcp_rpc_base +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron import manager +from neutron.openstack.common import log +from neutron.openstack.common import uuidutils +from neutron.plugins.ml2 import db +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_tunnel +# REVISIT(kmestery): Allow the type and mechanism drivers to supply the +# mixins and eventually remove the direct dependencies on type_tunnel. 
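+
+# For illustration only (the device name is a made-up example): an L2 agent
+# calls get_device_details() with something like device='tapa1b2c3d4-e5';
+# _device_to_port_id() strips the 'tap' prefix defined below to recover the
+# port id, the port binding is fetched or created, and the reply carries
+# the bound segment's network_type, segmentation_id and physical_network
+# together with the port's fixed_ips and device_owner.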
+ +LOG = log.getLogger(__name__) + +TAP_DEVICE_PREFIX = 'tap' +TAP_DEVICE_PREFIX_LENGTH = 3 + + +class RpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + dvr_rpc.DVRServerRpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin, + type_tunnel.TunnelRpcCallbackMixin): + + RPC_API_VERSION = '1.3' + # history + # 1.0 Initial version (from openvswitch/linuxbridge) + # 1.1 Support Security Group RPC + # 1.2 Support get_devices_details_list + # 1.3 Support Distributed Virtual Router (DVR) + + def __init__(self, notifier, type_manager): + self.setup_tunnel_callback_mixin(notifier, type_manager) + super(RpcCallbacks, self).__init__() + + @classmethod + def _device_to_port_id(cls, device): + # REVISIT(rkukura): Consider calling into MechanismDrivers to + # process device names, or having MechanismDrivers supply list + # of device prefixes to strip. + if device.startswith(TAP_DEVICE_PREFIX): + return device[TAP_DEVICE_PREFIX_LENGTH:] + else: + # REVISIT(irenab): Consider calling into bound MD to + # handle the get_device_details RPC, then remove the 'else' clause + if not uuidutils.is_uuid_like(device): + port = db.get_port_from_device_mac(device) + if port: + return port.id + return device + + @classmethod + def get_port_from_device(cls, device): + port_id = cls._device_to_port_id(device) + port = db.get_port_and_sgs(port_id) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + LOG.debug("Device %(device)s details requested by agent " + "%(agent_id)s with host %(host)s", + {'device': device, 'agent_id': agent_id, 'host': host}) + port_id = self._device_to_port_id(device) + + session = db_api.get_session() + with session.begin(subtransactions=True): + port = db.get_port(session, port_id) + if not port: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s not found in database"), + {'device': device, 'agent_id': agent_id}) + return {'device': device} + + segments = db.get_network_segments(session, port.network_id) + if not segments: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s has network %(network_id)s with " + "no segments"), + {'device': device, + 'agent_id': agent_id, + 'network_id': port.network_id}) + return {'device': device} + + if port['device_owner'] == q_const.DEVICE_OWNER_DVR_INTERFACE: + binding = db.ensure_dvr_port_binding(session, port_id, host) + else: + binding = db.ensure_port_binding(session, port.id) + + if not binding.segment: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s on network %(network_id)s not " + "bound, vif_type: %(vif_type)s"), + {'device': device, + 'agent_id': agent_id, + 'network_id': port.network_id, + 'vif_type': binding.vif_type}) + return {'device': device} + + segment = self._find_segment(segments, binding.segment) + if not segment: + LOG.warning(_("Device %(device)s requested by agent " + "%(agent_id)s on network %(network_id)s " + "invalid segment, vif_type: %(vif_type)s"), + {'device': device, + 'agent_id': agent_id, + 'network_id': port.network_id, + 'vif_type': binding.vif_type}) + return {'device': device} + + new_status = (q_const.PORT_STATUS_BUILD if port.admin_state_up + else q_const.PORT_STATUS_DOWN) + if port.status != new_status: + plugin = manager.NeutronManager.get_plugin() + plugin.update_port_status(rpc_context, + port_id, + new_status, + host) + 
port.status = new_status + entry = {'device': device, + 'network_id': port.network_id, + 'port_id': port.id, + 'admin_state_up': port.admin_state_up, + 'network_type': segment[api.NETWORK_TYPE], + 'segmentation_id': segment[api.SEGMENTATION_ID], + 'physical_network': segment[api.PHYSICAL_NETWORK], + 'fixed_ips': port['fixed_ips'], + 'device_owner': port['device_owner']} + LOG.debug(_("Returning: %s"), entry) + return entry + + def get_devices_details_list(self, rpc_context, **kwargs): + return [ + self.get_device_details( + rpc_context, + device=device, + **kwargs + ) + for device in kwargs.pop('devices', []) + ] + + def _find_segment(self, segments, segment_id): + for segment in segments: + if segment[api.ID] == segment_id: + return segment + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + # TODO(garyk) - live migration and port status + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + LOG.debug(_("Device %(device)s no longer exists at agent " + "%(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + port_id = self._device_to_port_id(device) + port_exists = True + if (host and not plugin.port_bound_to_host(rpc_context, + port_id, host)): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return {'device': device, + 'exists': port_exists} + + port_exists = plugin.update_port_status(rpc_context, port_id, + q_const.PORT_STATUS_DOWN, + host) + + return {'device': device, + 'exists': port_exists} + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + LOG.debug(_("Device %(device)s up at agent %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + port_id = self._device_to_port_id(device) + if (host and not plugin.port_bound_to_host(rpc_context, + port_id, host)): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return + + plugin.update_port_status(rpc_context, port_id, + q_const.PORT_STATUS_ACTIVE, + host) + + def get_dvr_mac_address_by_host(self, rpc_context, **kwargs): + host = kwargs.get('host') + LOG.debug("DVR Agent requests mac_address for host %r", host) + return super(RpcCallbacks, self).get_dvr_mac_address_by_host( + rpc_context, host) + + def get_compute_ports_on_host_by_subnet(self, rpc_context, **kwargs): + host = kwargs.get('host') + subnet = kwargs.get('subnet') + LOG.debug("DVR Agent requests list of VM ports on host %r", host) + return super(RpcCallbacks, self).get_compute_ports_on_host_by_subnet( + rpc_context, host, subnet) + + def get_subnet_for_dvr(self, rpc_context, **kwargs): + subnet = kwargs.get('subnet') + return super(RpcCallbacks, self).get_subnet_for_dvr(rpc_context, + subnet) + + +class AgentNotifierApi(n_rpc.RpcProxy, + dvr_rpc.DVRAgentRpcApiMixin, + sg_rpc.SecurityGroupAgentRpcApiMixin, + type_tunnel.TunnelAgentRpcApiMixin): + """Agent side of the openvswitch rpc API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + update_dhcp_port, and removed get_dhcp_port methods. 
+ + """ + + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, network_type, segmentation_id, + physical_network): + self.fanout_cast(context, + self.make_msg('port_update', + port=port, + network_type=network_type, + segmentation_id=segmentation_id, + physical_network=physical_network), + topic=self.topic_port_update) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/__init__.py new file mode 100644 index 00000000..7182ac41 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/__init__.py new file mode 100644 index 00000000..7182ac41 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py new file mode 100644 index 00000000..e3e0e4fe --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py @@ -0,0 +1,438 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import constants as q_constants +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils as q_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.mlnx.agent import utils +from neutron.plugins.mlnx.common import config # noqa +from neutron.plugins.mlnx.common import exceptions + +LOG = logging.getLogger(__name__) + + +class EswitchManager(object): + def __init__(self, interface_mappings, endpoint, timeout): + self.utils = utils.EswitchUtils(endpoint, timeout) + self.interface_mappings = interface_mappings + self.network_map = {} + self.utils.define_fabric_mappings(interface_mappings) + + def get_port_id_by_mac(self, port_mac): + for network_id, data in self.network_map.iteritems(): + for port in data['ports']: + if port['port_mac'] == port_mac: + return port['port_id'] + err_msg = _("Agent cache inconsistency - port id " + "is not stored for %s") % port_mac + LOG.error(err_msg) + raise exceptions.MlnxException(err_msg=err_msg) + + def get_vnics_mac(self): + return set(self.utils.get_attached_vnics().keys()) + + def vnic_port_exists(self, port_mac): + return port_mac in self.utils.get_attached_vnics() + + def remove_network(self, network_id): + if network_id in self.network_map: + del self.network_map[network_id] + else: + LOG.debug(_("Network %s not defined on Agent."), network_id) + + def port_down(self, network_id, physical_network, port_mac): + """Sets port to down. + + Check internal network map for port data. + If port exists set port to Down + """ + for network_id, data in self.network_map.iteritems(): + for port in data['ports']: + if port['port_mac'] == port_mac: + self.utils.port_down(physical_network, port_mac) + return + LOG.info(_('Network %s is not available on this agent'), network_id) + + def port_up(self, network_id, network_type, + physical_network, seg_id, port_id, port_mac): + """Sets port to up. + + Update internal network map with port data. 
+ - Check if vnic defined + - configure eswitch vport + - set port to Up + """ + LOG.debug(_("Connecting port %s"), port_id) + + if network_id not in self.network_map: + self.provision_network(port_id, port_mac, + network_id, network_type, + physical_network, seg_id) + net_map = self.network_map[network_id] + net_map['ports'].append({'port_id': port_id, 'port_mac': port_mac}) + + if network_type == p_const.TYPE_VLAN: + LOG.info(_('Binding Segmentation ID %(seg_id)s' + 'to eSwitch for vNIC mac_address %(mac)s'), + {'seg_id': seg_id, + 'mac': port_mac}) + self.utils.set_port_vlan_id(physical_network, + seg_id, + port_mac) + self.utils.port_up(physical_network, port_mac) + else: + LOG.error(_('Unsupported network type %s'), network_type) + + def port_release(self, port_mac): + """Clear port configuration from eSwitch.""" + for network_id, net_data in self.network_map.iteritems(): + for port in net_data['ports']: + if port['port_mac'] == port_mac: + self.utils.port_release(net_data['physical_network'], + port['port_mac']) + return + LOG.info(_('Port_mac %s is not available on this agent'), port_mac) + + def provision_network(self, port_id, port_mac, + network_id, network_type, + physical_network, segmentation_id): + LOG.info(_("Provisioning network %s"), network_id) + if network_type == p_const.TYPE_VLAN: + LOG.debug(_("Creating VLAN Network")) + else: + LOG.error(_("Unknown network type %(network_type)s " + "for network %(network_id)s"), + {'network_type': network_type, + 'network_id': network_id}) + return + data = { + 'physical_network': physical_network, + 'network_type': network_type, + 'ports': [], + 'vlan_id': segmentation_id} + self.network_map[network_id] = data + + +class MlnxEswitchRpcCallbacks(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + # Set RPC API version to 1.0 by default. 
+ # history + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, context, agent): + super(MlnxEswitchRpcCallbacks, self).__init__() + self.context = context + self.agent = agent + self.eswitch = agent.eswitch + self.sg_agent = agent + + def network_delete(self, context, **kwargs): + LOG.debug(_("network_delete received")) + network_id = kwargs.get('network_id') + if not network_id: + LOG.warning(_("Invalid Network ID, cannot remove Network")) + else: + LOG.debug(_("Delete network %s"), network_id) + self.eswitch.remove_network(network_id) + + def port_update(self, context, **kwargs): + LOG.debug(_("port_update received")) + port = kwargs.get('port') + net_type = kwargs.get('network_type') + segmentation_id = kwargs.get('segmentation_id') + if not segmentation_id: + # compatibility with pre-Havana RPC vlan_id encoding + segmentation_id = kwargs.get('vlan_id') + physical_network = kwargs.get('physical_network') + net_id = port['network_id'] + if self.eswitch.vnic_port_exists(port['mac_address']): + if 'security_groups' in port: + self.sg_agent.refresh_firewall() + try: + if port['admin_state_up']: + self.eswitch.port_up(net_id, + net_type, + physical_network, + segmentation_id, + port['id'], + port['mac_address']) + # update plugin about port status + self.agent.plugin_rpc.update_device_up(self.context, + port['mac_address'], + self.agent.agent_id, + cfg.CONF.host) + else: + self.eswitch.port_down(net_id, + physical_network, + port['mac_address']) + # update plugin about port status + self.agent.plugin_rpc.update_device_down( + self.context, + port['mac_address'], + self.agent.agent_id, + cfg.CONF.host) + except n_rpc.MessagingTimeout: + LOG.error(_("RPC timeout while updating port %s"), port['id']) + else: + LOG.debug(_("No port %s defined on agent."), port['id']) + + +class MlnxEswitchPluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class MlnxEswitchNeutronAgent(sg_rpc.SecurityGroupAgentRpcMixin): + # Set RPC API version to 1.0 by default. 
+ #RPC_API_VERSION = '1.0' + + def __init__(self, interface_mapping): + self._polling_interval = cfg.CONF.AGENT.polling_interval + self._setup_eswitches(interface_mapping) + configurations = {'interface_mappings': interface_mapping} + self.agent_state = { + 'binary': 'neutron-mlnx-agent', + 'host': cfg.CONF.host, + 'topic': q_constants.L2_AGENT_TOPIC, + 'configurations': configurations, + 'agent_type': q_constants.AGENT_TYPE_MLNX, + 'start_flag': True} + self._setup_rpc() + self.init_firewall() + + def _setup_eswitches(self, interface_mapping): + daemon = cfg.CONF.ESWITCH.daemon_endpoint + timeout = cfg.CONF.ESWITCH.request_timeout + self.eswitch = EswitchManager(interface_mapping, daemon, timeout) + + def _report_state(self): + try: + devices = len(self.eswitch.get_vnics_mac()) + self.agent_state.get('configurations')['devices'] = devices + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def _setup_rpc(self): + self.agent_id = 'mlnx-agent.%s' % socket.gethostname() + LOG.info(_("RPC agent_id: %s"), self.agent_id) + + self.topic = topics.AGENT + self.plugin_rpc = MlnxEswitchPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [MlnxEswitchRpcCallbacks(self.context, self)] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def update_ports(self, registered_ports): + ports = self.eswitch.get_vnics_mac() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def process_network_ports(self, port_info): + resync_a = False + resync_b = False + if port_info.get('added'): + LOG.debug(_("Ports added!")) + resync_a = self.treat_devices_added(port_info['added']) + if port_info.get('removed'): + LOG.debug(_("Ports removed!")) + resync_b = self.treat_devices_removed(port_info['removed']) + # If one of the above opertaions fails => resync with plugin + return (resync_a | resync_b) + + def treat_vif_port(self, port_id, port_mac, + network_id, network_type, + physical_network, segmentation_id, + admin_state_up): + if self.eswitch.vnic_port_exists(port_mac): + if admin_state_up: + self.eswitch.port_up(network_id, + network_type, + physical_network, + segmentation_id, + port_id, + port_mac) + else: + self.eswitch.port_down(network_id, physical_network, port_mac) + else: + LOG.debug(_("No port %s defined on agent."), port_id) + + def treat_devices_added(self, devices): + try: + devs_details_list = self.plugin_rpc.get_devices_details_list( + self.context, + devices, + self.agent_id) + except Exception as e: + LOG.debug("Unable to get device details for devices " + "with MAC address %(devices)s: due to %(exc)s", + {'devices': devices, 'exc': e}) + # resync is needed + return True + + for dev_details in devs_details_list: + device = dev_details['device'] + LOG.info(_("Adding port with mac %s"), 
device) + + if 'port_id' in dev_details: + LOG.info(_("Port %s updated"), device) + LOG.debug(_("Device details %s"), str(dev_details)) + self.treat_vif_port(dev_details['port_id'], + dev_details['device'], + dev_details['network_id'], + dev_details['network_type'], + dev_details['physical_network'], + dev_details['segmentation_id'], + dev_details['admin_state_up']) + if dev_details.get('admin_state_up'): + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id) + else: + LOG.debug(_("Device with mac_address %s not defined " + "on Neutron Plugin"), device) + return False + + def treat_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Removing device with mac_address %s"), device) + try: + port_id = self.eswitch.get_port_id_by_mac(device) + dev_details = self.plugin_rpc.update_device_down(self.context, + port_id, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("Removing port failed for device %(device)s " + "due to %(exc)s"), {'device': device, 'exc': e}) + resync = True + continue + if dev_details['exists']: + LOG.info(_("Port %s updated."), device) + else: + LOG.debug(_("Device %s not defined on plugin"), device) + self.eswitch.port_release(device) + return resync + + def daemon_loop(self): + sync = True + ports = set() + + LOG.info(_("eSwitch Agent Started!")) + + while True: + try: + start = time.time() + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + sync = False + + port_info = self.update_ports(ports) + # notify plugin about port deltas + if port_info: + LOG.debug(_("Agent loop process devices!")) + # If treat devices fails - must resync with plugin + sync = self.process_network_ports(port_info) + ports = port_info['current'] + except exceptions.RequestTimeout: + LOG.exception(_("Request timeout in agent event loop " + "eSwitchD is not responding - exiting...")) + raise SystemExit(1) + except Exception: + LOG.exception(_("Error in agent event loop")) + sync = True + # sleep till end of polling interval + elapsed = (time.time() - start) + if (elapsed < self._polling_interval): + time.sleep(self._polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)"), + {'polling_interval': self._polling_interval, + 'elapsed': elapsed}) + + +def main(): + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + + try: + interface_mappings = q_utils.parse_mappings( + cfg.CONF.ESWITCH.physical_interface_mappings) + except ValueError as e: + LOG.error(_("Parsing physical_interface_mappings failed: %s." + " Agent terminated!"), e) + sys.exit(1) + LOG.info(_("Interface mappings: %s"), interface_mappings) + + try: + agent = MlnxEswitchNeutronAgent(interface_mappings) + except Exception as e: + LOG.error(_("Failed on Agent initialisation : %s." + " Agent terminated!"), e) + sys.exit(1) + + # Start everything. + LOG.info(_("Agent initialised successfully, now running... 
")) + agent.daemon_loop() + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/utils.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/utils.py new file mode 100644 index 00000000..a7500863 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent/utils.py @@ -0,0 +1,142 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.mlnx.common import comm_utils +from neutron.plugins.mlnx.common import exceptions + +zmq = importutils.try_import('eventlet.green.zmq') + +LOG = logging.getLogger(__name__) + + +class EswitchUtils(object): + def __init__(self, daemon_endpoint, timeout): + if not zmq: + msg = _("Failed to import eventlet.green.zmq. " + "Won't connect to eSwitchD - exiting...") + LOG.error(msg) + raise SystemExit(1) + self.__conn = None + self.daemon = daemon_endpoint + self.timeout = timeout + + @property + def _conn(self): + if self.__conn is None: + context = zmq.Context() + socket = context.socket(zmq.REQ) + socket.setsockopt(zmq.LINGER, 0) + socket.connect(self.daemon) + self.__conn = socket + self.poller = zmq.Poller() + self.poller.register(self._conn, zmq.POLLIN) + return self.__conn + + @comm_utils.RetryDecorator(exceptions.RequestTimeout) + def send_msg(self, msg): + self._conn.send(msg) + + socks = dict(self.poller.poll(self.timeout)) + if socks.get(self._conn) == zmq.POLLIN: + recv_msg = self._conn.recv() + response = self.parse_response_msg(recv_msg) + return response + else: + self._conn.setsockopt(zmq.LINGER, 0) + self._conn.close() + self.poller.unregister(self._conn) + self.__conn = None + raise exceptions.RequestTimeout() + + def parse_response_msg(self, recv_msg): + msg = jsonutils.loads(recv_msg) + if msg['status'] == 'OK': + if 'response' in msg: + return msg.get('response') + return + elif msg['status'] == 'FAIL': + msg_dict = dict(action=msg['action'], reason=msg['reason']) + error_msg = _("Action %(action)s failed: %(reason)s") % msg_dict + else: + error_msg = _("Unknown operation status %s") % msg['status'] + LOG.error(error_msg) + raise exceptions.OperationFailed(err_msg=error_msg) + + def get_attached_vnics(self): + LOG.debug(_("get_attached_vnics")) + msg = jsonutils.dumps({'action': 'get_vnics', 'fabric': '*'}) + vnics = self.send_msg(msg) + return vnics + + def set_port_vlan_id(self, physical_network, + segmentation_id, port_mac): + LOG.debug(_("Set Vlan %(segmentation_id)s on Port %(port_mac)s " + "on Fabric %(physical_network)s"), + {'port_mac': port_mac, + 'segmentation_id': segmentation_id, + 'physical_network': physical_network}) + msg = jsonutils.dumps({'action': 'set_vlan', + 'fabric': physical_network, + 'port_mac': port_mac, + 'vlan': segmentation_id}) + self.send_msg(msg) + + def define_fabric_mappings(self, 
interface_mapping): + for fabric, phy_interface in interface_mapping.iteritems(): + LOG.debug(_("Define Fabric %(fabric)s on interface %(ifc)s"), + {'fabric': fabric, + 'ifc': phy_interface}) + msg = jsonutils.dumps({'action': 'define_fabric_mapping', + 'fabric': fabric, + 'interface': phy_interface}) + self.send_msg(msg) + + def port_up(self, fabric, port_mac): + LOG.debug(_("Port Up for %(port_mac)s on fabric %(fabric)s"), + {'port_mac': port_mac, 'fabric': fabric}) + msg = jsonutils.dumps({'action': 'port_up', + 'fabric': fabric, + 'ref_by': 'mac_address', + 'mac': 'port_mac'}) + self.send_msg(msg) + + def port_down(self, fabric, port_mac): + LOG.debug(_("Port Down for %(port_mac)s on fabric %(fabric)s"), + {'port_mac': port_mac, 'fabric': fabric}) + msg = jsonutils.dumps({'action': 'port_down', + 'fabric': fabric, + 'ref_by': 'mac_address', + 'mac': port_mac}) + self.send_msg(msg) + + def port_release(self, fabric, port_mac): + LOG.debug(_("Port Release for %(port_mac)s on fabric %(fabric)s"), + {'port_mac': port_mac, 'fabric': fabric}) + msg = jsonutils.dumps({'action': 'port_release', + 'fabric': fabric, + 'ref_by': 'mac_address', + 'mac': port_mac}) + self.send_msg(msg) + + def get_eswitch_ports(self, fabric): + # TODO(irena) - to implement for next phase + return {} + + def get_eswitch_id(self, fabric): + # TODO(irena) - to implement for next phase + return "" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent_notify_api.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent_notify_api.py new file mode 100644 index 00000000..b17ad7e6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/agent_notify_api.py @@ -0,0 +1,65 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + """Agent side of the Embedded Switch RPC API. + + API version history: + 1.0 - Initial version. + 1.1 - Added get_active_networks_info, create_dhcp_port, + and update_dhcp_port methods. 
+ """ + BASE_RPC_API_VERSION = '1.1' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic = topic + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def network_delete(self, context, network_id): + LOG.debug(_("Sending delete network message")) + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, physical_network, + network_type, vlan_id): + LOG.debug(_("Sending update port message")) + kwargs = {'port': port, + 'network_type': network_type, + 'physical_network': physical_network, + 'segmentation_id': vlan_id} + if cfg.CONF.AGENT.rpc_support_old_agents: + kwargs['vlan_id'] = vlan_id + msg = self.make_msg('port_update', **kwargs) + self.fanout_cast(context, msg, + topic=self.topic_port_update) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/__init__.py new file mode 100644 index 00000000..7182ac41 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/comm_utils.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/comm_utils.py new file mode 100644 index 00000000..834b5a2c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/comm_utils.py @@ -0,0 +1,64 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +from oslo.config import cfg + +from neutron.openstack.common import log as logging +from neutron.plugins.mlnx.common import config # noqa + +LOG = logging.getLogger(__name__) + + +class RetryDecorator(object): + """Retry decorator reruns a method 'retries' times if an exception occurs. 
+ + Decorator for retrying a method if exceptionToCheck exception occurs + If method raises exception, retries 'retries' times with increasing + back off period between calls with 'interval' multiplier + + :param exceptionToCheck: the exception to check + :param interval: initial delay between retries in seconds + :param retries: number of times to try before giving up + :raises: exceptionToCheck + """ + sleep_fn = time.sleep + + def __init__(self, exceptionToCheck, + interval=cfg.CONF.ESWITCH.request_timeout / 1000, + retries=cfg.CONF.ESWITCH.retries, + backoff_rate=cfg.CONF.ESWITCH.backoff_rate): + self.exc = exceptionToCheck + self.interval = interval + self.retries = retries + self.backoff_rate = backoff_rate + + def __call__(self, original_func): + def decorated(*args, **kwargs): + sleep_interval = self.interval + num_of_iter = self.retries + while num_of_iter > 0: + try: + return original_func(*args, **kwargs) + except self.exc: + LOG.debug(_("Request timeout - call again after " + "%s seconds"), sleep_interval) + RetryDecorator.sleep_fn(sleep_interval) + num_of_iter -= 1 + sleep_interval *= self.backoff_rate + + return original_func(*args, **kwargs) + return decorated diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/config.py new file mode 100644 index 00000000..50685986 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/config.py @@ -0,0 +1,78 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
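+# A minimal, illustrative configuration sketch for the options registered
+# below (section names follow the register_opts() calls; the mapping value
+# 'default:eth2' is only an example):
+#
+#     [MLNX]
+#     tenant_network_type = vlan
+#     network_vlan_ranges = default:1:1000
+#     physical_network_type = eth
+#
+#     [ESWITCH]
+#     physical_interface_mappings = default:eth2
+#     daemon_endpoint = tcp://127.0.0.1:60001
+#     request_timeout = 3000
+#
+#     [AGENT]
+#     polling_interval = 2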
+
+from oslo.config import cfg
+
+from neutron.agent.common import config
+from neutron.plugins.mlnx.common import constants
+
+DEFAULT_VLAN_RANGES = ['default:1:1000']
+DEFAULT_INTERFACE_MAPPINGS = []
+
+vlan_opts = [
+    cfg.StrOpt('tenant_network_type', default='vlan',
+               help=_("Network type for tenant networks "
+                      "(local, vlan, or none)")),
+    cfg.ListOpt('network_vlan_ranges',
+                default=DEFAULT_VLAN_RANGES,
+                help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
+                       "or <physical_network>")),
+    cfg.ListOpt('physical_network_type_mappings',
+                default=[],
+                help=_("List of <physical_network>:<physical_network_type> "
+                       " with physical_network_type is either eth or ib")),
+    cfg.StrOpt('physical_network_type', default='eth',
+               help=_("Physical network type for provider network "
+                      "(eth or ib)"))
+]
+
+
+eswitch_opts = [
+    cfg.ListOpt('physical_interface_mappings',
+                default=DEFAULT_INTERFACE_MAPPINGS,
+                help=_("List of <physical_network>:<physical_interface>")),
+    cfg.StrOpt('vnic_type',
+               default=constants.VIF_TYPE_DIRECT,
+               help=_("Type of VM network interface: mlnx_direct or "
+                      "hostdev")),
+    cfg.StrOpt('daemon_endpoint',
+               default='tcp://127.0.0.1:60001',
+               help=_('eswitch daemon end point')),
+    cfg.IntOpt('request_timeout', default=3000,
+               help=_("The number of milliseconds the agent will wait for "
+                      "response on request to daemon.")),
+    cfg.IntOpt('retries', default=3,
+               help=_("The number of retries the agent will send request "
+                      "to daemon before giving up")),
+    cfg.IntOpt('backoff_rate', default=2,
+               help=_("backoff rate multiplier for waiting period between "
+                      "retries for request to daemon, i.e. value of 2 will "
+                      " double the request timeout each retry")),
+]
+
+agent_opts = [
+    cfg.IntOpt('polling_interval', default=2,
+               help=_("The number of seconds the agent will wait between "
+                      "polling for local device changes.")),
+    cfg.BoolOpt('rpc_support_old_agents', default=False,
+                help=_("Enable server RPC compatibility with old agents")),
+]
+
+
+cfg.CONF.register_opts(vlan_opts, "MLNX")
+cfg.CONF.register_opts(eswitch_opts, "ESWITCH")
+cfg.CONF.register_opts(agent_opts, "AGENT")
+config.register_agent_state_opts_helper(cfg.CONF)
+config.register_root_helper(cfg.CONF)
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/constants.py
new file mode 100644
index 00000000..4ca82d75
--- /dev/null
+++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/constants.py
@@ -0,0 +1,26 @@
+# Copyright 2013 Mellanox Technologies, Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +LOCAL_VLAN_ID = -2 +FLAT_VLAN_ID = -1 + +# Values for physical network_type +TYPE_IB = 'ib' +TYPE_ETH = 'eth' + +VIF_TYPE_DIRECT = 'mlnx_direct' +VIF_TYPE_HOSTDEV = 'hostdev' + +VNIC_TYPE = 'vnic_type' diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/exceptions.py new file mode 100644 index 00000000..457a1006 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/common/exceptions.py @@ -0,0 +1,28 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.common import exceptions as qexc + + +class MlnxException(qexc.NeutronException): + message = _("Mlnx Exception: %(err_msg)s") + + +class RequestTimeout(qexc.NeutronException): + message = _("Request Timeout: no response from eSwitchD") + + +class OperationFailed(qexc.NeutronException): + message = _("Operation Failed: %(err_msg)s") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/__init__.py new file mode 100644 index 00000000..7182ac41 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/mlnx_db_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/mlnx_db_v2.py new file mode 100644 index 00000000..bf400ff7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/mlnx_db_v2.py @@ -0,0 +1,255 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
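+# Rough usage sketch of the VLAN allocation helpers defined below (an
+# illustration only; it assumes a 'default' physical network and a session
+# obtained from neutron.db.api):
+#
+#     ranges = {'default': [(1, 1000)]}
+#     sync_network_states(ranges)                        # seed the pool
+#     phy_net, seg_id = reserve_network(session)         # tenant network
+#     reserve_specific_network(session, 'default', 100)  # provider network
+#     release_network(session, phy_net, seg_id, ranges)  # back to the pool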
+ +from six import moves +from sqlalchemy.orm import exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.mlnx.common import config # noqa +from neutron.plugins.mlnx.db import mlnx_models_v2 + +LOG = logging.getLogger(__name__) + + +def _remove_non_allocatable_vlans(session, allocations, + physical_network, vlan_ids): + if physical_network in allocations: + for entry in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(entry.segmentation_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not entry.allocated: + # it's not, so remove it from table + LOG.debug(_( + "Removing vlan %(seg_id)s on " + "physical network " + "%(net)s from pool"), + {'seg_id': entry.segmentation_id, + 'net': physical_network}) + session.delete(entry) + del allocations[physical_network] + + +def _add_missing_allocatable_vlans(session, physical_network, vlan_ids): + for vlan_id in sorted(vlan_ids): + entry = mlnx_models_v2.SegmentationIdAllocation(physical_network, + vlan_id) + session.add(entry) + + +def _remove_unconfigured_vlans(session, allocations): + for entries in allocations.itervalues(): + for entry in entries: + if not entry.allocated: + LOG.debug(_("Removing vlan %(seg_id)s on physical " + "network %(net)s from pool"), + {'seg_id': entry.segmentation_id, + 'net': entry.physical_network}) + session.delete(entry) + + +def sync_network_states(network_vlan_ranges): + """Synchronize network_states table with current configured VLAN ranges.""" + + session = db.get_session() + with session.begin(): + # get existing allocations for all physical networks + allocations = dict() + entries = (session.query(mlnx_models_v2.SegmentationIdAllocation). + all()) + for entry in entries: + allocations.setdefault(entry.physical_network, set()).add(entry) + + # process vlan ranges for each configured physical network + for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): + # determine current configured allocatable vlans for this + # physical network + vlan_ids = set() + for vlan_range in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) + + # remove from table unallocated vlans not currently allocatable + _remove_non_allocatable_vlans(session, allocations, + physical_network, vlan_ids) + + # add missing allocatable vlans to table + _add_missing_allocatable_vlans(session, physical_network, vlan_ids) + + # remove from table unallocated vlans for any unconfigured physical + # networks + _remove_unconfigured_vlans(session, allocations) + + +def get_network_state(physical_network, segmentation_id): + """Get entry of specified network.""" + session = db.get_session() + qry = session.query(mlnx_models_v2.SegmentationIdAllocation) + qry = qry.filter_by(physical_network=physical_network, + segmentation_id=segmentation_id) + return qry.first() + + +def reserve_network(session): + with session.begin(subtransactions=True): + entry = (session.query(mlnx_models_v2.SegmentationIdAllocation). + filter_by(allocated=False). + with_lockmode('update'). 
+ first()) + if not entry: + raise n_exc.NoNetworkAvailable() + LOG.debug(_("Reserving vlan %(seg_id)s on physical network " + "%(net)s from pool"), + {'seg_id': entry.segmentation_id, + 'net': entry.physical_network}) + entry.allocated = True + return (entry.physical_network, entry.segmentation_id) + + +def reserve_specific_network(session, physical_network, segmentation_id): + with session.begin(subtransactions=True): + log_args = {'seg_id': segmentation_id, 'phy_net': physical_network} + try: + entry = (session.query(mlnx_models_v2.SegmentationIdAllocation). + filter_by(physical_network=physical_network, + segmentation_id=segmentation_id). + with_lockmode('update').one()) + if entry.allocated: + raise n_exc.VlanIdInUse(vlan_id=segmentation_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(seg_id)s " + "on physical network %(phy_net)s from pool"), + log_args) + entry.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific vlan %(seg_id)s on " + "physical network %(phy_net)s outside pool"), + log_args) + entry = mlnx_models_v2.SegmentationIdAllocation(physical_network, + segmentation_id) + entry.allocated = True + session.add(entry) + + +def release_network(session, physical_network, + segmentation_id, network_vlan_ranges): + with session.begin(subtransactions=True): + log_args = {'seg_id': segmentation_id, 'phy_net': physical_network} + try: + state = (session.query(mlnx_models_v2.SegmentationIdAllocation). + filter_by(physical_network=physical_network, + segmentation_id=segmentation_id). + with_lockmode('update'). + one()) + state.allocated = False + inside = False + for vlan_range in network_vlan_ranges.get(physical_network, []): + if (segmentation_id >= vlan_range[0] and + segmentation_id <= vlan_range[1]): + inside = True + break + if inside: + LOG.debug(_("Releasing vlan %(seg_id)s " + "on physical network " + "%(phy_net)s to pool"), + log_args) + else: + LOG.debug(_("Releasing vlan %(seg_id)s " + "on physical network " + "%(phy_net)s outside pool"), + log_args) + session.delete(state) + except exc.NoResultFound: + LOG.warning(_("vlan_id %(seg_id)s on physical network " + "%(phy_net)s not found"), + log_args) + + +def add_network_binding(session, network_id, network_type, + physical_network, vlan_id): + with session.begin(subtransactions=True): + binding = mlnx_models_v2.NetworkBinding(network_id, network_type, + physical_network, vlan_id) + session.add(binding) + + +def get_network_binding(session, network_id): + return (session.query(mlnx_models_v2.NetworkBinding). + filter_by(network_id=network_id).first()) + + +def add_port_profile_binding(session, port_id, vnic_type): + with session.begin(subtransactions=True): + binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type) + session.add(binding) + + +def get_port_profile_binding(session, port_id): + return (session.query(mlnx_models_v2.PortProfileBinding). 
+ filter_by(port_id=port_id).first()) + + +def get_port_from_device(device): + """Get port from database.""" + LOG.debug(_("get_port_from_device() called")) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id.startswith(device)) + port_and_sgs = query.all() + if not port_and_sgs: + return + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict['security_groups'] = [ + sg_id for port_in_db, sg_id in port_and_sgs if sg_id + ] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +def get_port_from_device_mac(device_mac): + """Get port from database.""" + LOG.debug(_("Get_port_from_device_mac() called")) + session = db.get_session() + qry = session.query(models_v2.Port).filter_by(mac_address=device_mac) + return qry.first() + + +def set_port_status(port_id, status): + """Set the port status.""" + LOG.debug(_("Set_port_status as %s called"), status) + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/mlnx_models_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/mlnx_models_v2.py new file mode 100644 index 00000000..8e55acea --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/db/mlnx_models_v2.py @@ -0,0 +1,84 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sqlalchemy as sa + +from neutron.db import model_base + + +class SegmentationIdAllocation(model_base.BASEV2): + """Represents allocation state of segmentation_id on physical network.""" + __tablename__ = 'segmentation_id_allocation' + + physical_network = sa.Column(sa.String(64), nullable=False, + primary_key=True) + segmentation_id = sa.Column(sa.Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = sa.Column(sa.Boolean, nullable=False, default=False) + + def __init__(self, physical_network, segmentation_id): + self.physical_network = physical_network + self.segmentation_id = segmentation_id + self.allocated = False + + def __repr__(self): + return "" % (self.physical_network, + self.segmentation_id, + self.allocated) + + +class NetworkBinding(model_base.BASEV2): + """Represents binding of virtual network. 
+ + Binds network to physical_network and segmentation_id + """ + __tablename__ = 'mlnx_network_bindings' + + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + network_type = sa.Column(sa.String(32), nullable=False) + physical_network = sa.Column(sa.String(64)) + segmentation_id = sa.Column(sa.Integer, nullable=False) + + def __init__(self, network_id, network_type, physical_network, vlan_id): + self.network_id = network_id + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = vlan_id + + def __repr__(self): + return "" % (self.network_id, + self.network_type, + self.physical_network, + self.segmentation_id) + + +class PortProfileBinding(model_base.BASEV2): + """Represents port profile binding to the port on virtual network.""" + __tablename__ = 'port_profile' + + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + vnic_type = sa.Column(sa.String(32), nullable=False) + + def __init__(self, port_id, vnic_type): + self.port_id = port_id + self.vnic_type = vnic_type + + def __repr__(self): + return "" % (self.port_id, + self.vnic_type) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/mlnx_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/mlnx_plugin.py new file mode 100644 index 00000000..79af9225 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/mlnx_plugin.py @@ -0,0 +1,510 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
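+# For illustration (the values below are assumptions, not part of the
+# patch): the provider extension handled by _process_provider_create()
+# further down accepts requests along the lines of
+#
+#     neutron net-create phys-vlan-net \
+#         --provider:network_type vlan \
+#         --provider:physical_network default \
+#         --provider:segmentation_id 100
+#
+# Omitting the provider attributes creates a tenant network of the type
+# configured via MLNX.tenant_network_type.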
+ +import sys + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.mlnx import agent_notify_api +from neutron.plugins.mlnx.common import constants +from neutron.plugins.mlnx.db import mlnx_db_v2 as db +from neutron.plugins.mlnx import rpc_callbacks + +LOG = logging.getLogger(__name__) + + +class MellanoxEswitchPlugin(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + portbindings_db.PortBindingMixin): + """Realization of Neutron API on Mellanox HCA embedded switch technology. + + Current plugin provides embedded HCA Switch connectivity. + Code is based on the Linux Bridge plugin content to + support consistency with L3 & DHCP Agents. + + A new VLAN is created for each network. An agent is relied upon + to perform the actual HCA configuration on each host. + + The provider extension is also supported. + + The port binding extension enables an external application relay + information to and from the plugin. + """ + + # This attribute specifies whether the plugin supports or not + # bulk operations. 
Name mangling is used in order to ensure it + # is qualified by class + __native_bulk_support = True + + _supported_extension_aliases = ["provider", "external-net", "router", + "ext-gw-mode", "binding", "quotas", + "security-group", "agent", "extraroute", + "l3_agent_scheduler", + "dhcp_agent_scheduler"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + """Start Mellanox Neutron Plugin.""" + super(MellanoxEswitchPlugin, self).__init__() + self._parse_network_config() + db.sync_network_states(self.network_vlan_ranges) + self._set_tenant_network_type() + self.vnic_type = cfg.CONF.ESWITCH.vnic_type + self.base_binding_dict = { + portbindings.VIF_TYPE: self.vnic_type, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + self._setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + LOG.debug(_("Mellanox Embedded Switch Plugin initialisation complete")) + + def _setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.endpoints = [rpc_callbacks.MlnxRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + self.notifier = agent_notify_api.AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + + def _parse_network_config(self): + self._parse_physical_network_types() + self._parse_network_vlan_ranges() + for network in self.network_vlan_ranges.keys(): + if not self.phys_network_type_maps.get(network): + self.phys_network_type_maps[network] = self.physical_net_type + + def _parse_physical_network_types(self): + """Parse physical network types configuration. + + Verify default physical network type is valid. + Parse physical network mappings. + """ + self.physical_net_type = cfg.CONF.MLNX.physical_network_type + if self.physical_net_type not in (constants.TYPE_ETH, + constants.TYPE_IB): + LOG.error(_("Invalid physical network type %(type)s." + "Server terminated!"), {'type': self.physical_net_type}) + raise SystemExit(1) + try: + self.phys_network_type_maps = utils.parse_mappings( + cfg.CONF.MLNX.physical_network_type_mappings) + except ValueError as e: + LOG.error(_("Parsing physical_network_type failed: %s." + " Server terminated!"), e) + raise SystemExit(1) + for network, type in self.phys_network_type_maps.iteritems(): + if type not in (constants.TYPE_ETH, constants.TYPE_IB): + LOG.error(_("Invalid physical network type %(type)s " + " for network %(net)s. 
Server terminated!"), + {'net': network, 'type': type}) + raise SystemExit(1) + LOG.info(_("Physical Network type mappings: %s"), + self.phys_network_type_maps) + + def _parse_network_vlan_ranges(self): + try: + self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.MLNX.network_vlan_ranges) + except Exception as ex: + LOG.error(_("%s. Server terminated!"), ex) + sys.exit(1) + LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) + + def _extend_network_dict_provider(self, context, network): + binding = db.get_network_binding(context.session, network['id']) + network[provider.NETWORK_TYPE] = binding.network_type + if binding.network_type == svc_constants.TYPE_FLAT: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = None + elif binding.network_type == svc_constants.TYPE_LOCAL: + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + else: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.segmentation_id + + def _set_tenant_network_type(self): + self.tenant_network_type = cfg.CONF.MLNX.tenant_network_type + if self.tenant_network_type not in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_LOCAL, + svc_constants.TYPE_NONE]: + LOG.error(_("Invalid tenant_network_type: %s. " + "Service terminated!"), + self.tenant_network_type) + sys.exit(1) + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_FLAT: + self._process_flat_net(segmentation_id_set) + segmentation_id = constants.FLAT_VLAN_ID + + elif network_type == svc_constants.TYPE_VLAN: + self._process_vlan_net(segmentation_id, segmentation_id_set) + + elif network_type == svc_constants.TYPE_LOCAL: + self._process_local_net(physical_network_set, + segmentation_id_set) + segmentation_id = constants.LOCAL_VLAN_ID + physical_network = None + + else: + msg = _("provider:network_type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + physical_network = self._process_net_type(network_type, + physical_network, + physical_network_set) + return (network_type, physical_network, segmentation_id) + + def _process_flat_net(self, segmentation_id_set): + if segmentation_id_set: + msg = _("provider:segmentation_id specified for flat network") + raise n_exc.InvalidInput(error_message=msg) + + def _process_vlan_net(self, segmentation_id, segmentation_id_set): + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if not utils.is_valid_vlan_tag(segmentation_id): + msg = (_("provider:segmentation_id out of range " + "(%(min_id)s through %(max_id)s)") % + {'min_id': q_const.MIN_VLAN_TAG, + 'max_id': q_const.MAX_VLAN_TAG}) + raise n_exc.InvalidInput(error_message=msg) + + def _process_local_net(self, physical_network_set, segmentation_id_set): + if physical_network_set: + msg = 
_("provider:physical_network specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + if segmentation_id_set: + msg = _("provider:segmentation_id specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + + def _process_net_type(self, network_type, + physical_network, + physical_network_set): + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + if physical_network_set: + if physical_network not in self.network_vlan_ranges: + msg = _("Unknown provider:physical_network " + "%s") % physical_network + raise n_exc.InvalidInput(error_message=msg) + elif 'default' in self.network_vlan_ranges: + physical_network = 'default' + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + return physical_network + + def _check_port_binding_for_net_type(self, vnic_type, net_type): + """ + VIF_TYPE_DIRECT is valid only for Ethernet fabric + """ + if net_type == constants.TYPE_ETH: + return vnic_type in (constants.VIF_TYPE_DIRECT, + constants.VIF_TYPE_HOSTDEV) + elif net_type == constants.TYPE_IB: + return vnic_type == constants.VIF_TYPE_HOSTDEV + return False + + def _process_port_binding_create(self, context, attrs): + binding_profile = attrs.get(portbindings.PROFILE) + binding_profile_set = attributes.is_attr_set(binding_profile) + + net_binding = db.get_network_binding(context.session, + attrs.get('network_id')) + phy_net = net_binding.physical_network + + if not binding_profile_set: + return self.vnic_type + if constants.VNIC_TYPE in binding_profile: + vnic_type = binding_profile[constants.VNIC_TYPE] + phy_net_type = self.phys_network_type_maps[phy_net] + if vnic_type in (constants.VIF_TYPE_DIRECT, + constants.VIF_TYPE_HOSTDEV): + if self._check_port_binding_for_net_type(vnic_type, + phy_net_type): + self.base_binding_dict[portbindings.VIF_TYPE] = vnic_type + return vnic_type + else: + msg = (_("Unsupported vnic type %(vnic_type)s " + "for physical network type %(net_type)s") % + {'vnic_type': vnic_type, 'net_type': phy_net_type}) + else: + msg = _("Invalid vnic_type on port_create") + else: + msg = _("vnic_type is not defined in port profile") + raise n_exc.InvalidInput(error_message=msg) + + def create_network(self, context, network): + (network_type, physical_network, + vlan_id) = self._process_provider_create(context, + network['network']) + session = context.session + with session.begin(subtransactions=True): + #set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + if not network_type: + # tenant network + network_type = self.tenant_network_type + if network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + elif network_type == svc_constants.TYPE_VLAN: + physical_network, vlan_id = db.reserve_network(session) + else: # TYPE_LOCAL + vlan_id = constants.LOCAL_VLAN_ID + else: + # provider network + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + db.reserve_specific_network(session, + physical_network, + vlan_id) + net = super(MellanoxEswitchPlugin, self).create_network(context, + network) + db.add_network_binding(session, net['id'], + network_type, + physical_network, + vlan_id) + + self._process_l3_create(context, net, network['network']) + self._extend_network_dict_provider(context, net) + # note - exception will rollback entire transaction + LOG.debug(_("Created network: %s"), net['id']) + return net + + def 
update_network(self, context, net_id, network): + LOG.debug(_("Update network")) + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + net = super(MellanoxEswitchPlugin, self).update_network(context, + net_id, + network) + self._process_l3_update(context, net, network['network']) + self._extend_network_dict_provider(context, net) + return net + + def delete_network(self, context, net_id): + LOG.debug(_("Delete network")) + session = context.session + with session.begin(subtransactions=True): + binding = db.get_network_binding(session, net_id) + self._process_l3_delete(context, net_id) + super(MellanoxEswitchPlugin, self).delete_network(context, + net_id) + if binding.segmentation_id != constants.LOCAL_VLAN_ID: + db.release_network(session, binding.physical_network, + binding.segmentation_id, + self.network_vlan_ranges) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is not necessary + self.notifier.network_delete(context, net_id) + + def get_network(self, context, net_id, fields=None): + session = context.session + with session.begin(subtransactions=True): + net = super(MellanoxEswitchPlugin, self).get_network(context, + net_id, + None) + self._extend_network_dict_provider(context, net) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(MellanoxEswitchPlugin, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + for net in nets: + self._extend_network_dict_provider(context, net) + + return [self._fields(net, fields) for net in nets] + + def _extend_port_dict_binding(self, context, port): + port_binding = db.get_port_profile_binding(context.session, + port['id']) + if port_binding: + port[portbindings.VIF_TYPE] = port_binding.vnic_type + binding = db.get_network_binding(context.session, + port['network_id']) + fabric = binding.physical_network + port[portbindings.PROFILE] = {'physical_network': fabric} + return port + + def create_port(self, context, port): + LOG.debug(_("create_port with %s"), port) + session = context.session + port_data = port['port'] + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + # Set port status as 'DOWN'. 
This will be updated by agent + port['port']['status'] = q_const.PORT_STATUS_DOWN + + vnic_type = self._process_port_binding_create(context, + port['port']) + + port = super(MellanoxEswitchPlugin, + self).create_port(context, port) + + self._process_portbindings_create_and_update(context, + port_data, + port) + db.add_port_profile_binding(context.session, port['id'], vnic_type) + + self._process_port_create_security_group( + context, port, sgids) + self.notify_security_groups_member_updated(context, port) + return self._extend_port_dict_binding(context, port) + + def get_port(self, context, id, fields=None): + port = super(MellanoxEswitchPlugin, self).get_port(context, + id, + fields) + self._extend_port_dict_binding(context, port) + return self._fields(port, fields) + + def get_ports(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + res_ports = [] + ports = super(MellanoxEswitchPlugin, + self).get_ports(context, filters, fields, sorts, + limit, marker, page_reverse) + for port in ports: + port = self._extend_port_dict_binding(context, port) + res_ports.append(self._fields(port, fields)) + return res_ports + + def update_port(self, context, port_id, port): + original_port = self.get_port(context, port_id) + session = context.session + need_port_update_notify = False + + with session.begin(subtransactions=True): + updated_port = super(MellanoxEswitchPlugin, self).update_port( + context, port_id, port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify = self.update_security_group_on_port( + context, port_id, port, original_port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + binding = db.get_network_binding(context.session, + updated_port['network_id']) + self.notifier.port_update(context, updated_port, + binding.physical_network, + binding.network_type, + binding.segmentation_id) + return self._extend_port_dict_binding(context, updated_port) + + def delete_port(self, context, port_id, l3_port_check=True): + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, port_id) + + session = context.session + with session.begin(subtransactions=True): + self.disassociate_floatingips(context, port_id) + port = self.get_port(context, port_id) + self._delete_port_security_group_bindings(context, port_id) + super(MellanoxEswitchPlugin, self).delete_port(context, port_id) + + self.notify_security_groups_member_updated(context, port) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/rpc_callbacks.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/rpc_callbacks.py new file mode 100644 index 00000000..cd61c3e3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/mlnx/rpc_callbacks.py @@ -0,0 +1,128 @@ +# Copyright 2013 Mellanox Technologies, Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from oslo.config import cfg + +from neutron.common import constants as q_const +from neutron.common import rpc as n_rpc +from neutron.db import api as db_api +from neutron.db import dhcp_rpc_base +from neutron.db import l3_rpc_base +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.openstack.common import log as logging +from neutron.plugins.mlnx.db import mlnx_db_v2 as db + +LOG = logging.getLogger(__name__) + + +class MlnxRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + # History + # 1.1 Support Security Group RPC + # 1.2 Support get_devices_details_list + RPC_API_VERSION = '1.2' + + #to be compatible with Linux Bridge Agent on Network Node + TAP_PREFIX_LEN = 3 + + @classmethod + def get_port_from_device(cls, device): + """Get port according to device. + + To maintain compatibility with Linux Bridge L2 Agent for DHCP/L3 + services get device either by linux bridge plugin + device name convention or by mac address + """ + port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:]) + if port: + port['device'] = device + else: + port = db.get_port_from_device_mac(device) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self.get_port_from_device(device) + if port: + binding = db.get_network_binding(db_api.get_session(), + port['network_id']) + entry = {'device': device, + 'physical_network': binding.physical_network, + 'network_type': binding.network_type, + 'segmentation_id': binding.segmentation_id, + 'network_id': port['network_id'], + 'port_mac': port['mac_address'], + 'port_id': port['id'], + 'admin_state_up': port['admin_state_up']} + if cfg.CONF.AGENT.rpc_support_old_agents: + entry['vlan_id'] = binding.segmentation_id + new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] + else q_const.PORT_STATUS_DOWN) + if port['status'] != new_status: + db.set_port_status(port['id'], new_status) + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def get_devices_details_list(self, rpc_context, **kwargs): + return [ + self.get_device_details( + rpc_context, + device=device, + **kwargs + ) + for device in kwargs.pop('devices', []) + ] + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self.get_port_from_device(device) + if port: + entry = {'device': device, + 'exists': True} + if port['status'] != q_const.PORT_STATUS_DOWN: + # Set port status to DOWN + db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + 
LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s up %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = self.get_port_from_device(device) + if port: + if port['status'] != q_const.PORT_STATUS_ACTIVE: + # Set port status to ACTIVE + db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE) + else: + LOG.debug(_("%s can not be found in database"), device) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/README b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/README new file mode 100644 index 00000000..694b80e9 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/README @@ -0,0 +1,13 @@ +Quantum NEC OpenFlow Plugin + + +# -- What's this? + +https://wiki.openstack.org/wiki/Neutron/NEC_OpenFlow_Plugin + + +# -- Installation + +Use QuickStart Script for this plugin. This provides you auto installation and +configuration of Nova, Neutron and Trema. +https://github.com/nec-openstack/quantum-openflow-plugin/tree/folsom diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/__init__.py new file mode 100644 index 00000000..cff1fb25 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/agent/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/agent/__init__.py new file mode 100644 index 00000000..cff1fb25 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/agent/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/agent/nec_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/agent/nec_neutron_agent.py new file mode 100644 index 00000000..241503af --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/agent/nec_neutron_agent.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python +# Copyright 2012 NEC Corporation. +# Based on ryu/openvswitch agents. +# +# Copyright 2012 Isaku Yamahata +# Copyright 2011 VMware, Inc. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from neutron.agent.linux import ovs_lib +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import constants as q_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron import context as q_context +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.nec.common import config + + +LOG = logging.getLogger(__name__) + + +class NECPluginApi(agent_rpc.PluginApi): + BASE_RPC_API_VERSION = '1.0' + + def update_ports(self, context, agent_id, datapath_id, + port_added, port_removed): + """RPC to update information of ports on Neutron Server.""" + LOG.info(_("Update ports: added=%(added)s, " + "removed=%(removed)s"), + {'added': port_added, 'removed': port_removed}) + self.call(context, + self.make_msg('update_ports', + topic=topics.AGENT, + agent_id=agent_id, + datapath_id=datapath_id, + port_added=port_added, + port_removed=port_removed)) + + +class NECAgentRpcCallback(n_rpc.RpcCallback): + + RPC_API_VERSION = '1.0' + + def __init__(self, context, agent, sg_agent): + super(NECAgentRpcCallback, self).__init__() + self.context = context + self.agent = agent + self.sg_agent = sg_agent + + def port_update(self, context, **kwargs): + LOG.debug(_("port_update received: %s"), kwargs) + port = kwargs.get('port') + # Validate that port is on OVS + vif_port = self.agent.int_br.get_vif_port_by_id(port['id']) + if not vif_port: + return + + if ext_sg.SECURITYGROUPS in port: + self.sg_agent.refresh_firewall() + + +class SecurityGroupServerRpcApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupServerRpcApiMixin): + + def __init__(self, topic): + super(SecurityGroupServerRpcApi, self).__init__( + topic=topic, default_version=sg_rpc.SG_RPC_VERSION) + + +class SecurityGroupAgentRpcCallback( + n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + RPC_API_VERSION = sg_rpc.SG_RPC_VERSION + + def __init__(self, context, sg_agent): + super(SecurityGroupAgentRpcCallback, self).__init__() + self.context = context + self.sg_agent = sg_agent + + +class SecurityGroupAgentRpc(sg_rpc.SecurityGroupAgentRpcMixin): + + def __init__(self, context): + self.context = context + self.plugin_rpc = SecurityGroupServerRpcApi(topics.PLUGIN) + self.init_firewall() + + +class NECNeutronAgent(object): + + def __init__(self, integ_br, root_helper, polling_interval): + '''Constructor. + + :param integ_br: name of the integration bridge. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to check the bridge. 
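+
+        RPC consumers and the periodic agent state report are set up
+        via setup_rpc().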
+ ''' + self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) + self.polling_interval = polling_interval + self.cur_ports = [] + self.need_sync = True + + self.datapath_id = "0x%s" % self.int_br.get_datapath_id() + + self.agent_state = { + 'binary': 'neutron-nec-agent', + 'host': config.CONF.host, + 'topic': q_const.L2_AGENT_TOPIC, + 'configurations': {}, + 'agent_type': q_const.AGENT_TYPE_NEC, + 'start_flag': True} + + self.setup_rpc() + + def setup_rpc(self): + self.host = socket.gethostname() + self.agent_id = 'nec-q-agent.%s' % self.host + LOG.info(_("RPC agent_id: %s"), self.agent_id) + + self.topic = topics.AGENT + self.context = q_context.get_admin_context_without_session() + + self.plugin_rpc = NECPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.sg_agent = SecurityGroupAgentRpc(self.context) + + # RPC network init + # Handle updates from service + self.callback_nec = NECAgentRpcCallback(self.context, + self, self.sg_agent) + self.callback_sg = SecurityGroupAgentRpcCallback(self.context, + self.sg_agent) + self.endpoints = [self.callback_nec, self.callback_sg] + # Define the listening consumer for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + report_interval = config.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def _report_state(self): + try: + # How many devices are likely used by a VM + num_devices = len(self.cur_ports) + self.agent_state['configurations']['devices'] = num_devices + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def _vif_port_to_port_info(self, vif_port): + return dict(id=vif_port.vif_id, port_no=vif_port.ofport, + mac=vif_port.vif_mac) + + def _process_security_group(self, port_added, port_removed): + if port_added: + devices_added = [p['id'] for p in port_added] + self.sg_agent.prepare_devices_filter(devices_added) + if port_removed: + self.sg_agent.remove_devices_filter(port_removed) + + def loop_handler(self): + try: + # self.cur_ports will be kept until loop_handler succeeds. + cur_ports = [] if self.need_sync else self.cur_ports + new_ports = [] + + port_added = [] + for vif_port in self.int_br.get_vif_ports(): + port_id = vif_port.vif_id + new_ports.append(port_id) + if port_id not in cur_ports: + port_info = self._vif_port_to_port_info(vif_port) + port_added.append(port_info) + + port_removed = [] + for port_id in cur_ports: + if port_id not in new_ports: + port_removed.append(port_id) + + if port_added or port_removed: + self.plugin_rpc.update_ports(self.context, + self.agent_id, self.datapath_id, + port_added, port_removed) + self._process_security_group(port_added, port_removed) + else: + LOG.debug(_("No port changed.")) + + self.cur_ports = new_ports + self.need_sync = False + except Exception: + LOG.exception(_("Error in agent event loop")) + self.need_sync = True + + def daemon_loop(self): + """Main processing loop for NEC Plugin Agent.""" + while True: + self.loop_handler() + time.sleep(self.polling_interval) + + +def main(): + common_config.init(sys.argv[1:]) + + common_config.setup_logging(config.CONF) + + # Determine which agent type to use. 
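+    # These values come from the [OVS] and [AGENT] option groups registered
+    # in neutron.plugins.nec.common.config (integration_bridge, root_helper
+    # and polling_interval).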
+ integ_br = config.OVS.integration_bridge + root_helper = config.AGENT.root_helper + polling_interval = config.AGENT.polling_interval + + agent = NECNeutronAgent(integ_br, root_helper, polling_interval) + + # Start everything. + agent.daemon_loop() + + +if __name__ == "__main__": + main() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/__init__.py new file mode 100644 index 00000000..cff1fb25 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/config.py new file mode 100644 index 00000000..76c85c8e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/config.py @@ -0,0 +1,82 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.plugins.nec.common import constants as nconst + + +ovs_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("Integration bridge to use")), +] + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), +] + +ofc_opts = [ + cfg.StrOpt('host', default='127.0.0.1', + help=_("Host to connect to")), + cfg.StrOpt('path_prefix', default='', + help=_("Base URL of OFC REST API. " + "It is prepended to each API request.")), + cfg.StrOpt('port', default='8888', + help=_("Port to connect to")), + cfg.StrOpt('driver', default='trema', + help=_("Driver to use")), + cfg.BoolOpt('enable_packet_filter', default=True, + help=_("Enable packet filter")), + cfg.BoolOpt('use_ssl', default=False, + help=_("Use SSL to connect")), + cfg.StrOpt('key_file', + help=_("Key file")), + cfg.StrOpt('cert_file', + help=_("Certificate file")), + cfg.BoolOpt('insecure_ssl', default=False, + help=_("Disable SSL certificate verification")), + cfg.IntOpt('api_max_attempts', default=3, + help=_("Maximum attempts per OFC API request." + "NEC plugin retries API request to OFC " + "when OFC returns ServiceUnavailable (503)." 
+ "The value must be greater than 0.")), +] + +provider_opts = [ + cfg.StrOpt('default_router_provider', + default=nconst.DEFAULT_ROUTER_PROVIDER, + help=_('Default router provider to use.')), + cfg.ListOpt('router_providers', + default=nconst.DEFAULT_ROUTER_PROVIDERS, + help=_('List of enabled router providers.')) +] + + +cfg.CONF.register_opts(ovs_opts, "OVS") +cfg.CONF.register_opts(agent_opts, "AGENT") +cfg.CONF.register_opts(ofc_opts, "OFC") +cfg.CONF.register_opts(provider_opts, "PROVIDER") +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) + +# shortcuts +CONF = cfg.CONF +OVS = cfg.CONF.OVS +AGENT = cfg.CONF.AGENT +OFC = cfg.CONF.OFC +PROVIDER = cfg.CONF.PROVIDER diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/constants.py new file mode 100644 index 00000000..cb7d43a3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/constants.py @@ -0,0 +1,22 @@ +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +ROUTER_PROVIDER_L3AGENT = 'l3-agent' +ROUTER_PROVIDER_OPENFLOW = 'openflow' + +DEFAULT_ROUTER_PROVIDERS = [ROUTER_PROVIDER_L3AGENT, ROUTER_PROVIDER_OPENFLOW] +DEFAULT_ROUTER_PROVIDER = ROUTER_PROVIDER_L3AGENT + +ROUTER_STATUS_ACTIVE = 'ACTIVE' +ROUTER_STATUS_ERROR = 'ERROR' diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/exceptions.py new file mode 100644 index 00000000..0a1571ba --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/exceptions.py @@ -0,0 +1,83 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Ryota MIBU + +from neutron.common import exceptions as qexc + + +class OFCException(qexc.NeutronException): + message = _("An OFC exception has occurred: %(reason)s") + + def __init__(self, **kwargs): + super(OFCException, self).__init__(**kwargs) + self.status = kwargs.get('status') + self.err_msg = kwargs.get('err_msg') + self.err_code = kwargs.get('err_code') + + +class OFCResourceNotFound(qexc.NotFound): + message = _("The specified OFC resource (%(resource)s) is not found.") + + +class NECDBException(qexc.NeutronException): + message = _("An exception occurred in NECPluginV2 DB: %(reason)s") + + +class OFCMappingNotFound(qexc.NotFound): + message = _("Neutron-OFC resource mapping for " + "%(resource)s %(neutron_id)s is not found. " + "It may be deleted during processing.") + + +class OFCServiceUnavailable(OFCException): + message = _("OFC returns Server Unavailable (503) " + "(Retry-After=%(retry_after)s)") + + def __init__(self, **kwargs): + super(OFCServiceUnavailable, self).__init__(**kwargs) + self.retry_after = kwargs.get('retry_after') + + +class PortInfoNotFound(qexc.NotFound): + message = _("PortInfo %(id)s could not be found") + + +class ProfilePortInfoInvalidDataPathId(qexc.InvalidInput): + message = _('Invalid input for operation: ' + 'datapath_id should be a hex string ' + 'with at most 8 bytes') + + +class ProfilePortInfoInvalidPortNo(qexc.InvalidInput): + message = _('Invalid input for operation: ' + 'port_no should be [0:65535]') + + +class RouterExternalGatewayNotSupported(qexc.BadRequest): + message = _("Router (provider=%(provider)s) does not support " + "an external network") + + +class ProviderNotFound(qexc.NotFound): + message = _("Provider %(provider)s could not be found") + + +class RouterOverLimit(qexc.Conflict): + message = _("Cannot create more routers with provider=%(provider)s") + + +class RouterProviderMismatch(qexc.Conflict): + message = _("Provider of Router %(router_id)s is %(provider)s. " + "This operation is supported only for router provider " + "%(expected_provider)s.") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/ofc_client.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/ofc_client.py new file mode 100644 index 00000000..3d3efc47 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/common/ofc_client.py @@ -0,0 +1,156 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Ryota MIBU + +import time + +import requests + +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import exceptions as nexc + + +LOG = logging.getLogger(__name__) + + +class OFCClient(object): + """A HTTP/HTTPS client for OFC Drivers.""" + + def __init__(self, host="127.0.0.1", port=8888, use_ssl=False, + key_file=None, cert_file=None, insecure_ssl=False): + """Creates a new client to some OFC. + + :param host: The host where service resides + :param port: The port where service resides + :param use_ssl: True to use SSL, False to use HTTP + :param key_file: The SSL key file to use if use_ssl is true + :param cert_file: The SSL cert file to use if use_ssl is true + :param insecure_ssl: Don't verify SSL certificate + """ + self.host = host + self.port = port + self.use_ssl = use_ssl + self.key_file = key_file + self.cert_file = cert_file + self.insecure_ssl = insecure_ssl + self.connection = None + + def _format_error_message(self, status, detail): + detail = ' ' + detail if detail else '' + return (_("Operation on OFC failed: %(status)s%(msg)s") % + {'status': status, 'msg': detail}) + + def _get_response(self, method, action, body=None): + headers = {"Content-Type": "application/json"} + protocol = "http" + certs = {'key_file': self.key_file, 'cert_file': self.cert_file} + certs = dict((x, certs[x]) for x in certs if certs[x] is not None) + verify = True + + if self.use_ssl: + protocol = "https" + if self.insecure_ssl: + verify = False + + url = "%s://%s:%d%s" % (protocol, self.host, int(self.port), + action) + + res = requests.request(method, url, data=body, headers=headers, + cert=certs, verify=verify) + return res + + def do_single_request(self, method, action, body=None): + action = config.OFC.path_prefix + action + LOG.debug(_("Client request: %(host)s:%(port)s " + "%(method)s %(action)s [%(body)s]"), + {'host': self.host, 'port': self.port, + 'method': method, 'action': action, 'body': body}) + if type(body) is dict: + body = json.dumps(body) + try: + res = self._get_response(method, action, body) + data = res.text + LOG.debug(_("OFC returns [%(status)s:%(data)s]"), + {'status': res.status_code, + 'data': data}) + + # Try to decode JSON data if possible. 
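+            # OFC may return an empty or non-JSON body (e.g. a plain-text
+            # error), so a failed json.loads() is ignored and the raw
+            # response text is kept as-is.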
+ try: + data = json.loads(data) + except (ValueError, TypeError): + pass + + if res.status_code in (requests.codes.OK, + requests.codes.CREATED, + requests.codes.ACCEPTED, + requests.codes.NO_CONTENT): + return data + elif res.status_code == requests.codes.SERVICE_UNAVAILABLE: + retry_after = res.headers.get('retry-after') + LOG.warning(_("OFC returns ServiceUnavailable " + "(retry-after=%s)"), retry_after) + raise nexc.OFCServiceUnavailable(retry_after=retry_after) + elif res.status_code == requests.codes.NOT_FOUND: + LOG.info(_("Specified resource %s does not exist on OFC "), + action) + raise nexc.OFCResourceNotFound(resource=action) + else: + LOG.warning(_("Operation on OFC failed: " + "status=%(status)s, detail=%(detail)s"), + {'status': res.status_code, 'detail': data}) + params = {'reason': _("Operation on OFC failed"), + 'status': res.status_code} + if isinstance(data, dict): + params['err_code'] = data.get('err_code') + params['err_msg'] = data.get('err_msg') + else: + params['err_msg'] = data + raise nexc.OFCException(**params) + except requests.exceptions.RequestException as e: + reason = _("Failed to connect OFC : %s") % e + LOG.error(reason) + raise nexc.OFCException(reason=reason) + + def do_request(self, method, action, body=None): + max_attempts = config.OFC.api_max_attempts + for i in range(max_attempts, 0, -1): + try: + return self.do_single_request(method, action, body) + except nexc.OFCServiceUnavailable as e: + with excutils.save_and_reraise_exception() as ctxt: + try: + wait_time = int(e.retry_after) + except (ValueError, TypeError): + wait_time = None + if i > 1 and wait_time: + LOG.info(_("Waiting for %s seconds due to " + "OFC Service_Unavailable."), wait_time) + time.sleep(wait_time) + ctxt.reraise = False + continue + + def get(self, action): + return self.do_request("GET", action) + + def post(self, action, body=None): + return self.do_request("POST", action, body=body) + + def put(self, action, body=None): + return self.do_request("PUT", action, body=body) + + def delete(self, action): + return self.do_request("DELETE", action) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/__init__.py new file mode 100644 index 00000000..cff1fb25 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/api.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/api.py new file mode 100644 index 00000000..ddd53ba5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/api.py @@ -0,0 +1,184 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +import sqlalchemy as sa + +from neutron.db import api as db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import config # noqa +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.db import models as nmodels + + +LOG = logging.getLogger(__name__) +OFP_VLAN_NONE = 0xffff + + +resource_map = {'ofc_tenant': nmodels.OFCTenantMapping, + 'ofc_network': nmodels.OFCNetworkMapping, + 'ofc_port': nmodels.OFCPortMapping, + 'ofc_router': nmodels.OFCRouterMapping, + 'ofc_packet_filter': nmodels.OFCFilterMapping} + + +# utitlity methods + +def _get_resource_model(resource): + return resource_map[resource] + + +def clear_db(base=model_base.BASEV2): + db.clear_db(base) + + +def get_ofc_item(session, resource, neutron_id): + model = _get_resource_model(resource) + if not model: + return + try: + return session.query(model).filter_by(neutron_id=neutron_id).one() + except sa.orm.exc.NoResultFound: + return + + +def get_ofc_id(session, resource, neutron_id): + ofc_item = get_ofc_item(session, resource, neutron_id) + if ofc_item: + return ofc_item.ofc_id + else: + raise nexc.OFCMappingNotFound(resource=resource, + neutron_id=neutron_id) + + +def exists_ofc_item(session, resource, neutron_id): + if get_ofc_item(session, resource, neutron_id): + return True + else: + return False + + +def find_ofc_item(session, resource, ofc_id): + try: + model = _get_resource_model(resource) + params = dict(ofc_id=ofc_id) + return (session.query(model).filter_by(**params).one()) + except sa.orm.exc.NoResultFound: + return None + + +def add_ofc_item(session, resource, neutron_id, ofc_id): + try: + model = _get_resource_model(resource) + params = dict(neutron_id=neutron_id, ofc_id=ofc_id) + item = model(**params) + with session.begin(subtransactions=True): + session.add(item) + session.flush() + except Exception as exc: + LOG.exception(exc) + raise nexc.NECDBException(reason=exc.message) + return item + + +def del_ofc_item(session, resource, neutron_id): + try: + model = _get_resource_model(resource) + with session.begin(subtransactions=True): + item = session.query(model).filter_by(neutron_id=neutron_id).one() + session.delete(item) + return True + except sa.orm.exc.NoResultFound: + LOG.warning(_("del_ofc_item(): NotFound item " + "(resource=%(resource)s, id=%(id)s) "), + {'resource': resource, 'id': neutron_id}) + return False + + +def get_portinfo(session, id): + try: + return (session.query(nmodels.PortInfo). + filter_by(id=id). 
+ one()) + except sa.orm.exc.NoResultFound: + return None + + +def add_portinfo(session, id, datapath_id='', port_no=0, + vlan_id=OFP_VLAN_NONE, mac=''): + try: + portinfo = nmodels.PortInfo(id=id, datapath_id=datapath_id, + port_no=port_no, vlan_id=vlan_id, mac=mac) + with session.begin(subtransactions=True): + session.add(portinfo) + except Exception as exc: + LOG.exception(exc) + raise nexc.NECDBException(reason=exc.message) + return portinfo + + +def del_portinfo(session, id): + try: + with session.begin(subtransactions=True): + portinfo = session.query(nmodels.PortInfo).filter_by(id=id).one() + session.delete(portinfo) + except sa.orm.exc.NoResultFound: + LOG.warning(_("del_portinfo(): NotFound portinfo for " + "port_id: %s"), id) + + +def get_active_ports_on_ofc(context, network_id, port_id=None): + """Retrieve ports on OFC on a given network. + + It returns a list of tuple (neutron port_id, OFC id). + """ + query = context.session.query(nmodels.OFCPortMapping) + query = query.join(models_v2.Port, + nmodels.OFCPortMapping.neutron_id == models_v2.Port.id) + query = query.filter(models_v2.Port.network_id == network_id) + if port_id: + query = query.filter(nmodels.OFCPortMapping.neutron_id == port_id) + + return [(p['neutron_id'], p['ofc_id']) for p in query] + + +def get_port_from_device(port_id): + """Get port from database.""" + LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id == port_id) + port_and_sgs = query.all() + if not port_and_sgs: + return None + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict[ext_sg.SECURITYGROUPS] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/models.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/models.py new file mode 100644 index 00000000..6ca0c4c1 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/models.py @@ -0,0 +1,69 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Ryota MIBU + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.db import model_base +from neutron.db import models_v2 + + +"""New mapping tables.""" + + +class OFCId(object): + """Resource ID on OpenFlow Controller.""" + ofc_id = sa.Column(sa.String(255), unique=True, nullable=False) + + +class NeutronId(object): + """Logical ID on Neutron.""" + neutron_id = sa.Column(sa.String(36), primary_key=True) + + +class OFCTenantMapping(model_base.BASEV2, NeutronId, OFCId): + """Represents a Tenant on OpenFlow Network/Controller.""" + + +class OFCNetworkMapping(model_base.BASEV2, NeutronId, OFCId): + """Represents a Network on OpenFlow Network/Controller.""" + + +class OFCPortMapping(model_base.BASEV2, NeutronId, OFCId): + """Represents a Port on OpenFlow Network/Controller.""" + + +class OFCRouterMapping(model_base.BASEV2, NeutronId, OFCId): + """Represents a router on OpenFlow Network/Controller.""" + + +class OFCFilterMapping(model_base.BASEV2, NeutronId, OFCId): + """Represents a Filter on OpenFlow Network/Controller.""" + + +class PortInfo(model_base.BASEV2): + """Represents a Virtual Interface.""" + id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + datapath_id = sa.Column(sa.String(36), nullable=False) + port_no = sa.Column(sa.Integer, nullable=False) + vlan_id = sa.Column(sa.Integer, nullable=False) + mac = sa.Column(sa.String(32), nullable=False) + port = orm.relationship( + models_v2.Port, + backref=orm.backref("portinfo", + lazy='joined', uselist=False, + cascade='delete')) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/packetfilter.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/packetfilter.py new file mode 100644 index 00000000..acfe9044 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/packetfilter.py @@ -0,0 +1,218 @@ +# Copyright 2012-2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Ryota MIBU + +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc as sa_exc +from sqlalchemy import sql + +from neutron.api.v2 import attributes +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.openstack.common import uuidutils +from neutron.plugins.nec.db import models as nmodels +from neutron.plugins.nec.extensions import packetfilter as ext_pf + + +PF_STATUS_ACTIVE = 'ACTIVE' +PF_STATUS_DOWN = 'DOWN' +PF_STATUS_ERROR = 'ERROR' + +INT_FIELDS = ('eth_type', 'src_port', 'dst_port') + + +class PacketFilter(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a packet filter.""" + name = sa.Column(sa.String(255)) + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + nullable=False) + priority = sa.Column(sa.Integer, nullable=False) + action = sa.Column(sa.String(16), nullable=False) + # condition + in_port = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + nullable=True) + src_mac = sa.Column(sa.String(32), nullable=False) + dst_mac = sa.Column(sa.String(32), nullable=False) + eth_type = sa.Column(sa.Integer, nullable=False) + src_cidr = sa.Column(sa.String(64), nullable=False) + dst_cidr = sa.Column(sa.String(64), nullable=False) + protocol = sa.Column(sa.String(16), nullable=False) + src_port = sa.Column(sa.Integer, nullable=False) + dst_port = sa.Column(sa.Integer, nullable=False) + # status + admin_state_up = sa.Column(sa.Boolean(), nullable=False) + status = sa.Column(sa.String(16), nullable=False) + + network = orm.relationship( + models_v2.Network, + backref=orm.backref('packetfilters', lazy='joined', cascade='delete'), + uselist=False) + in_port_ref = orm.relationship( + models_v2.Port, + backref=orm.backref('packetfilters', lazy='joined', cascade='delete'), + primaryjoin="Port.id==PacketFilter.in_port", + uselist=False) + + +class PacketFilterDbMixin(object): + + def _make_packet_filter_dict(self, pf_entry, fields=None): + res = {'id': pf_entry['id'], + 'name': pf_entry['name'], + 'tenant_id': pf_entry['tenant_id'], + 'network_id': pf_entry['network_id'], + 'action': pf_entry['action'], + 'priority': pf_entry['priority'], + 'in_port': pf_entry['in_port'], + # "or None" ensure the filed is None if empty + 'src_mac': pf_entry['src_mac'] or None, + 'dst_mac': pf_entry['dst_mac'] or None, + 'eth_type': pf_entry['eth_type'] or None, + 'src_cidr': pf_entry['src_cidr'] or None, + 'dst_cidr': pf_entry['dst_cidr'] or None, + 'protocol': pf_entry['protocol'] or None, + 'src_port': pf_entry['src_port'] or None, + 'dst_port': pf_entry['dst_port'] or None, + 'admin_state_up': pf_entry['admin_state_up'], + 'status': pf_entry['status']} + return self._fields(res, fields) + + def _get_packet_filter(self, context, id): + try: + pf_entry = self._get_by_id(context, PacketFilter, id) + except sa_exc.NoResultFound: + raise ext_pf.PacketFilterNotFound(id=id) + return pf_entry + + def get_packet_filter(self, context, id, fields=None): + pf_entry = self._get_packet_filter(context, id) + return self._make_packet_filter_dict(pf_entry, fields) + + def get_packet_filters(self, context, filters=None, fields=None): + return self._get_collection(context, + PacketFilter, + self._make_packet_filter_dict, + filters=filters, + fields=fields) + + def _replace_unspecified_field(self, params, key): + if not attributes.is_attr_set(params[key]): + if key == 'in_port': + params[key] = None + elif key in INT_FIELDS: + # Integer field + params[key] = 0 + else: + 
params[key] = '' + + def _get_eth_type_for_protocol(self, protocol): + if protocol.upper() in ("ICMP", "TCP", "UDP"): + return 0x800 + elif protocol.upper() == "ARP": + return 0x806 + + def _set_eth_type_from_protocol(self, filter_dict): + if filter_dict.get('protocol'): + eth_type = self._get_eth_type_for_protocol(filter_dict['protocol']) + if eth_type: + filter_dict['eth_type'] = eth_type + + def _check_eth_type_and_protocol(self, new_filter, current_filter): + if 'protocol' in new_filter or 'eth_type' not in new_filter: + return + eth_type = self._get_eth_type_for_protocol(current_filter['protocol']) + if not eth_type: + return + if eth_type != new_filter['eth_type']: + raise ext_pf.PacketFilterEtherTypeProtocolMismatch( + eth_type=hex(new_filter['eth_type']), + protocol=current_filter['protocol']) + + def create_packet_filter(self, context, packet_filter): + pf_dict = packet_filter['packet_filter'] + tenant_id = self._get_tenant_id_for_create(context, pf_dict) + + if pf_dict['in_port'] == attributes.ATTR_NOT_SPECIFIED: + # validate network ownership + self.get_network(context, pf_dict['network_id']) + else: + # validate port ownership + self.get_port(context, pf_dict['in_port']) + + params = {'tenant_id': tenant_id, + 'id': pf_dict.get('id') or uuidutils.generate_uuid(), + 'name': pf_dict['name'], + 'network_id': pf_dict['network_id'], + 'priority': pf_dict['priority'], + 'action': pf_dict['action'], + 'admin_state_up': pf_dict.get('admin_state_up', True), + 'status': PF_STATUS_DOWN, + 'in_port': pf_dict['in_port'], + 'src_mac': pf_dict['src_mac'], + 'dst_mac': pf_dict['dst_mac'], + 'eth_type': pf_dict['eth_type'], + 'src_cidr': pf_dict['src_cidr'], + 'dst_cidr': pf_dict['dst_cidr'], + 'src_port': pf_dict['src_port'], + 'dst_port': pf_dict['dst_port'], + 'protocol': pf_dict['protocol']} + for key in params: + self._replace_unspecified_field(params, key) + self._set_eth_type_from_protocol(params) + + with context.session.begin(subtransactions=True): + pf_entry = PacketFilter(**params) + context.session.add(pf_entry) + + return self._make_packet_filter_dict(pf_entry) + + def update_packet_filter(self, context, id, packet_filter): + params = packet_filter['packet_filter'] + for key in params: + self._replace_unspecified_field(params, key) + self._set_eth_type_from_protocol(params) + with context.session.begin(subtransactions=True): + pf_entry = self._get_packet_filter(context, id) + self._check_eth_type_and_protocol(params, pf_entry) + pf_entry.update(params) + return self._make_packet_filter_dict(pf_entry) + + def delete_packet_filter(self, context, id): + with context.session.begin(subtransactions=True): + pf_entry = self._get_packet_filter(context, id) + context.session.delete(pf_entry) + + def get_packet_filters_for_port(self, context, port): + """Retrieve packet filters on OFC on a given port. + + It returns a list of tuple (neutron filter_id, OFC id). 
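+        Both network-wide filters (in_port unset) and filters bound to
+        the given port are included; only filters with admin_state_up
+        set are considered.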
+ """ + query = (context.session.query(nmodels.OFCFilterMapping) + .join(PacketFilter, + nmodels.OFCFilterMapping.neutron_id == PacketFilter.id) + .filter(PacketFilter.admin_state_up == sql.true())) + + network_id = port['network_id'] + net_pf_query = (query.filter(PacketFilter.network_id == network_id) + .filter(PacketFilter.in_port == sql.null())) + net_filters = [(pf['neutron_id'], pf['ofc_id']) for pf in net_pf_query] + + port_pf_query = query.filter(PacketFilter.in_port == port['id']) + port_filters = [(pf['neutron_id'], pf['ofc_id']) + for pf in port_pf_query] + + return net_filters + port_filters diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/router.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/router.py new file mode 100644 index 00000000..4e9c48ed --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/db/router.py @@ -0,0 +1,90 @@ +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc as sa_exc + +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class RouterProvider(models_v2.model_base.BASEV2): + """Represents a binding of router_id to provider.""" + provider = sa.Column(sa.String(255)) + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', ondelete="CASCADE"), + primary_key=True) + + router = orm.relationship(l3_db.Router, uselist=False, + backref=orm.backref('provider', uselist=False, + lazy='joined', + cascade='delete')) + + +def _get_router_providers_query(query, provider=None, router_ids=None): + if provider: + query = query.filter_by(provider=provider) + if router_ids: + column = RouterProvider.router_id + query = query.filter(column.in_(router_ids)) + return query + + +def get_router_providers(session, provider=None, router_ids=None): + """Retrieve a list of a pair of router ID and its provider.""" + query = session.query(RouterProvider) + query = _get_router_providers_query(query, provider, router_ids) + return [{'provider': router.provider, 'router_id': router.router_id} + for router in query] + + +def get_routers_by_provider(session, provider, router_ids=None): + """Retrieve a list of router IDs with the given provider.""" + query = session.query(RouterProvider.router_id) + query = _get_router_providers_query(query, provider, router_ids) + return [router[0] for router in query] + + +def get_router_count_by_provider(session, provider, tenant_id=None): + """Return the number of routers with the given provider.""" + query = session.query(RouterProvider).filter_by(provider=provider) + if tenant_id: + query = (query.join('router'). + filter(l3_db.Router.tenant_id == tenant_id)) + return query.count() + + +def get_provider_by_router(session, router_id): + """Retrieve a provider of the given router.""" + try: + binding = (session.query(RouterProvider). 
+ filter_by(router_id=router_id). + one()) + except sa_exc.NoResultFound: + return None + return binding.provider + + +def add_router_provider_binding(session, provider, router_id): + """Add a router provider association.""" + LOG.debug(_("Add provider binding " + "(router=%(router_id)s, provider=%(provider)s)"), + {'router_id': router_id, 'provider': provider}) + binding = RouterProvider(provider=provider, router_id=router_id) + session.add(binding) + return binding diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/__init__.py new file mode 100644 index 00000000..0ba7ece6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) +DRIVER_PATH = "neutron.plugins.nec.drivers.%s" +DRIVER_LIST = { + 'trema': DRIVER_PATH % "trema.TremaPortBaseDriver", + 'trema_port': DRIVER_PATH % "trema.TremaPortBaseDriver", + 'trema_portmac': DRIVER_PATH % "trema.TremaPortMACBaseDriver", + 'trema_mac': DRIVER_PATH % "trema.TremaMACBaseDriver", + 'pfc': DRIVER_PATH % "pfc.PFCV51Driver", + 'pfc_v3': DRIVER_PATH % "pfc.PFCV3Driver", + 'pfc_v4': DRIVER_PATH % "pfc.PFCV4Driver", + 'pfc_v5': DRIVER_PATH % "pfc.PFCV5Driver", + 'pfc_v51': DRIVER_PATH % "pfc.PFCV51Driver", +} + + +def get_driver(driver_name): + LOG.info(_("Loading OFC driver: %s"), driver_name) + driver_klass = DRIVER_LIST.get(driver_name) or driver_name + return importutils.import_class(driver_klass) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/pfc.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/pfc.py new file mode 100644 index 00000000..0bc92970 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/pfc.py @@ -0,0 +1,372 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
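# --- illustrative sketch (not part of the original patch) -------------------
# The driver registry in drivers/__init__.py above maps short aliases to
# dotted class paths and falls back to the given name, so a full path also
# works.  A minimal standalone rendition of that lookup (the real
# get_driver() then hands the result to importutils.import_class):
SAMPLE_DRIVER_PATH = "neutron.plugins.nec.drivers.%s"
SAMPLE_DRIVER_LIST = {
    'trema': SAMPLE_DRIVER_PATH % "trema.TremaPortBaseDriver",
    'pfc': SAMPLE_DRIVER_PATH % "pfc.PFCV51Driver",
}

def resolve_driver_name(driver_name):
    # Unknown keys fall through unchanged.
    return SAMPLE_DRIVER_LIST.get(driver_name) or driver_name

assert resolve_driver_name('pfc').endswith('pfc.PFCV51Driver')
assert resolve_driver_name('my.module.MyDriver') == 'my.module.MyDriver'
# --- end sketch --------------------------------------------------------------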
+# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +import re +import uuid + +import netaddr + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as qexc +from neutron.common import log as call_log +from neutron import manager +from neutron.plugins.nec.common import ofc_client +from neutron.plugins.nec.extensions import packetfilter as ext_pf +from neutron.plugins.nec import ofc_driver_base + + +class InvalidOFCIdFormat(qexc.NeutronException): + message = _("OFC %(resource)s ID has an invalid format: %(ofc_id)s") + + +class PFCDriverBase(ofc_driver_base.OFCDriverBase): + """Base Class for PDC Drivers. + + PFCDriverBase provides methods to handle PFC resources through REST API. + This uses ofc resource path instead of ofc resource ID. + + The class implements the API for PFC V4.0 or later. + """ + + router_supported = False + + match_ofc_network_id = re.compile( + "^/tenants/(?P[^/]+)/networks/(?P[^/]+)$") + match_ofc_port_id = re.compile( + "^/tenants/(?P[^/]+)/networks/(?P[^/]+)" + "/ports/(?P[^/]+)$") + + def __init__(self, conf_ofc): + self.client = ofc_client.OFCClient(host=conf_ofc.host, + port=conf_ofc.port, + use_ssl=conf_ofc.use_ssl, + key_file=conf_ofc.key_file, + cert_file=conf_ofc.cert_file, + insecure_ssl=conf_ofc.insecure_ssl) + + @classmethod + def filter_supported(cls): + return False + + def _generate_pfc_str(self, raw_str): + """Generate PFC acceptable String.""" + return re.sub(r'[^0-9a-zA-Z]', '_', raw_str) + + def _generate_pfc_id(self, id_str): + """Generate ID on PFC. + + Currently, PFC ID must be less than 32. + Shorten UUID string length from 36 to 31 by follows: + * delete UUID Version and hyphen (see RFC4122) + * ensure str length + """ + try: + # openstack.common.uuidutils.is_uuid_like() returns + # False for KeyStone tenant_id, so uuid.UUID is used + # directly here to accept tenant_id as UUID string + uuid_str = str(uuid.UUID(id_str)).replace('-', '') + uuid_no_version = uuid_str[:12] + uuid_str[13:] + return uuid_no_version[:31] + except Exception: + return self._generate_pfc_str(id_str)[:31] + + def _generate_pfc_description(self, desc): + """Generate Description on PFC. + + Currently, PFC Description must be less than 128. 
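# --- illustrative sketch (not part of the original patch) -------------------
# The UUID shortening performed by _generate_pfc_id() above can be
# reproduced standalone: the 36-character UUID loses its hyphens and its
# version nibble and ends up 31 characters long, within the PFC ID limit.
import uuid

def shorten_for_pfc(id_str):
    s = str(uuid.UUID(id_str)).replace('-', '')   # 32 hex characters
    return (s[:12] + s[13:])[:31]                 # drop the version nibble

assert len(shorten_for_pfc('12345678-1234-5678-1234-567812345678')) == 31
# --- end sketch --------------------------------------------------------------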
+ """ + return self._generate_pfc_str(desc)[:127] + + def _extract_ofc_network_id(self, ofc_network_id): + match = self.match_ofc_network_id.match(ofc_network_id) + if match: + return match.group('network_id') + raise InvalidOFCIdFormat(resource='network', ofc_id=ofc_network_id) + + def _extract_ofc_port_id(self, ofc_port_id): + match = self.match_ofc_port_id.match(ofc_port_id) + if match: + return {'tenant': match.group('tenant_id'), + 'network': match.group('network_id'), + 'port': match.group('port_id')} + raise InvalidOFCIdFormat(resource='port', ofc_id=ofc_port_id) + + def create_tenant(self, description, tenant_id=None): + ofc_tenant_id = self._generate_pfc_id(tenant_id) + body = {'id': ofc_tenant_id} + self.client.post('/tenants', body=body) + return '/tenants/' + ofc_tenant_id + + def delete_tenant(self, ofc_tenant_id): + return self.client.delete(ofc_tenant_id) + + def create_network(self, ofc_tenant_id, description, network_id=None): + path = "%s/networks" % ofc_tenant_id + pfc_desc = self._generate_pfc_description(description) + body = {'description': pfc_desc} + res = self.client.post(path, body=body) + ofc_network_id = res['id'] + return path + '/' + ofc_network_id + + def delete_network(self, ofc_network_id): + return self.client.delete(ofc_network_id) + + def create_port(self, ofc_network_id, portinfo, + port_id=None, filters=None): + path = "%s/ports" % ofc_network_id + body = {'datapath_id': portinfo.datapath_id, + 'port': str(portinfo.port_no), + 'vid': str(portinfo.vlan_id)} + if self.filter_supported() and filters: + body['filters'] = [self._extract_ofc_filter_id(pf[1]) + for pf in filters] + res = self.client.post(path, body=body) + ofc_port_id = res['id'] + return path + '/' + ofc_port_id + + def delete_port(self, ofc_port_id): + return self.client.delete(ofc_port_id) + + +class PFCFilterDriverMixin(object): + """PFC PacketFilter Driver Mixin.""" + filters_path = "/filters" + filter_path = "/filters/%s" + + # PFC specific constants + MIN_PRIORITY = 1 + MAX_PRIORITY = 32766 + CREATE_ONLY_FIELDS = ['action', 'priority'] + PFC_ALLOW_ACTION = "pass" + PFC_DROP_ACTION = "drop" + + match_ofc_filter_id = re.compile("^/filters/(?P[^/]+)$") + + @classmethod + def filter_supported(cls): + return True + + def _set_param(self, filter_dict, body, key, create, convert_to=None): + if key in filter_dict: + if filter_dict[key]: + if convert_to: + body[key] = convert_to(filter_dict[key]) + else: + body[key] = filter_dict[key] + elif not create: + body[key] = "" + + def _generate_body(self, filter_dict, apply_ports=None, create=True): + body = {} + + if create: + # action : pass, drop (mandatory) + if filter_dict['action'].lower() in ext_pf.ALLOW_ACTIONS: + body['action'] = self.PFC_ALLOW_ACTION + else: + body['action'] = self.PFC_DROP_ACTION + # priority : mandatory + body['priority'] = filter_dict['priority'] + + for key in ['src_mac', 'dst_mac', 'src_port', 'dst_port']: + self._set_param(filter_dict, body, key, create) + + for key in ['src_cidr', 'dst_cidr']: + # CIDR must contain netmask even if it is an address. + convert_to = lambda x: str(netaddr.IPNetwork(x)) + self._set_param(filter_dict, body, key, create, convert_to) + + # protocol : decimal (0-255) + if 'protocol' in filter_dict: + if (not filter_dict['protocol'] or + # In the case of ARP, ip_proto should be set to wildcard. + # eth_type is set during adding an entry to DB layer. 
+ filter_dict['protocol'].lower() == ext_pf.PROTO_NAME_ARP): + if not create: + body['protocol'] = "" + elif filter_dict['protocol'].lower() == constants.PROTO_NAME_ICMP: + body['protocol'] = constants.PROTO_NUM_ICMP + elif filter_dict['protocol'].lower() == constants.PROTO_NAME_TCP: + body['protocol'] = constants.PROTO_NUM_TCP + elif filter_dict['protocol'].lower() == constants.PROTO_NAME_UDP: + body['protocol'] = constants.PROTO_NUM_UDP + else: + body['protocol'] = int(filter_dict['protocol'], 0) + + # eth_type : hex (0x0-0xFFFF) + self._set_param(filter_dict, body, 'eth_type', create, hex) + + # apply_ports + if apply_ports: + # each element of apply_ports is a tuple of (neutron_id, ofc_id), + body['apply_ports'] = [] + for p in apply_ports: + try: + body['apply_ports'].append(self._extract_ofc_port_id(p[1])) + except InvalidOFCIdFormat: + pass + + return body + + def _validate_filter_common(self, filter_dict): + # Currently PFC support only IPv4 CIDR. + for field in ['src_cidr', 'dst_cidr']: + if (not filter_dict.get(field) or + filter_dict[field] == attributes.ATTR_NOT_SPECIFIED): + continue + net = netaddr.IPNetwork(filter_dict[field]) + if net.version != 4: + raise ext_pf.PacketFilterIpVersionNonSupported( + version=net.version, field=field, value=filter_dict[field]) + if ('priority' in filter_dict and + not (self.MIN_PRIORITY <= filter_dict['priority'] + <= self.MAX_PRIORITY)): + raise ext_pf.PacketFilterInvalidPriority( + min=self.MIN_PRIORITY, max=self.MAX_PRIORITY) + + def _validate_duplicate_priority(self, context, filter_dict): + plugin = manager.NeutronManager.get_plugin() + filters = {'network_id': [filter_dict['network_id']], + 'priority': [filter_dict['priority']]} + ret = plugin.get_packet_filters(context, filters=filters, + fields=['id']) + if ret: + raise ext_pf.PacketFilterDuplicatedPriority( + priority=filter_dict['priority']) + + def validate_filter_create(self, context, filter_dict): + self._validate_filter_common(filter_dict) + self._validate_duplicate_priority(context, filter_dict) + + def validate_filter_update(self, context, filter_dict): + for field in self.CREATE_ONLY_FIELDS: + if field in filter_dict: + raise ext_pf.PacketFilterUpdateNotSupported(field=field) + self._validate_filter_common(filter_dict) + + @call_log.log + def create_filter(self, ofc_network_id, filter_dict, + portinfo=None, filter_id=None, apply_ports=None): + body = self._generate_body(filter_dict, apply_ports, create=True) + res = self.client.post(self.filters_path, body=body) + # filter_id passed from a caller is not used. + # ofc_filter_id is generated by PFC because the prefix of + # filter_id has special meaning and it is internally used. 
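# --- illustrative sketch (not part of the original patch) -------------------
# The body posted just above is produced by _generate_body().  Roughly, for
# an "allow TCP to port 80" filter it would come out as below, assuming the
# usual IANA number (6) behind constants.PROTO_NUM_TCP and that 'allow' is
# in ext_pf.ALLOW_ACTIONS; netaddr is the same library this module imports.
import netaddr

sample_filter = {'action': 'allow', 'priority': 100, 'protocol': 'tcp',
                 'src_cidr': '10.0.0.1', 'eth_type': 0x800, 'dst_port': 80}
sample_body = {
    'action': 'pass',                                   # ALLOW_ACTIONS -> "pass"
    'priority': sample_filter['priority'],
    'protocol': 6,                                      # 'tcp' -> PROTO_NUM_TCP
    'src_cidr': str(netaddr.IPNetwork(sample_filter['src_cidr'])),  # '10.0.0.1/32'
    'eth_type': hex(sample_filter['eth_type']),         # 2048 -> '0x800'
    'dst_port': sample_filter['dst_port'],              # kept as an integer
}
# --- end sketch --------------------------------------------------------------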
+ ofc_filter_id = res['id'] + return self.filter_path % ofc_filter_id + + @call_log.log + def update_filter(self, ofc_filter_id, filter_dict): + body = self._generate_body(filter_dict, create=False) + self.client.put(ofc_filter_id, body) + + @call_log.log + def delete_filter(self, ofc_filter_id): + return self.client.delete(ofc_filter_id) + + def _extract_ofc_filter_id(self, ofc_filter_id): + match = self.match_ofc_filter_id.match(ofc_filter_id) + if match: + return match.group('filter_id') + raise InvalidOFCIdFormat(resource='filter', ofc_id=ofc_filter_id) + + def convert_ofc_filter_id(self, context, ofc_filter_id): + # PFC Packet Filter is supported after the format of mapping tables + # are changed, so it is enough just to return ofc_filter_id + return ofc_filter_id + + +class PFCRouterDriverMixin(object): + + router_supported = True + router_nat_supported = False + + def create_router(self, ofc_tenant_id, router_id, description): + path = '%s/routers' % ofc_tenant_id + res = self.client.post(path, body=None) + ofc_router_id = res['id'] + return path + '/' + ofc_router_id + + def delete_router(self, ofc_router_id): + return self.client.delete(ofc_router_id) + + def add_router_interface(self, ofc_router_id, ofc_net_id, + ip_address=None, mac_address=None): + # ip_address : / (e.g., 10.0.0.0/24) + path = '%s/interfaces' % ofc_router_id + body = {'net_id': self._extract_ofc_network_id(ofc_net_id)} + if ip_address: + body['ip_address'] = ip_address + if mac_address: + body['mac_address'] = mac_address + res = self.client.post(path, body=body) + return path + '/' + res['id'] + + def update_router_interface(self, ofc_router_inf_id, + ip_address=None, mac_address=None): + # ip_address : / (e.g., 10.0.0.0/24) + if not ip_address and not mac_address: + return + body = {} + if ip_address: + body['ip_address'] = ip_address + if mac_address: + body['mac_address'] = mac_address + return self.client.put(ofc_router_inf_id, body=body) + + def delete_router_interface(self, ofc_router_inf_id): + return self.client.delete(ofc_router_inf_id) + + def list_router_routes(self, ofc_router_id): + path = '%s/routes' % ofc_router_id + ret = self.client.get(path) + # Prepend ofc_router_id to route_id + for r in ret['routes']: + r['id'] = ofc_router_id + '/routes/' + r['id'] + return ret['routes'] + + def add_router_route(self, ofc_router_id, destination, nexthop): + path = '%s/routes' % ofc_router_id + body = {'destination': destination, + 'nexthop': nexthop} + ret = self.client.post(path, body=body) + return path + '/' + ret['id'] + + def delete_router_route(self, ofc_router_route_id): + return self.client.delete(ofc_router_route_id) + + +class PFCV3Driver(PFCDriverBase): + + def create_tenant(self, description, tenant_id): + ofc_tenant_id = self._generate_pfc_id(tenant_id) + return "/tenants/" + ofc_tenant_id + + def delete_tenant(self, ofc_tenant_id): + pass + + +class PFCV4Driver(PFCDriverBase): + pass + + +class PFCV5Driver(PFCRouterDriverMixin, PFCDriverBase): + pass + + +class PFCV51Driver(PFCFilterDriverMixin, PFCV5Driver): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/trema.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/trema.py new file mode 100644 index 00000000..77fee804 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/drivers/trema.py @@ -0,0 +1,248 @@ +# Copyright 2012 NEC Corporation. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +from neutron.openstack.common import uuidutils +from neutron.plugins.nec.common import ofc_client +from neutron.plugins.nec import ofc_driver_base + + +class TremaDriverBase(ofc_driver_base.OFCDriverBase): + """Common class for Trema (Sliceable Switch) Drivers.""" + networks_path = "/networks" + network_path = "/networks/%s" + + router_supported = False + + def __init__(self, conf_ofc): + # Trema sliceable REST API does not support HTTPS + self.client = ofc_client.OFCClient(host=conf_ofc.host, + port=conf_ofc.port) + + def _get_network_id(self, ofc_network_id): + # ofc_network_id : /networks/ + return ofc_network_id.split('/')[2] + + def _get_tenant_id(self, tenant_id): + # Trema does not use tenant_id, but it returns + # /tenants/ format to keep consistency with PFC driver. + return '/tenants/' + tenant_id + + def create_tenant(self, description, tenant_id=None): + return self._get_tenant_id(tenant_id or uuidutils.generate_uuid()) + + def update_tenant(self, ofc_tenant_id, description): + pass + + def delete_tenant(self, ofc_tenant_id): + pass + + def create_network(self, ofc_tenant_id, description, network_id=None): + ofc_network_id = network_id or uuidutils.generate_uuid() + body = {'id': ofc_network_id, 'description': description} + self.client.post(self.networks_path, body=body) + return self.network_path % ofc_network_id + + def delete_network(self, ofc_network_id): + return self.client.delete(ofc_network_id) + + +class TremaFilterDriverMixin(object): + """Trema (Sliceable Switch) PacketFilter Driver Mixin.""" + filters_path = "/filters" + filter_path = "/filters/%s" + + @classmethod + def filter_supported(cls): + return True + + def create_filter(self, ofc_network_id, filter_dict, + portinfo=None, filter_id=None, apply_ports=None): + if filter_dict['action'].upper() in ["ACCEPT", "ALLOW"]: + ofc_action = "ALLOW" + elif filter_dict['action'].upper() in ["DROP", "DENY"]: + ofc_action = "DENY" + + body = {'priority': filter_dict['priority'], + 'slice': self._get_network_id(ofc_network_id), + 'action': ofc_action} + ofp_wildcards = ["dl_vlan", "dl_vlan_pcp", "nw_tos"] + + if portinfo: + body['in_datapath_id'] = portinfo.datapath_id + body['in_port'] = portinfo.port_no + else: + body['wildcards'] = "in_datapath_id" + ofp_wildcards.append("in_port") + + if filter_dict['src_mac']: + body['dl_src'] = filter_dict['src_mac'] + else: + ofp_wildcards.append("dl_src") + + if filter_dict['dst_mac']: + body['dl_dst'] = filter_dict['dst_mac'] + else: + ofp_wildcards.append("dl_dst") + + if filter_dict['src_cidr']: + body['nw_src'] = filter_dict['src_cidr'] + else: + ofp_wildcards.append("nw_src:32") + + if filter_dict['dst_cidr']: + body['nw_dst'] = filter_dict['dst_cidr'] + else: + ofp_wildcards.append("nw_dst:32") + + if filter_dict['protocol']: + if filter_dict['protocol'].upper() == "ICMP": + body['dl_type'] = "0x800" + body['nw_proto'] = hex(1) + elif filter_dict['protocol'].upper() == "TCP": + 
body['dl_type'] = "0x800" + body['nw_proto'] = hex(6) + elif filter_dict['protocol'].upper() == "UDP": + body['dl_type'] = "0x800" + body['nw_proto'] = hex(17) + elif filter_dict['protocol'].upper() == "ARP": + body['dl_type'] = "0x806" + ofp_wildcards.append("nw_proto") + else: + body['nw_proto'] = filter_dict['protocol'] + else: + ofp_wildcards.append("nw_proto") + + if 'dl_type' in body: + pass + elif filter_dict['eth_type']: + body['dl_type'] = filter_dict['eth_type'] + else: + ofp_wildcards.append("dl_type") + + if filter_dict['src_port']: + body['tp_src'] = hex(filter_dict['src_port']) + else: + ofp_wildcards.append("tp_src") + + if filter_dict['dst_port']: + body['tp_dst'] = hex(filter_dict['dst_port']) + else: + ofp_wildcards.append("tp_dst") + + ofc_filter_id = filter_id or uuidutils.generate_uuid() + body['id'] = ofc_filter_id + + body['ofp_wildcards'] = ','.join(ofp_wildcards) + + self.client.post(self.filters_path, body=body) + return self.filter_path % ofc_filter_id + + def delete_filter(self, ofc_filter_id): + return self.client.delete(ofc_filter_id) + + +class TremaPortBaseDriver(TremaDriverBase, TremaFilterDriverMixin): + """Trema (Sliceable Switch) Driver for port base binding. + + TremaPortBaseDriver uses port base binding. + Ports are identified by datapath_id, port_no and vlan_id. + """ + ports_path = "%(network)s/ports" + port_path = "%(network)s/ports/%(port)s" + + def create_port(self, ofc_network_id, portinfo, + port_id=None, filters=None): + ofc_port_id = port_id or uuidutils.generate_uuid() + path = self.ports_path % {'network': ofc_network_id} + body = {'id': ofc_port_id, + 'datapath_id': portinfo.datapath_id, + 'port': str(portinfo.port_no), + 'vid': str(portinfo.vlan_id)} + self.client.post(path, body=body) + return self.port_path % {'network': ofc_network_id, + 'port': ofc_port_id} + + def delete_port(self, ofc_port_id): + return self.client.delete(ofc_port_id) + + +class TremaPortMACBaseDriver(TremaDriverBase, TremaFilterDriverMixin): + """Trema (Sliceable Switch) Driver for port-mac base binding. + + TremaPortBaseDriver uses port-mac base binding. + Ports are identified by datapath_id, port_no, vlan_id and mac. + """ + ports_path = "%(network)s/ports" + port_path = "%(network)s/ports/%(port)s" + attachments_path = "%(network)s/ports/%(port)s/attachments" + attachment_path = "%(network)s/ports/%(port)s/attachments/%(attachment)s" + + def create_port(self, ofc_network_id, portinfo, port_id=None, + filters=None): + #NOTE: This Driver create slices with Port-MAC Based bindings on Trema + # Sliceable. It's REST API requires Port Based binding before you + # define Port-MAC Based binding. 
+ ofc_port_id = port_id or uuidutils.generate_uuid() + dummy_port_id = "dummy-%s" % ofc_port_id + + path = self.ports_path % {'network': ofc_network_id} + body = {'id': dummy_port_id, + 'datapath_id': portinfo.datapath_id, + 'port': str(portinfo.port_no), + 'vid': str(portinfo.vlan_id)} + self.client.post(path, body=body) + + path = self.attachments_path % {'network': ofc_network_id, + 'port': dummy_port_id} + body = {'id': ofc_port_id, 'mac': portinfo.mac} + self.client.post(path, body=body) + + path = self.port_path % {'network': ofc_network_id, + 'port': dummy_port_id} + self.client.delete(path) + + return self.attachment_path % {'network': ofc_network_id, + 'port': dummy_port_id, + 'attachment': ofc_port_id} + + def delete_port(self, ofc_port_id): + return self.client.delete(ofc_port_id) + + +class TremaMACBaseDriver(TremaDriverBase): + """Trema (Sliceable Switch) Driver for mac base binding. + + TremaPortBaseDriver uses mac base binding. + Ports are identified by mac. + """ + attachments_path = "%(network)s/attachments" + attachment_path = "%(network)s/attachments/%(attachment)s" + + @classmethod + def filter_supported(cls): + return False + + def create_port(self, ofc_network_id, portinfo, port_id=None, + filters=None): + ofc_port_id = port_id or uuidutils.generate_uuid() + path = self.attachments_path % {'network': ofc_network_id} + body = {'id': ofc_port_id, 'mac': portinfo.mac} + self.client.post(path, body=body) + return self.attachment_path % {'network': ofc_network_id, + 'attachment': ofc_port_id} + + def delete_port(self, ofc_port_id): + return self.client.delete(ofc_port_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/__init__.py new file mode 100644 index 00000000..cff1fb25 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/packetfilter.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/packetfilter.py new file mode 100644 index 00000000..73e952ba --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/packetfilter.py @@ -0,0 +1,206 @@ +# Copyright 2012-2013 NEC Corporation. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
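# --- illustrative sketch (not part of the original patch) -------------------
# For reference, the Trema filter driver above would post roughly the
# following body for a network-wide "deny ARP" filter (no portinfo, all
# other match fields unset); every unmatched OpenFlow field is listed in
# ofp_wildcards.  All ids are hypothetical.
sample_trema_filter_body = {
    'id': 'f0000000-0000-0000-0000-000000000001',
    'slice': 'd0000000-0000-0000-0000-000000000002',
    'priority': 20,
    'action': 'DENY',
    'wildcards': 'in_datapath_id',
    'dl_type': '0x806',
    'ofp_wildcards': ('dl_vlan,dl_vlan_pcp,nw_tos,in_port,dl_src,dl_dst,'
                      'nw_src:32,nw_dst:32,nw_proto,tp_src,tp_dst'),
}
# --- end sketch --------------------------------------------------------------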
+# +# @author: Ryota MIBU +# + +from oslo.config import cfg + +from neutron.api import extensions +from neutron.api.v2 import attributes +from neutron.api.v2 import base +from neutron.common import constants +from neutron.common import exceptions +from neutron import manager +from neutron import quota + + +quota_packet_filter_opts = [ + cfg.IntOpt('quota_packet_filter', + default=100, + help=_("Number of packet_filters allowed per tenant, " + "-1 for unlimited")) +] +cfg.CONF.register_opts(quota_packet_filter_opts, 'QUOTAS') + + +class PacketFilterNotFound(exceptions.NotFound): + message = _("PacketFilter %(id)s could not be found") + + +class PacketFilterIpVersionNonSupported(exceptions.BadRequest): + message = _("IP version %(version)s is not supported for %(field)s " + "(%(value)s is specified)") + + +class PacketFilterInvalidPriority(exceptions.BadRequest): + message = _("Packet Filter priority should be %(min)s-%(max)s (included)") + + +class PacketFilterUpdateNotSupported(exceptions.BadRequest): + message = _("%(field)s field cannot be updated") + + +class PacketFilterDuplicatedPriority(exceptions.BadRequest): + message = _("The backend does not support duplicated priority. " + "Priority %(priority)s is in use") + + +class PacketFilterEtherTypeProtocolMismatch(exceptions.Conflict): + message = _("Ether Type '%(eth_type)s' conflicts with protocol " + "'%(protocol)s'. Update or clear protocol before " + "changing ether type.") + + +def convert_to_int_dec_and_hex(data): + try: + return int(data, 0) + except (ValueError, TypeError): + pass + try: + return int(data) + except (ValueError, TypeError): + msg = _("'%s' is not a integer") % data + raise exceptions.InvalidInput(error_message=msg) + + +def convert_to_int_or_none(data): + if data is None: + return + return convert_to_int_dec_and_hex(data) + + +PROTO_NAME_ARP = 'arp' +SUPPORTED_PROTOCOLS = [constants.PROTO_NAME_ICMP, + constants.PROTO_NAME_TCP, + constants.PROTO_NAME_UDP, + PROTO_NAME_ARP] +ALLOW_ACTIONS = ['allow', 'accept'] +DROP_ACTIONS = ['drop', 'deny'] +SUPPORTED_ACTIONS = ALLOW_ACTIONS + DROP_ACTIONS + +ALIAS = 'packet-filter' +RESOURCE = 'packet_filter' +COLLECTION = 'packet_filters' +PACKET_FILTER_ACTION_REGEX = '(?i)^(%s)$' % '|'.join(SUPPORTED_ACTIONS) +PACKET_FILTER_PROTOCOL_REGEX = ('(?i)^(%s|0x[0-9a-fA-F]+|[0-9]+|)$' % + '|'.join(SUPPORTED_PROTOCOLS)) +PACKET_FILTER_ATTR_PARAMS = { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, 'default': '', + 'validate': {'type:string': None}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'network_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': attributes.convert_to_boolean, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'action': {'allow_post': True, 'allow_put': True, + 'validate': {'type:regex': PACKET_FILTER_ACTION_REGEX}, + 'is_visible': True}, + 'priority': {'allow_post': True, 'allow_put': True, + 'convert_to': convert_to_int_dec_and_hex, + 'is_visible': True}, + 'in_port': {'allow_post': True, 'allow_put': False, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'src_mac': {'allow_post': 
True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:mac_address_or_none': None}, + 'is_visible': True}, + 'dst_mac': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:mac_address_or_none': None}, + 'is_visible': True}, + 'eth_type': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'convert_to': convert_to_int_or_none, + 'is_visible': True}, + 'src_cidr': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:subnet_or_none': None}, + 'is_visible': True}, + 'dst_cidr': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:subnet_or_none': None}, + 'is_visible': True}, + 'protocol': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'validate': {'type:regex_or_none': + PACKET_FILTER_PROTOCOL_REGEX}, + 'is_visible': True}, + 'src_port': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'convert_to': convert_to_int_or_none, + 'is_visible': True}, + 'dst_port': {'allow_post': True, 'allow_put': True, + 'default': attributes.ATTR_NOT_SPECIFIED, + 'convert_to': convert_to_int_or_none, + 'is_visible': True}, +} +PACKET_FILTER_ATTR_MAP = {COLLECTION: PACKET_FILTER_ATTR_PARAMS} + + +class Packetfilter(extensions.ExtensionDescriptor): + @classmethod + def get_name(cls): + return ALIAS + + @classmethod + def get_alias(cls): + return ALIAS + + @classmethod + def get_description(cls): + return "PacketFilters on OFC" + + @classmethod + def get_namespace(cls): + return "http://www.nec.co.jp/api/ext/packet_filter/v2.0" + + @classmethod + def get_updated(cls): + return "2013-07-16T00:00:00+09:00" + + @classmethod + def get_resources(cls): + qresource = quota.CountableResource(RESOURCE, + quota._count_resource, + 'quota_%s' % RESOURCE) + quota.QUOTAS.register_resource(qresource) + + resource = base.create_resource(COLLECTION, RESOURCE, + manager.NeutronManager.get_plugin(), + PACKET_FILTER_ATTR_PARAMS) + pf_ext = extensions.ResourceExtension( + COLLECTION, resource, attr_map=PACKET_FILTER_ATTR_PARAMS) + return [pf_ext] + + def get_extended_resources(self, version): + if version == "2.0": + return PACKET_FILTER_ATTR_MAP + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/router_provider.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/router_provider.py new file mode 100644 index 00000000..0c8f0255 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/extensions/router_provider.py @@ -0,0 +1,58 @@ +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
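# --- illustrative sketch (not part of the original patch) -------------------
# A request body accepted by the packet_filter resource defined above might
# look like the following.  All values are hypothetical; note that
# 'priority' and 'eth_type' also accept hex strings because they go through
# convert_to_int_dec_and_hex / convert_to_int_or_none.
sample_packet_filter_request = {
    'packet_filter': {
        'network_id': 'd0000000-0000-0000-0000-000000000002',
        'action': 'allow',        # allow/accept/drop/deny, case-insensitive
        'priority': '0x7530',     # converted to 30000
        'protocol': 'tcp',
        'dst_port': 8080,
        'admin_state_up': True,
    }
}
# --- end sketch --------------------------------------------------------------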
+ +from neutron.api.v2 import attributes +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +ROUTER_PROVIDER = 'provider' + +ROUTER_PROVIDER_ATTRIBUTE = { + 'routers': {ROUTER_PROVIDER: + {'allow_post': True, + 'allow_put': False, + 'is_visible': True, + 'default': attributes.ATTR_NOT_SPECIFIED} + } +} + + +class Router_provider(object): + @classmethod + def get_name(cls): + return "Router Provider" + + @classmethod + def get_alias(cls): + return "router_provider" + + @classmethod + def get_description(cls): + return "Router Provider Support" + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/router_provider/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-08-20T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return ROUTER_PROVIDER_ATTRIBUTE + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/nec_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/nec_plugin.py new file mode 100644 index 00000000..fbeebd7a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/nec_plugin.py @@ -0,0 +1,779 @@ +# Copyright 2012-2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
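# --- illustrative sketch (not part of the original patch) -------------------
# The router_provider extension above adds a single create-only 'provider'
# attribute to routers.  A create request using it might look like the
# following; the provider string shown here is hypothetical, the accepted
# names come from the plugin's constants (nconst.ROUTER_PROVIDER_*) used
# later in this patch.
sample_router_request = {
    'router': {
        'name': 'router1',
        'admin_state_up': True,
        'provider': 'openflow',   # omitted -> attributes.ATTR_NOT_SPECIFIED
    }
}
# --- end sketch --------------------------------------------------------------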
+# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api import extensions as neutron_extensions +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes as attrs +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_base +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.common import utils as necutils +from neutron.plugins.nec.db import api as ndb +from neutron.plugins.nec.db import router as rdb +from neutron.plugins.nec import extensions +from neutron.plugins.nec import nec_router +from neutron.plugins.nec import ofc_manager +from neutron.plugins.nec import packet_filter + +LOG = logging.getLogger(__name__) + + +class NECPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + nec_router.RouterMixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + nec_router.L3AgentSchedulerDbMixin, + packet_filter.PacketFilterMixin, + portbindings_db.PortBindingMixin, + addr_pair_db.AllowedAddressPairsMixin): + """NECPluginV2 controls an OpenFlow Controller. + + The Neutron NECPluginV2 maps L2 logical networks to L2 virtualized networks + on an OpenFlow enabled network. An OpenFlow Controller (OFC) provides + L2 network isolation without VLAN and this plugin controls the OFC. + + NOTE: This is for Neutron API V2. Codes for V1.0 and V1.1 are available + at https://github.com/nec-openstack/neutron-openflow-plugin . + + The port binding extension enables an external application relay + information to and from the plugin. 
+ """ + _supported_extension_aliases = ["agent", + "allowed-address-pairs", + "binding", + "dhcp_agent_scheduler", + "external-net", + "ext-gw-mode", + "extraroute", + "l3_agent_scheduler", + "packet-filter", + "quotas", + "router", + "router_provider", + "security-group", + ] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self.remove_packet_filter_extension_if_disabled(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + super(NECPluginV2, self).__init__() + self.ofc = ofc_manager.OFCManager(self.safe_reference) + self.base_binding_dict = self._get_base_binding_dict() + portbindings_base.register_port_dict_function() + + neutron_extensions.append_api_extensions_path(extensions.__path__) + + self.setup_rpc() + self.l3_rpc_notifier = nec_router.L3AgentNotifyAPI() + + self.network_scheduler = importutils.import_object( + config.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + config.CONF.router_scheduler_driver + ) + + nec_router.load_driver(self.safe_reference, self.ofc) + self.port_handlers = { + 'create': { + const.DEVICE_OWNER_ROUTER_GW: self.create_router_port, + const.DEVICE_OWNER_ROUTER_INTF: self.create_router_port, + 'default': self.activate_port_if_ready, + }, + 'delete': { + const.DEVICE_OWNER_ROUTER_GW: self.delete_router_port, + const.DEVICE_OWNER_ROUTER_INTF: self.delete_router_port, + 'default': self.deactivate_port, + } + } + + def setup_rpc(self): + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.notifier = NECPluginV2AgentNotifierApi(topics.AGENT) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[const.AGENT_TYPE_L3] = ( + nec_router.L3AgentNotifyAPI() + ) + + # NOTE: callback_sg is referred to from the sg unit test. 
+ self.callback_sg = SecurityGroupServerRpcCallback() + self.endpoints = [ + NECPluginV2RPCCallbacks(self.safe_reference), + DhcpRpcCallback(), + L3RpcCallback(), + self.callback_sg, + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _update_resource_status(self, context, resource, id, status): + """Update status of specified resource.""" + request = {'status': status} + obj_getter = getattr(self, '_get_%s' % resource) + with context.session.begin(subtransactions=True): + obj_db = obj_getter(context, id) + obj_db.update(request) + + def _update_resource_status_if_changed(self, context, resource_type, + resource_dict, new_status): + if resource_dict['status'] != new_status: + self._update_resource_status(context, resource_type, + resource_dict['id'], + new_status) + resource_dict['status'] = new_status + + def _check_ofc_tenant_in_use(self, context, tenant_id): + """Check if the specified tenant is used.""" + # All networks are created on OFC + filters = {'tenant_id': [tenant_id]} + if self.get_networks_count(context, filters=filters): + return True + if rdb.get_router_count_by_provider(context.session, + nec_router.PROVIDER_OPENFLOW, + tenant_id): + return True + return False + + def _cleanup_ofc_tenant(self, context, tenant_id): + if not self._check_ofc_tenant_in_use(context, tenant_id): + try: + if self.ofc.exists_ofc_tenant(context, tenant_id): + self.ofc.delete_ofc_tenant(context, tenant_id) + else: + LOG.debug(_('_cleanup_ofc_tenant: No OFC tenant for %s'), + tenant_id) + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + reason = _("delete_ofc_tenant() failed due to %s") % exc + LOG.warn(reason) + + def activate_port_if_ready(self, context, port, network=None): + """Activate port by creating port on OFC if ready. 
+ + Conditions to activate port on OFC are: + * port admin_state is UP + * network admin_state is UP + * portinfo are available (to identify port on OFC) + """ + if not network: + network = super(NECPluginV2, self).get_network(context, + port['network_id']) + + if not port['admin_state_up']: + LOG.debug(_("activate_port_if_ready(): skip, " + "port.admin_state_up is False.")) + return port + elif not network['admin_state_up']: + LOG.debug(_("activate_port_if_ready(): skip, " + "network.admin_state_up is False.")) + return port + elif not ndb.get_portinfo(context.session, port['id']): + LOG.debug(_("activate_port_if_ready(): skip, " + "no portinfo for this port.")) + return port + elif self.ofc.exists_ofc_port(context, port['id']): + LOG.debug(_("activate_port_if_ready(): skip, " + "ofc_port already exists.")) + return port + + try: + self.ofc.create_ofc_port(context, port['id'], port) + port_status = const.PORT_STATUS_ACTIVE + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + LOG.error(_("create_ofc_port() failed due to %s"), exc) + port_status = const.PORT_STATUS_ERROR + + if port_status != port['status']: + self._update_resource_status(context, "port", port['id'], + port_status) + port['status'] = port_status + + return port + + def deactivate_port(self, context, port, raise_exc=True): + """Deactivate port by deleting port from OFC if exists.""" + if not self.ofc.exists_ofc_port(context, port['id']): + LOG.debug(_("deactivate_port(): skip, ofc_port for port=%s " + "does not exist."), port['id']) + return port + + try: + self.ofc.delete_ofc_port(context, port['id'], port) + self._update_resource_status_if_changed( + context, "port", port, const.PORT_STATUS_DOWN) + return port + except (nexc.OFCResourceNotFound, nexc.OFCMappingNotFound): + # There is a case where multiple delete_port operation are + # running concurrently. For example, delete_port from + # release_dhcp_port and deletion of network owned ports in + # delete_network. In such cases delete_ofc_port may receive + # 404 error from OFC. + # Also there is a case where neutron port is deleted + # between exists_ofc_port and get_ofc_id in delete_ofc_port. + # In this case OFCMappingNotFound is raised. + # These two cases are valid situations. + LOG.info(_("deactivate_port(): OFC port for port=%s is " + "already removed."), port['id']) + # The port is already removed, so there is no need + # to update status in the database. + port['status'] = const.PORT_STATUS_DOWN + return port + except nexc.OFCException as exc: + with excutils.save_and_reraise_exception() as ctxt: + LOG.error(_("Failed to delete port=%(port)s from OFC: " + "%(exc)s"), {'port': port['id'], 'exc': exc}) + self._update_resource_status_if_changed( + context, "port", port, const.PORT_STATUS_ERROR) + if not raise_exc: + ctxt.reraise = False + return port + + def _net_status(self, network): + # NOTE: NEC Plugin accept admin_state_up. When it's False, this plugin + # deactivate all ports on the network to drop all packet and show + # status='DOWN' to users. But the network is kept defined on OFC. 
+ if network['network']['admin_state_up']: + return const.NET_STATUS_ACTIVE + else: + return const.NET_STATUS_DOWN + + def create_network(self, context, network): + """Create a new network entry on DB, and create it on OFC.""" + LOG.debug(_("NECPluginV2.create_network() called, " + "network=%s ."), network) + tenant_id = self._get_tenant_id_for_create(context, network['network']) + net_name = network['network']['name'] + net_id = uuidutils.generate_uuid() + + #set up default security groups + self._ensure_default_security_group(context, tenant_id) + + network['network']['id'] = net_id + network['network']['status'] = self._net_status(network) + + try: + if not self.ofc.exists_ofc_tenant(context, tenant_id): + self.ofc.create_ofc_tenant(context, tenant_id) + self.ofc.create_ofc_network(context, tenant_id, net_id, net_name) + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + LOG.error(_("Failed to create network id=%(id)s on " + "OFC: %(exc)s"), {'id': net_id, 'exc': exc}) + network['network']['status'] = const.NET_STATUS_ERROR + + with context.session.begin(subtransactions=True): + new_net = super(NECPluginV2, self).create_network(context, network) + self._process_l3_create(context, new_net, network['network']) + + return new_net + + def update_network(self, context, id, network): + """Update network and handle resources associated with the network. + + Update network entry on DB. If 'admin_state_up' was changed, activate + or deactivate ports and packetfilters associated with the network. + """ + LOG.debug(_("NECPluginV2.update_network() called, " + "id=%(id)s network=%(network)s ."), + {'id': id, 'network': network}) + + if 'admin_state_up' in network['network']: + network['network']['status'] = self._net_status(network) + + session = context.session + with session.begin(subtransactions=True): + old_net = super(NECPluginV2, self).get_network(context, id) + new_net = super(NECPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, new_net, network['network']) + + changed = (old_net['admin_state_up'] != new_net['admin_state_up']) + if changed and not new_net['admin_state_up']: + # disable all active ports of the network + filters = dict(network_id=[id], status=[const.PORT_STATUS_ACTIVE]) + ports = super(NECPluginV2, self).get_ports(context, + filters=filters) + for port in ports: + # If some error occurs, status of errored port is set to ERROR. + # This is avoids too many rollback. + # TODO(amotoki): Raise an exception after all port operations + # are finished to inform the caller of API of the failure. + self.deactivate_port(context, port, raise_exc=False) + elif changed and new_net['admin_state_up']: + # enable ports of the network + filters = dict(network_id=[id], status=[const.PORT_STATUS_DOWN], + admin_state_up=[True]) + ports = super(NECPluginV2, self).get_ports(context, + filters=filters) + for port in ports: + self.activate_port_if_ready(context, port, new_net) + + return new_net + + def delete_network(self, context, id): + """Delete network and packet_filters associated with the network. + + Delete network entry from DB and OFC. Then delete packet_filters + associated with the network. If the network is the last resource + of the tenant, delete unnessary ofc_tenant. 
+ """ + LOG.debug(_("NECPluginV2.delete_network() called, id=%s ."), id) + net_db = self._get_network(context, id) + tenant_id = net_db['tenant_id'] + ports = self.get_ports(context, filters={'network_id': [id]}) + + # check if there are any tenant owned ports in-use; + # consider ports owned by floating ips as auto_delete as if there are + # no other tenant owned ports, those floating ips are disassociated + # and will be auto deleted with self._process_l3_delete() + only_auto_del = all(p['device_owner'] in + db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS or + p['device_owner'] == const.DEVICE_OWNER_FLOATINGIP + for p in ports) + if not only_auto_del: + raise n_exc.NetworkInUse(net_id=id) + + self._process_l3_delete(context, id) + + # Make sure auto-delete ports on OFC are deleted. + # If an error occurs during port deletion, + # delete_network will be aborted. + for port in [p for p in ports if p['device_owner'] + in db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS]: + port = self.deactivate_port(context, port) + + # delete all packet_filters of the network from the controller + for pf in net_db.packetfilters: + self.delete_packet_filter(context, pf['id']) + + if self.ofc.exists_ofc_network(context, id): + try: + self.ofc.delete_ofc_network(context, id, net_db) + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + reason = _("delete_network() failed due to %s") % exc + LOG.error(reason) + self._update_resource_status( + context, "network", net_db['id'], + const.NET_STATUS_ERROR) + + super(NECPluginV2, self).delete_network(context, id) + + self._cleanup_ofc_tenant(context, tenant_id) + + def _get_base_binding_dict(self): + binding = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True + } + } + return binding + + def _extend_port_dict_binding_portinfo(self, port_res, portinfo): + if portinfo: + port_res[portbindings.PROFILE] = { + 'datapath_id': portinfo['datapath_id'], + 'port_no': portinfo['port_no'], + } + elif portbindings.PROFILE in port_res: + del port_res[portbindings.PROFILE] + + def _validate_portinfo(self, profile): + key_specs = { + 'datapath_id': {'type:string': None, 'required': True}, + 'port_no': {'type:non_negative': None, 'required': True, + 'convert_to': attrs.convert_to_int} + } + msg = attrs._validate_dict_or_empty(profile, key_specs=key_specs) + if msg: + raise n_exc.InvalidInput(error_message=msg) + + datapath_id = profile.get('datapath_id') + port_no = profile.get('port_no') + try: + dpid = int(datapath_id, 16) + except ValueError: + raise nexc.ProfilePortInfoInvalidDataPathId() + if dpid > 0xffffffffffffffffL: + raise nexc.ProfilePortInfoInvalidDataPathId() + # Make sure dpid is a hex string beginning with 0x. + dpid = hex(dpid) + + if int(port_no) > 65535: + raise nexc.ProfilePortInfoInvalidPortNo() + + return {'datapath_id': dpid, 'port_no': port_no} + + def _process_portbindings_portinfo_create(self, context, port_data, port): + """Add portinfo according to bindings:profile in create_port(). 
+ + :param context: neutron api request context + :param port_data: port attributes passed in PUT request + :param port: port attributes to be returned + """ + profile = port_data.get(portbindings.PROFILE) + # If portbindings.PROFILE is None, unspecified or an empty dict + # it is regarded that portbinding.PROFILE is not set. + profile_set = attrs.is_attr_set(profile) and profile + if profile_set: + portinfo = self._validate_portinfo(profile) + portinfo['mac'] = port['mac_address'] + ndb.add_portinfo(context.session, port['id'], **portinfo) + else: + portinfo = None + self._extend_port_dict_binding_portinfo(port, portinfo) + + def _process_portbindings_portinfo_update(self, context, port_data, port): + """Update portinfo according to bindings:profile in update_port(). + + :param context: neutron api request context + :param port_data: port attributes passed in PUT request + :param port: port attributes to be returned + :returns: 'ADD', 'MOD', 'DEL' or None + """ + if portbindings.PROFILE not in port_data: + return + profile = port_data.get(portbindings.PROFILE) + # If binding:profile is None or an empty dict, + # it means binding:.profile needs to be cleared. + # TODO(amotoki): Allow Make None in binding:profile in + # the API layer. See LP bug #1220011. + profile_set = attrs.is_attr_set(profile) and profile + cur_portinfo = ndb.get_portinfo(context.session, port['id']) + if profile_set: + portinfo = self._validate_portinfo(profile) + portinfo_changed = 'ADD' + if cur_portinfo: + if (necutils.cmp_dpid(portinfo['datapath_id'], + cur_portinfo.datapath_id) and + portinfo['port_no'] == cur_portinfo.port_no): + return + ndb.del_portinfo(context.session, port['id']) + portinfo_changed = 'MOD' + portinfo['mac'] = port['mac_address'] + ndb.add_portinfo(context.session, port['id'], **portinfo) + elif cur_portinfo: + portinfo_changed = 'DEL' + portinfo = None + ndb.del_portinfo(context.session, port['id']) + else: + portinfo = None + portinfo_changed = None + self._extend_port_dict_binding_portinfo(port, portinfo) + return portinfo_changed + + def extend_port_dict_binding(self, port_res, port_db): + super(NECPluginV2, self).extend_port_dict_binding(port_res, port_db) + self._extend_port_dict_binding_portinfo(port_res, port_db.portinfo) + + def _process_portbindings_create(self, context, port_data, port): + super(NECPluginV2, self)._process_portbindings_create_and_update( + context, port_data, port) + self._process_portbindings_portinfo_create(context, port_data, port) + + def _process_portbindings_update(self, context, port_data, port): + super(NECPluginV2, self)._process_portbindings_create_and_update( + context, port_data, port) + portinfo_changed = self._process_portbindings_portinfo_update( + context, port_data, port) + return portinfo_changed + + def _get_port_handler(self, operation, device_owner): + handlers = self.port_handlers[operation] + handler = handlers.get(device_owner) + if handler: + return handler + else: + return handlers['default'] + + def create_port(self, context, port): + """Create a new port entry on DB, then try to activate it.""" + LOG.debug(_("NECPluginV2.create_port() called, port=%s ."), port) + + port['port']['status'] = const.PORT_STATUS_DOWN + + port_data = port['port'] + with context.session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + port = super(NECPluginV2, self).create_port(context, port) + self._process_portbindings_create(context, port_data, port) + 
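# --- illustrative sketch (not part of the original patch) -------------------
# The binding:profile consumed by _process_portbindings_create() above
# carries the OpenFlow location of the port.  Per _validate_portinfo(), a
# well-formed profile looks like this (values are hypothetical; the
# datapath_id is re-normalized to a 0x-prefixed hex string and port_no must
# not exceed 65535):
sample_port_binding_profile = {
    'binding:profile': {           # portbindings.PROFILE
        'datapath_id': '0xabcd',
        'port_no': 1,
    }
}
# --- end sketch --------------------------------------------------------------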
self._process_port_create_security_group( + context, port, sgids) + port[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, port, + port_data.get(addr_pair.ADDRESS_PAIRS))) + self.notify_security_groups_member_updated(context, port) + + handler = self._get_port_handler('create', port['device_owner']) + return handler(context, port) + + def _update_ofc_port_if_required(self, context, old_port, new_port, + portinfo_changed): + def get_ofport_exist(port): + return (port['admin_state_up'] and + bool(port.get(portbindings.PROFILE))) + + # Determine it is required to update OFC port + need_add = False + need_del = False + need_packet_filter_update = False + + old_ofport_exist = get_ofport_exist(old_port) + new_ofport_exist = get_ofport_exist(new_port) + + if old_port['admin_state_up'] != new_port['admin_state_up']: + if new_port['admin_state_up']: + need_add |= new_ofport_exist + else: + need_del |= old_ofport_exist + + if portinfo_changed: + if portinfo_changed in ['DEL', 'MOD']: + need_del |= old_ofport_exist + if portinfo_changed in ['ADD', 'MOD']: + need_add |= new_ofport_exist + need_packet_filter_update |= True + + # Update OFC port if required + if need_del: + self.deactivate_port(context, new_port) + if need_packet_filter_update: + self.deactivate_packet_filters_by_port(context, id) + if need_add: + if need_packet_filter_update: + self.activate_packet_filters_by_port(context, id) + self.activate_port_if_ready(context, new_port) + + def update_port(self, context, id, port): + """Update port, and handle packetfilters associated with the port. + + Update network entry on DB. If admin_state_up was changed, activate + or deactivate the port and packetfilters associated with it. + """ + LOG.debug(_("NECPluginV2.update_port() called, " + "id=%(id)s port=%(port)s ."), + {'id': id, 'port': port}) + need_port_update_notify = False + with context.session.begin(subtransactions=True): + old_port = super(NECPluginV2, self).get_port(context, id) + new_port = super(NECPluginV2, self).update_port(context, id, port) + portinfo_changed = self._process_portbindings_update( + context, port['port'], new_port) + if addr_pair.ADDRESS_PAIRS in port['port']: + need_port_update_notify |= ( + self.update_address_pairs_on_port(context, id, port, + old_port, + new_port)) + need_port_update_notify |= self.update_security_group_on_port( + context, id, port, old_port, new_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, old_port, new_port) + if need_port_update_notify: + self.notifier.port_update(context, new_port) + + self._update_ofc_port_if_required(context, old_port, new_port, + portinfo_changed) + return new_port + + def delete_port(self, context, id, l3_port_check=True): + """Delete port and packet_filters associated with the port.""" + LOG.debug(_("NECPluginV2.delete_port() called, id=%s ."), id) + # ext_sg.SECURITYGROUPS attribute for the port is required + # since notifier.security_groups_member_updated() need the attribute. + # Thus we need to call self.get_port() instead of super().get_port() + port_db = self._get_port(context, id) + port = self._make_port_dict(port_db) + + handler = self._get_port_handler('delete', port['device_owner']) + # handler() raises an exception if an error occurs during processing. 
+ port = handler(context, port) + + # delete all packet_filters of the port from the controller + for pf in port_db.packetfilters: + self.delete_packet_filter(context, pf['id']) + + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + with context.session.begin(subtransactions=True): + self.disassociate_floatingips(context, id) + self._delete_port_security_group_bindings(context, id) + super(NECPluginV2, self).delete_port(context, id) + self.notify_security_groups_member_updated(context, port) + + +class NECPluginV2AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + '''RPC API for NEC plugin agent.''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(NECPluginV2AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_port_update = topics.get_topic_name( + topic, topics.PORT, topics.UPDATE) + + def port_update(self, context, port): + self.fanout_cast(context, + self.make_msg('port_update', + port=port), + topic=self.topic_port_update) + + +class DhcpRpcCallback(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin): + # DhcpPluginApi BASE_RPC_API_VERSION + RPC_API_VERSION = '1.1' + + +class L3RpcCallback(n_rpc.RpcCallback, l3_rpc_base.L3RpcCallbackMixin): + # 1.0 L3PluginApi BASE_RPC_API_VERSION + # 1.1 Support update_floatingip_statuses + RPC_API_VERSION = '1.1' + + +class SecurityGroupServerRpcCallback( + n_rpc.RpcCallback, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + RPC_API_VERSION = sg_rpc.SG_RPC_VERSION + + @staticmethod + def get_port_from_device(device): + port = ndb.get_port_from_device(device) + if port: + port['device'] = device + LOG.debug(_("NECPluginV2RPCCallbacks.get_port_from_device() called, " + "device=%(device)s => %(ret)s."), + {'device': device, 'ret': port}) + return port + + +class NECPluginV2RPCCallbacks(n_rpc.RpcCallback): + + RPC_API_VERSION = '1.0' + + def __init__(self, plugin): + super(NECPluginV2RPCCallbacks, self).__init__() + self.plugin = plugin + + def update_ports(self, rpc_context, **kwargs): + """Update ports' information and activate/deavtivate them. + + Expected input format is: + {'topic': 'q-agent-notifier', + 'agent_id': 'nec-q-agent.' + , + 'datapath_id': , + 'port_added': [,...], + 'port_removed': [,...]} + """ + LOG.debug(_("NECPluginV2RPCCallbacks.update_ports() called, " + "kwargs=%s ."), kwargs) + datapath_id = kwargs['datapath_id'] + session = rpc_context.session + for p in kwargs.get('port_added', []): + id = p['id'] + portinfo = ndb.get_portinfo(session, id) + if portinfo: + if (necutils.cmp_dpid(portinfo.datapath_id, datapath_id) and + portinfo.port_no == p['port_no']): + LOG.debug(_("update_ports(): ignore unchanged portinfo in " + "port_added message (port_id=%s)."), id) + continue + ndb.del_portinfo(session, id) + port = self._get_port(rpc_context, id) + if port: + ndb.add_portinfo(session, id, datapath_id, p['port_no'], + mac=p.get('mac', '')) + # NOTE: Make sure that packet filters on this port exist while + # the port is active to avoid unexpected packet transfer. 
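+                # An old portinfo existed and differs from the reported one
+                # (unchanged entries are skipped above), so deactivate the
+                # stale OFC port and its packet filters before re-activating
+                # the port with the new portinfo below.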
+ if portinfo: + self.plugin.deactivate_port(rpc_context, port, + raise_exc=False) + self.plugin.deactivate_packet_filters_by_port( + rpc_context, id, raise_exc=False) + self.plugin.activate_packet_filters_by_port(rpc_context, id) + self.plugin.activate_port_if_ready(rpc_context, port) + for id in kwargs.get('port_removed', []): + portinfo = ndb.get_portinfo(session, id) + if not portinfo: + LOG.debug(_("update_ports(): ignore port_removed message " + "due to portinfo for port_id=%s was not " + "registered"), id) + continue + if not necutils.cmp_dpid(portinfo.datapath_id, datapath_id): + LOG.debug(_("update_ports(): ignore port_removed message " + "received from different host " + "(registered_datapath_id=%(registered)s, " + "received_datapath_id=%(received)s)."), + {'registered': portinfo.datapath_id, + 'received': datapath_id}) + continue + ndb.del_portinfo(session, id) + port = self._get_port(rpc_context, id) + if port: + self.plugin.deactivate_port(rpc_context, port, raise_exc=False) + self.plugin.deactivate_packet_filters_by_port( + rpc_context, id, raise_exc=False) + + def _get_port(self, context, port_id): + try: + return self.plugin.get_port(context, port_id) + except n_exc.PortNotFound: + return None diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/nec_router.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/nec_router.py new file mode 100644 index 00000000..99f40766 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/nec_router.py @@ -0,0 +1,356 @@ +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Akihiro Motoki + +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_db +from neutron.db import l3_gwmode_db +from neutron.db import models_v2 +from neutron.extensions import l3 +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import constants as nconst +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.db import router as rdb +from neutron.plugins.nec.extensions import router_provider as ext_provider + +LOG = logging.getLogger(__name__) + +PROVIDER_L3AGENT = nconst.ROUTER_PROVIDER_L3AGENT +PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW + +ROUTER_DRIVER_PATH = 'neutron.plugins.nec.router_drivers.' 
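+# Mapping from router provider name to the driver class that implements it.
+# load_driver() below instantiates the enabled drivers into ROUTER_DRIVERS.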
+ROUTER_DRIVER_MAP = { + PROVIDER_L3AGENT: ROUTER_DRIVER_PATH + 'RouterL3AgentDriver', + PROVIDER_OPENFLOW: ROUTER_DRIVER_PATH + 'RouterOpenFlowDriver' +} + +ROUTER_DRIVERS = {} + +STATUS_ACTIVE = nconst.ROUTER_STATUS_ACTIVE +STATUS_ERROR = nconst.ROUTER_STATUS_ERROR + + +class RouterMixin(extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin): + + def create_router(self, context, router): + """Create a new router entry on DB, and create it on OFC.""" + LOG.debug(_("RouterMixin.create_router() called, " + "router=%s ."), router) + tenant_id = self._get_tenant_id_for_create(context, router['router']) + + provider = get_provider_with_default( + router['router'].get(ext_provider.ROUTER_PROVIDER)) + driver = get_driver_by_provider(provider) + + with context.session.begin(subtransactions=True): + new_router = super(RouterMixin, self).create_router(context, + router) + new_router['gw_port'] = self._get_gw_port_detail( + context, driver, new_router['gw_port_id']) + rdb.add_router_provider_binding(context.session, + provider, str(new_router['id'])) + self._extend_router_dict_provider(new_router, provider) + + # create router on the network controller + try: + return driver.create_router(context, tenant_id, new_router) + except nexc.RouterOverLimit: + with excutils.save_and_reraise_exception(): + super(RouterMixin, self).delete_router(context, + new_router['id']) + + def update_router(self, context, router_id, router): + LOG.debug(_("RouterMixin.update_router() called, " + "id=%(id)s, router=%(router)s ."), + {'id': router_id, 'router': router}) + + with context.session.begin(subtransactions=True): + old_rtr = super(RouterMixin, self).get_router(context, router_id) + provider = old_rtr[ext_provider.ROUTER_PROVIDER] + driver = get_driver_by_provider(provider) + old_rtr['gw_port'] = self._get_gw_port_detail( + context, driver, old_rtr['gw_port_id']) + new_rtr = super(RouterMixin, self).update_router( + context, router_id, router) + new_rtr['gw_port'] = self._get_gw_port_detail( + context, driver, new_rtr['gw_port_id']) + driver.update_router(context, router_id, old_rtr, new_rtr) + return new_rtr + + def delete_router(self, context, router_id): + LOG.debug(_("RouterMixin.delete_router() called, id=%s."), router_id) + + router = super(RouterMixin, self).get_router(context, router_id) + tenant_id = router['tenant_id'] + # Since l3_db.delete_router() has no interaction with the plugin layer, + # we need to check if the router can be deleted first. + self._check_router_in_use(context, router_id) + driver = self._get_router_driver_by_id(context, router_id) + # If gw_port exists, remove it. 
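+        # The gateway interface is removed through the router driver before
+        # the router itself is deleted on the backend.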
+ gw_port = self._get_gw_port(context, router_id) + if gw_port: + driver.delete_interface(context, router_id, gw_port) + driver.delete_router(context, router_id, router) + + super(RouterMixin, self).delete_router(context, router_id) + + self._cleanup_ofc_tenant(context, tenant_id) + + def add_router_interface(self, context, router_id, interface_info): + LOG.debug(_("RouterMixin.add_router_interface() called, " + "id=%(id)s, interface=%(interface)s."), + {'id': router_id, 'interface': interface_info}) + return super(RouterMixin, self).add_router_interface( + context, router_id, interface_info) + + def remove_router_interface(self, context, router_id, interface_info): + LOG.debug(_("RouterMixin.remove_router_interface() called, " + "id=%(id)s, interface=%(interface)s."), + {'id': router_id, 'interface': interface_info}) + return super(RouterMixin, self).remove_router_interface( + context, router_id, interface_info) + + def create_router_port(self, context, port): + # This method is called from plugin.create_port() + router_id = port['device_id'] + driver = self._get_router_driver_by_id(context, router_id) + port = driver.add_interface(context, router_id, port) + return port + + def delete_router_port(self, context, port): + # This method is called from plugin.delete_port() + router_id = port['device_id'] + driver = self._get_router_driver_by_id(context, router_id) + return driver.delete_interface(context, router_id, port) + + def _get_gw_port_detail(self, context, driver, gw_port_id): + if not gw_port_id or not driver.need_gw_info: + return + ctx_elevated = context.elevated() + gw_port = self._get_port(ctx_elevated, gw_port_id) + # At this moment gw_port has been created, so it is guaranteed + # that fixed_ip is assigned for the gw_port. + ext_subnet_id = gw_port['fixed_ips'][0]['subnet_id'] + ext_subnet = self._get_subnet(ctx_elevated, ext_subnet_id) + gw_info = {'network_id': gw_port['network_id'], + 'ip_address': gw_port['fixed_ips'][0]['ip_address'], + 'mac_address': gw_port['mac_address'], + 'cidr': ext_subnet['cidr'], + 'gateway_ip': ext_subnet['gateway_ip']} + return gw_info + + def _get_gw_port(self, context, router_id): + device_filter = {'device_id': [router_id], + 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW]} + ports = self.get_ports(context.elevated(), filters=device_filter) + if ports: + return ports[0] + + def _check_router_in_use(self, context, router_id): + with context.session.begin(subtransactions=True): + # Ensure that the router is not used + router_filter = {'router_id': [router_id]} + fips = self.get_floatingips_count(context.elevated(), + filters=router_filter) + if fips: + raise l3.RouterInUse(router_id=router_id) + + device_filter = {'device_id': [router_id], + 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} + ports = self.get_ports_count(context.elevated(), + filters=device_filter) + if ports: + raise l3.RouterInUse(router_id=router_id) + + def _get_router_for_floatingip(self, context, internal_port, + internal_subnet_id, + external_network_id): + """Get a router for a requested floating IP. + + OpenFlow vrouter does not support NAT, so we need to exclude them + from candidate routers for floating IP association. + This method is called in l3_db.get_assoc_data(). 
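+        Only routers whose driver reports floating IP support are returned.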
+ """ + subnet_db = self._get_subnet(context, internal_subnet_id) + if not subnet_db['gateway_ip']: + msg = (_('Cannot add floating IP to port on subnet %s ' + 'which has no gateway_ip') % internal_subnet_id) + raise n_exc.BadRequest(resource='floatingip', msg=msg) + + # find router interface ports on this network + router_intf_qry = context.session.query(models_v2.Port) + router_intf_ports = router_intf_qry.filter_by( + network_id=internal_port['network_id'], + device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF) + + for intf_p in router_intf_ports: + if intf_p['fixed_ips'][0]['subnet_id'] == internal_subnet_id: + router_id = intf_p['device_id'] + router_gw_qry = context.session.query(models_v2.Port) + has_gw_port = router_gw_qry.filter_by( + network_id=external_network_id, + device_id=router_id, + device_owner=l3_db.DEVICE_OWNER_ROUTER_GW).count() + driver = self._get_router_driver_by_id(context, router_id) + if (has_gw_port and driver.floating_ip_support()): + return router_id + + raise l3.ExternalGatewayForFloatingIPNotFound( + subnet_id=internal_subnet_id, + external_network_id=external_network_id, + port_id=internal_port['id']) + + def _get_sync_routers(self, context, router_ids=None, active=None): + """Query routers and their gw ports for l3 agent. + + The difference from the superclass in l3_db is that this method + only lists routers hosted on l3-agents. + """ + router_list = super(RouterMixin, self)._get_sync_routers( + context, router_ids, active) + if router_list: + _router_ids = [r['id'] for r in router_list] + agent_routers = rdb.get_routers_by_provider( + context.session, 'l3-agent', + router_ids=_router_ids) + router_list = [r for r in router_list + if r['id'] in agent_routers] + return router_list + + def _get_router_driver_by_id(self, context, router_id): + provider = self._get_provider_by_router_id(context, router_id) + return get_driver_by_provider(provider) + + def _get_provider_by_router_id(self, context, router_id): + return rdb.get_provider_by_router(context.session, router_id) + + def _extend_router_dict_provider(self, router_res, provider): + router_res[ext_provider.ROUTER_PROVIDER] = provider + + def extend_router_dict_provider(self, router_res, router_db): + # NOTE: router_db.provider is None just after creating a router, + # so we need to skip setting router_provider here. + if not router_db.provider: + return + self._extend_router_dict_provider(router_res, + router_db.provider['provider']) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, [extend_router_dict_provider]) + + +class L3AgentSchedulerDbMixin(l3_agentschedulers_db.L3AgentSchedulerDbMixin): + + def auto_schedule_routers(self, context, host, router_ids): + router_ids = rdb.get_routers_by_provider( + context.session, nconst.ROUTER_PROVIDER_L3AGENT, router_ids) + # If no l3-agent hosted router, there is no need to schedule. 
+        if not router_ids:
+            return
+        return super(L3AgentSchedulerDbMixin, self).auto_schedule_routers(
+            context, host, router_ids)
+
+    def schedule_router(self, context, router, candidates=None, hints=None):
+        if (self._get_provider_by_router_id(context, router) ==
+                nconst.ROUTER_PROVIDER_L3AGENT):
+            return super(L3AgentSchedulerDbMixin, self).schedule_router(
+                context, router, candidates=candidates, hints=hints)
+
+    def add_router_to_l3_agent(self, context, id, router_id):
+        provider = self._get_provider_by_router_id(context, router_id)
+        if provider != nconst.ROUTER_PROVIDER_L3AGENT:
+            raise nexc.RouterProviderMismatch(
+                router_id=router_id, provider=provider,
+                expected_provider=nconst.ROUTER_PROVIDER_L3AGENT)
+        return super(L3AgentSchedulerDbMixin, self).add_router_to_l3_agent(
+            context, id, router_id)
+
+
+class L3AgentNotifyAPI(l3_rpc_agent_api.L3AgentNotifyAPI):
+
+    def _notification(self, context, method, router_ids, operation, data):
+        """Notify all the agents that are hosting the routers.
+
+        _notification() is called in the L3 db plugin for all routers,
+        regardless of whether the routers are hosted on l3 agents or not.
+        When the routers are not hosted on l3 agents, there is no need to
+        notify. This method filters out routers not hosted by l3 agents.
+        """
+        router_ids = rdb.get_routers_by_provider(
+            context.session, nconst.ROUTER_PROVIDER_L3AGENT, router_ids)
+        super(L3AgentNotifyAPI, self)._notification(
+            context, method, router_ids, operation, data)
+
+
+def load_driver(plugin, ofc_manager):
+
+    if (PROVIDER_OPENFLOW in ROUTER_DRIVER_MAP and
+            not ofc_manager.driver.router_supported):
+        LOG.warning(
+            _('OFC does not support routers with provider=%(provider)s, '
+              'so it has been removed from the supported providers '
+              '(new router driver map=%(driver_map)s)'),
+            {'provider': PROVIDER_OPENFLOW,
+             'driver_map': ROUTER_DRIVER_MAP})
+        del ROUTER_DRIVER_MAP[PROVIDER_OPENFLOW]
+
+    if config.PROVIDER.default_router_provider not in ROUTER_DRIVER_MAP:
+        LOG.error(_('default_router_provider %(default)s is not supported. '
+                    'Please specify one of %(supported)s'),
+                  {'default': config.PROVIDER.default_router_provider,
+                   'supported': ROUTER_DRIVER_MAP.keys()})
+        raise SystemExit(1)
+
+    enabled_providers = (set(config.PROVIDER.router_providers +
+                             [config.PROVIDER.default_router_provider]) &
+                         set(ROUTER_DRIVER_MAP.keys()))
+
+    for driver in enabled_providers:
+        driver_klass = importutils.import_class(ROUTER_DRIVER_MAP[driver])
+        ROUTER_DRIVERS[driver] = driver_klass(plugin, ofc_manager)
+
+    LOG.info(_('Enabled router drivers: %s'), ROUTER_DRIVERS.keys())
+
+    if not ROUTER_DRIVERS:
+        LOG.error(_('No router provider is enabled. neutron-server terminated!'
+ ' (supported=%(supported)s, configured=%(config)s)'), + {'supported': ROUTER_DRIVER_MAP.keys(), + 'config': config.PROVIDER.router_providers}) + raise SystemExit(1) + + +def get_provider_with_default(provider): + if not attr.is_attr_set(provider): + provider = config.PROVIDER.default_router_provider + elif provider not in ROUTER_DRIVERS: + raise nexc.ProviderNotFound(provider=provider) + return provider + + +def get_driver_by_provider(provider): + if provider is None: + provider = config.PROVIDER.default_router_provider + elif provider not in ROUTER_DRIVERS: + raise nexc.ProviderNotFound(provider=provider) + return ROUTER_DRIVERS[provider] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/ofc_driver_base.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/ofc_driver_base.py new file mode 100644 index 00000000..ba932132 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/ofc_driver_base.py @@ -0,0 +1,103 @@ +# Copyright 2012 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU +# @author: Akihiro MOTOKI + +import abc +import six + + +@six.add_metaclass(abc.ABCMeta) +class OFCDriverBase(object): + """OpenFlow Controller (OFC) Driver Specification. + + OFCDriverBase defines the minimum set of methods required by this plugin. + It would be better that other methods like update_* are implemented. + """ + + @abc.abstractmethod + def create_tenant(self, description, tenant_id=None): + """Create a new tenant at OpenFlow Controller. + + :param description: A description of this tenant. + :param tenant_id: A hint of OFC tenant ID. + A driver could use this id as a OFC id or ignore it. + :returns: ID of the tenant created at OpenFlow Controller. + :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass + + @abc.abstractmethod + def delete_tenant(self, ofc_tenant_id): + """Delete a tenant at OpenFlow Controller. + + :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass + + @abc.abstractmethod + def create_network(self, ofc_tenant_id, description, network_id=None): + """Create a new network on specified OFC tenant at OpenFlow Controller. + + :param ofc_tenant_id: a OFC tenant ID in which a new network belongs. + :param description: A description of this network. + :param network_id: A hint of an ID of OFC network. + :returns: ID of the network created at OpenFlow Controller. + ID returned must be unique in the OpenFlow Controller. + If a network is identified in conjunction with other information + such as a tenant ID, such information should be included in the ID. + :raises: neutron.plugin.nec.common.exceptions.OFCException + """ + pass + + @abc.abstractmethod + def delete_network(self, ofc_network_id): + """Delete a netwrok at OpenFlow Controller. 
+
+        :raises: neutron.plugin.nec.common.exceptions.OFCException
+        """
+        pass
+
+    @abc.abstractmethod
+    def create_port(self, ofc_network_id, portinfo,
+                    port_id=None, filters=None):
+        """Create a new port on the specified network at OFC.
+
+        :param ofc_network_id: an OFC network ID to which the new port
+                               belongs.
+        :param portinfo: OpenFlow information of this port.
+                         {'datapath_id': ID of the switch the port is
+                                         connected to.
+                          'port_no': port number on the switch the port is
+                                     connected to.
+                          'vlan_id': VLAN ID tagged on the port.
+                          'mac': MAC address.
+                         }
+        :param port_id: A hint of an ID of the OFC port.
+                        The returned ID must be unique in the OpenFlow
+                        Controller.
+
+                        If a port is identified in combination with a network
+                        or a tenant, such information should be included in
+                        the ID.
+        :param filters: A list of packet filters associated with the port.
+                        Each element is a tuple (neutron ID, OFC ID).
+
+        :returns: ID of the port created at OpenFlow Controller.
+        :raises: neutron.plugin.nec.common.exceptions.OFCException
+        """
+        pass
+
+    @abc.abstractmethod
+    def delete_port(self, ofc_port_id):
+        """Delete a port at OpenFlow Controller.
+
+        :raises: neutron.plugin.nec.common.exceptions.OFCException
+        """
+        pass
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/ofc_manager.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/ofc_manager.py
new file mode 100644
index 00000000..41a50ecf
--- /dev/null
+++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/ofc_manager.py
@@ -0,0 +1,199 @@
+# Copyright 2012 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# @author: Ryota MIBU
+# @author: Akihiro MOTOKI
+
+import netaddr
+
+from neutron.common import utils
+from neutron.openstack.common import log as logging
+from neutron.plugins.nec.common import config
+from neutron.plugins.nec.common import exceptions as nexc
+from neutron.plugins.nec.db import api as ndb
+from neutron.plugins.nec import drivers
+
+
+LOG = logging.getLogger(__name__)
+
+
+class OFCManager(object):
+    """This class manages an OpenFlow Controller and maps resources.
+
+    This class manages an OpenFlow Controller (OFC) with the driver specified
+    in the plugin configuration. It keeps mappings between Neutron and OFC
+    IDs for various entities such as Tenant, Network and Filter. A Port on
+    the OFC is identified by a switch ID 'datapath_id' and a port number
+    'port_no' of the switch. An ID named 'ofc_*' is used to identify a
+    resource on the OFC.
+ """ + + def __init__(self, plugin): + self.driver = drivers.get_driver(config.OFC.driver)(config.OFC) + self.plugin = plugin + + def _get_ofc_id(self, context, resource, neutron_id): + return ndb.get_ofc_id(context.session, resource, neutron_id) + + def _exists_ofc_item(self, context, resource, neutron_id): + return ndb.exists_ofc_item(context.session, resource, neutron_id) + + def _add_ofc_item(self, context, resource, neutron_id, ofc_id): + # Ensure a new item is added to the new mapping table + ndb.add_ofc_item(context.session, resource, neutron_id, ofc_id) + + def _del_ofc_item(self, context, resource, neutron_id): + ndb.del_ofc_item(context.session, resource, neutron_id) + + def ensure_ofc_tenant(self, context, tenant_id): + if not self.exists_ofc_tenant(context, tenant_id): + self.create_ofc_tenant(context, tenant_id) + + def create_ofc_tenant(self, context, tenant_id): + desc = "ID=%s at OpenStack." % tenant_id + ofc_tenant_id = self.driver.create_tenant(desc, tenant_id) + self._add_ofc_item(context, "ofc_tenant", tenant_id, ofc_tenant_id) + + def exists_ofc_tenant(self, context, tenant_id): + return self._exists_ofc_item(context, "ofc_tenant", tenant_id) + + def delete_ofc_tenant(self, context, tenant_id): + ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) + self.driver.delete_tenant(ofc_tenant_id) + self._del_ofc_item(context, "ofc_tenant", tenant_id) + + def create_ofc_network(self, context, tenant_id, network_id, + network_name=None): + ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) + desc = "ID=%s Name=%s at Neutron." % (network_id, network_name) + ofc_net_id = self.driver.create_network(ofc_tenant_id, desc, + network_id) + self._add_ofc_item(context, "ofc_network", network_id, ofc_net_id) + + def exists_ofc_network(self, context, network_id): + return self._exists_ofc_item(context, "ofc_network", network_id) + + def delete_ofc_network(self, context, network_id, network): + ofc_net_id = self._get_ofc_id(context, "ofc_network", network_id) + self.driver.delete_network(ofc_net_id) + self._del_ofc_item(context, "ofc_network", network_id) + + def create_ofc_port(self, context, port_id, port): + ofc_net_id = self._get_ofc_id(context, "ofc_network", + port['network_id']) + portinfo = ndb.get_portinfo(context.session, port_id) + if not portinfo: + raise nexc.PortInfoNotFound(id=port_id) + + # Associate packet filters + filters = self.plugin.get_packet_filters_for_port(context, port) + if filters is not None: + params = {'filters': filters} + else: + params = {} + + ofc_port_id = self.driver.create_port(ofc_net_id, portinfo, port_id, + **params) + self._add_ofc_item(context, "ofc_port", port_id, ofc_port_id) + + def exists_ofc_port(self, context, port_id): + return self._exists_ofc_item(context, "ofc_port", port_id) + + def delete_ofc_port(self, context, port_id, port): + ofc_port_id = self._get_ofc_id(context, "ofc_port", port_id) + self.driver.delete_port(ofc_port_id) + self._del_ofc_item(context, "ofc_port", port_id) + + def create_ofc_packet_filter(self, context, filter_id, filter_dict): + ofc_net_id = self._get_ofc_id(context, "ofc_network", + filter_dict['network_id']) + in_port_id = filter_dict.get('in_port') + portinfo = None + if in_port_id: + portinfo = ndb.get_portinfo(context.session, in_port_id) + if not portinfo: + raise nexc.PortInfoNotFound(id=in_port_id) + + # Collect ports to be associated with the filter + apply_ports = ndb.get_active_ports_on_ofc( + context, filter_dict['network_id'], in_port_id) + ofc_pf_id = 
self.driver.create_filter(ofc_net_id, + filter_dict, portinfo, filter_id, + apply_ports) + self._add_ofc_item(context, "ofc_packet_filter", filter_id, ofc_pf_id) + + def update_ofc_packet_filter(self, context, filter_id, filter_dict): + ofc_pf_id = self._get_ofc_id(context, "ofc_packet_filter", filter_id) + ofc_pf_id = self.driver.convert_ofc_filter_id(context, ofc_pf_id) + self.driver.update_filter(ofc_pf_id, filter_dict) + + def exists_ofc_packet_filter(self, context, filter_id): + return self._exists_ofc_item(context, "ofc_packet_filter", filter_id) + + def delete_ofc_packet_filter(self, context, filter_id): + ofc_pf_id = self._get_ofc_id(context, "ofc_packet_filter", filter_id) + self.driver.delete_filter(ofc_pf_id) + self._del_ofc_item(context, "ofc_packet_filter", filter_id) + + def create_ofc_router(self, context, tenant_id, router_id, name=None): + ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) + desc = "ID=%s Name=%s at Neutron." % (router_id, name) + ofc_router_id = self.driver.create_router(ofc_tenant_id, router_id, + desc) + self._add_ofc_item(context, "ofc_router", router_id, ofc_router_id) + + def exists_ofc_router(self, context, router_id): + return self._exists_ofc_item(context, "ofc_router", router_id) + + def delete_ofc_router(self, context, router_id, router): + ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) + self.driver.delete_router(ofc_router_id) + self._del_ofc_item(context, "ofc_router", router_id) + + def add_ofc_router_interface(self, context, router_id, port_id, port): + # port must have the following fields: + # network_id, cidr, ip_address, mac_address + ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) + ofc_net_id = self._get_ofc_id(context, "ofc_network", + port['network_id']) + ip_address = '%s/%s' % (port['ip_address'], + netaddr.IPNetwork(port['cidr']).prefixlen) + mac_address = port['mac_address'] + ofc_inf_id = self.driver.add_router_interface( + ofc_router_id, ofc_net_id, ip_address, mac_address) + # Use port mapping table to maintain an interface of OFC router + self._add_ofc_item(context, "ofc_port", port_id, ofc_inf_id) + + def delete_ofc_router_interface(self, context, router_id, port_id): + # Use port mapping table to maintain an interface of OFC router + ofc_inf_id = self._get_ofc_id(context, "ofc_port", port_id) + self.driver.delete_router_interface(ofc_inf_id) + self._del_ofc_item(context, "ofc_port", port_id) + + def update_ofc_router_route(self, context, router_id, new_routes): + ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) + ofc_routes = self.driver.list_router_routes(ofc_router_id) + route_dict = {} + cur_routes = [] + for r in ofc_routes: + key = ','.join((r['destination'], r['nexthop'])) + route_dict[key] = r['id'] + del r['id'] + cur_routes.append(r) + added, removed = utils.diff_list_of_dict(cur_routes, new_routes) + for r in removed: + key = ','.join((r['destination'], r['nexthop'])) + route_id = route_dict[key] + self.driver.delete_router_route(route_id) + for r in added: + self.driver.add_router_route(ofc_router_id, r['destination'], + r['nexthop']) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/packet_filter.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/packet_filter.py new file mode 100644 index 00000000..99a28fbb --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/packet_filter.py @@ -0,0 +1,256 @@ +# Copyright 2012-2013 NEC Corporation. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Ryota MIBU + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import config +from neutron.plugins.nec.common import exceptions as nexc +from neutron.plugins.nec.db import api as ndb +from neutron.plugins.nec.db import packetfilter as pf_db + + +LOG = logging.getLogger(__name__) + + +class PacketFilterMixin(pf_db.PacketFilterDbMixin): + """Mixin class to add packet filter to NECPluginV2.""" + + @property + def packet_filter_enabled(self): + if not hasattr(self, '_packet_filter_enabled'): + self._packet_filter_enabled = ( + config.OFC.enable_packet_filter and + self.ofc.driver.filter_supported()) + return self._packet_filter_enabled + + def remove_packet_filter_extension_if_disabled(self, aliases): + if not self.packet_filter_enabled: + LOG.debug(_('Disabled packet-filter extension.')) + aliases.remove('packet-filter') + + def create_packet_filter(self, context, packet_filter): + """Create a new packet_filter entry on DB, then try to activate it.""" + LOG.debug(_("create_packet_filter() called, packet_filter=%s ."), + packet_filter) + + if hasattr(self.ofc.driver, 'validate_filter_create'): + pf = packet_filter['packet_filter'] + self.ofc.driver.validate_filter_create(context, pf) + pf = super(PacketFilterMixin, self).create_packet_filter( + context, packet_filter) + + return self.activate_packet_filter_if_ready(context, pf) + + def update_packet_filter(self, context, id, packet_filter): + """Update packet_filter entry on DB, and recreate it if changed. + + If any rule of the packet_filter was changed, recreate it on OFC. + """ + LOG.debug(_("update_packet_filter() called, " + "id=%(id)s packet_filter=%(packet_filter)s ."), + {'id': id, 'packet_filter': packet_filter}) + + pf_data = packet_filter['packet_filter'] + if hasattr(self.ofc.driver, 'validate_filter_update'): + self.ofc.driver.validate_filter_update(context, pf_data) + + # validate ownership + pf_old = self.get_packet_filter(context, id) + + pf = super(PacketFilterMixin, self).update_packet_filter( + context, id, packet_filter) + + def _packet_filter_changed(old_pf, new_pf): + LOG.debug('old_pf=%(old_pf)s, new_pf=%(new_pf)s', + {'old_pf': old_pf, 'new_pf': new_pf}) + # When the status is ERROR, force sync to OFC. 
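+            # Otherwise only the filter conditions are compared; identity
+            # fields (id, name, tenant_id, network_id, in_port) and status
+            # are skipped.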
+ if old_pf['status'] == pf_db.PF_STATUS_ERROR: + LOG.debug('update_packet_filter: Force filter update ' + 'because the previous status is ERROR.') + return True + for key in new_pf: + if key in ('id', 'name', 'tenant_id', 'network_id', + 'in_port', 'status'): + continue + if old_pf[key] != new_pf[key]: + return True + return False + + if _packet_filter_changed(pf_old, pf): + if hasattr(self.ofc.driver, 'update_filter'): + # admin_state is changed + if pf_old['admin_state_up'] != pf['admin_state_up']: + LOG.debug('update_packet_filter: admin_state ' + 'is changed to %s', pf['admin_state_up']) + if pf['admin_state_up']: + self.activate_packet_filter_if_ready(context, pf) + else: + self.deactivate_packet_filter(context, pf) + elif pf['admin_state_up']: + LOG.debug('update_packet_filter: admin_state is ' + 'unchanged (True)') + if self.ofc.exists_ofc_packet_filter(context, id): + pf = self._update_packet_filter(context, pf, pf_data) + else: + pf = self.activate_packet_filter_if_ready(context, pf) + else: + LOG.debug('update_packet_filter: admin_state is unchanged ' + '(False). No need to update OFC filter.') + else: + pf = self.deactivate_packet_filter(context, pf) + pf = self.activate_packet_filter_if_ready(context, pf) + + return pf + + def _update_packet_filter(self, context, new_pf, pf_data): + pf_id = new_pf['id'] + prev_status = new_pf['status'] + try: + # If previous status is ERROR, try to sync all attributes. + pf = new_pf if prev_status == pf_db.PF_STATUS_ERROR else pf_data + self.ofc.update_ofc_packet_filter(context, pf_id, pf) + new_status = pf_db.PF_STATUS_ACTIVE + if new_status != prev_status: + self._update_resource_status(context, "packet_filter", + pf_id, new_status) + new_pf['status'] = new_status + return new_pf + except Exception as exc: + with excutils.save_and_reraise_exception(): + if (isinstance(exc, nexc.OFCException) or + isinstance(exc, nexc.OFCConsistencyBroken)): + LOG.error(_("Failed to create packet_filter id=%(id)s on " + "OFC: %(exc)s"), + {'id': pf_id, 'exc': exc}) + new_status = pf_db.PF_STATUS_ERROR + if new_status != prev_status: + self._update_resource_status(context, "packet_filter", + pf_id, new_status) + + def delete_packet_filter(self, context, id): + """Deactivate and delete packet_filter.""" + LOG.debug(_("delete_packet_filter() called, id=%s ."), id) + + # validate ownership + pf = self.get_packet_filter(context, id) + + # deactivate_packet_filter() raises an exception + # if an error occurs during processing. + pf = self.deactivate_packet_filter(context, pf) + + super(PacketFilterMixin, self).delete_packet_filter(context, id) + + def activate_packet_filter_if_ready(self, context, packet_filter): + """Activate packet_filter by creating filter on OFC if ready. 
+ + Conditions to create packet_filter on OFC are: + * packet_filter admin_state is UP + * (if 'in_port' is specified) portinfo is available + """ + LOG.debug(_("activate_packet_filter_if_ready() called, " + "packet_filter=%s."), packet_filter) + + pf_id = packet_filter['id'] + in_port_id = packet_filter.get('in_port') + current = packet_filter['status'] + + pf_status = current + if not packet_filter['admin_state_up']: + LOG.debug(_("activate_packet_filter_if_ready(): skip pf_id=%s, " + "packet_filter.admin_state_up is False."), pf_id) + elif in_port_id and not ndb.get_portinfo(context.session, in_port_id): + LOG.debug(_("activate_packet_filter_if_ready(): skip " + "pf_id=%s, no portinfo for the in_port."), pf_id) + elif self.ofc.exists_ofc_packet_filter(context, packet_filter['id']): + LOG.debug(_("_activate_packet_filter_if_ready(): skip, " + "ofc_packet_filter already exists.")) + else: + LOG.debug(_("activate_packet_filter_if_ready(): create " + "packet_filter id=%s on OFC."), pf_id) + try: + self.ofc.create_ofc_packet_filter(context, pf_id, + packet_filter) + pf_status = pf_db.PF_STATUS_ACTIVE + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + LOG.error(_("Failed to create packet_filter id=%(id)s on " + "OFC: %(exc)s"), {'id': pf_id, 'exc': exc}) + pf_status = pf_db.PF_STATUS_ERROR + + if pf_status != current: + self._update_resource_status(context, "packet_filter", pf_id, + pf_status) + packet_filter.update({'status': pf_status}) + + return packet_filter + + def deactivate_packet_filter(self, context, packet_filter): + """Deactivate packet_filter by deleting filter from OFC if exixts.""" + LOG.debug(_("deactivate_packet_filter_if_ready() called, " + "packet_filter=%s."), packet_filter) + pf_id = packet_filter['id'] + + if not self.ofc.exists_ofc_packet_filter(context, pf_id): + LOG.debug(_("deactivate_packet_filter(): skip, " + "Not found OFC Mapping for packet_filter id=%s."), + pf_id) + return packet_filter + + LOG.debug(_("deactivate_packet_filter(): " + "deleting packet_filter id=%s from OFC."), pf_id) + try: + self.ofc.delete_ofc_packet_filter(context, pf_id) + self._update_resource_status_if_changed( + context, "packet_filter", packet_filter, pf_db.PF_STATUS_DOWN) + return packet_filter + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to delete packet_filter id=%(id)s " + "from OFC: %(exc)s"), + {'id': pf_id, 'exc': str(exc)}) + self._update_resource_status_if_changed( + context, "packet_filter", packet_filter, + pf_db.PF_STATUS_ERROR) + + def activate_packet_filters_by_port(self, context, port_id): + if not self.packet_filter_enabled: + return + + filters = {'in_port': [port_id], 'admin_state_up': [True], + 'status': [pf_db.PF_STATUS_DOWN]} + pfs = self.get_packet_filters(context, filters=filters) + for pf in pfs: + self.activate_packet_filter_if_ready(context, pf) + + def deactivate_packet_filters_by_port(self, context, port_id, + raise_exc=True): + if not self.packet_filter_enabled: + return + + filters = {'in_port': [port_id], 'status': [pf_db.PF_STATUS_ACTIVE]} + pfs = self.get_packet_filters(context, filters=filters) + error = False + for pf in pfs: + try: + self.deactivate_packet_filter(context, pf) + except (nexc.OFCException, nexc.OFCMappingNotFound): + error = True + if raise_exc and error: + raise nexc.OFCException(_('Error occurred while disabling packet ' + 'filter(s) for port %s'), port_id) + + def get_packet_filters_for_port(self, context, port): + if 
self.packet_filter_enabled: + return super(PacketFilterMixin, + self).get_packet_filters_for_port(context, port) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/router_drivers.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/router_drivers.py new file mode 100644 index 00000000..d06d805c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nec/router_drivers.py @@ -0,0 +1,222 @@ +# Copyright 2013 NEC Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Akihiro Motoki + +import abc +import httplib + +import six + +from neutron.common import log as call_log +from neutron.common import utils +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.nec.common import constants as nconst +from neutron.plugins.nec.common import exceptions as nexc + +LOG = logging.getLogger(__name__) + +PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW + + +@six.add_metaclass(abc.ABCMeta) +class RouterDriverBase(object): + + def __init__(self, plugin, ofc_manager): + self.plugin = plugin + self.ofc = ofc_manager + + def floating_ip_support(self): + return True + + @abc.abstractmethod + def create_router(self, context, tenant_id, router): + pass + + @abc.abstractmethod + def update_router(self, context, router_id, old_router, new_router): + pass + + @abc.abstractmethod + def delete_router(self, context, router_id, router): + pass + + @abc.abstractmethod + def add_interface(self, context, router_id, port): + pass + + @abc.abstractmethod + def delete_interface(self, context, router_id, port): + pass + + +class RouterL3AgentDriver(RouterDriverBase): + + need_gw_info = False + + @call_log.log + def create_router(self, context, tenant_id, router): + return router + + @call_log.log + def update_router(self, context, router_id, old_router, new_router): + return new_router + + @call_log.log + def delete_router(self, context, router_id, router): + pass + + @call_log.log + def add_interface(self, context, router_id, port): + return self.plugin.activate_port_if_ready(context, port) + + @call_log.log + def delete_interface(self, context, router_id, port): + return self.plugin.deactivate_port(context, port) + + +class RouterOpenFlowDriver(RouterDriverBase): + + need_gw_info = True + + def floating_ip_support(self): + return self.ofc.driver.router_nat_supported + + def _process_gw_port(self, gw_info, routes): + if gw_info and gw_info['gateway_ip']: + routes.append({'destination': '0.0.0.0/0', + 'nexthop': gw_info['gateway_ip']}) + + @call_log.log + def create_router(self, context, tenant_id, router): + try: + router_id = router['id'] + added_routes = [] + self.ofc.ensure_ofc_tenant(context, tenant_id) + self.ofc.create_ofc_router(context, tenant_id, router_id, + router['name']) + self._process_gw_port(router['gw_port'], added_routes) + if added_routes: + self.ofc.update_ofc_router_route(context, router_id, + added_routes, []) + new_status = nconst.ROUTER_STATUS_ACTIVE + 
self.plugin._update_resource_status(context, "router", + router['id'], + new_status) + router['status'] = new_status + return router + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + if (isinstance(exc, nexc.OFCException) and + exc.status == httplib.CONFLICT): + raise nexc.RouterOverLimit(provider=PROVIDER_OPENFLOW) + reason = _("create_router() failed due to %s") % exc + LOG.error(reason) + new_status = nconst.ROUTER_STATUS_ERROR + self._update_resource_status(context, "router", + router['id'], + new_status) + + @call_log.log + def update_router(self, context, router_id, old_router, new_router): + old_routes = old_router['routes'][:] + new_routes = new_router['routes'][:] + self._process_gw_port(old_router['gw_port'], old_routes) + self._process_gw_port(new_router['gw_port'], new_routes) + added, removed = utils.diff_list_of_dict(old_routes, new_routes) + if added or removed: + try: + # NOTE(amotoki): PFC supports one-by-one route update at now. + # It means there may be a case where some route is updated but + # some not. To allow the next call of failures to sync routes + # with Neutron side, we pass the whole new routes here. + # PFC should support atomic route update in the future. + self.ofc.update_ofc_router_route(context, router_id, + new_routes) + new_status = nconst.ROUTER_STATUS_ACTIVE + self.plugin._update_resource_status( + context, "router", router_id, new_status) + new_router['status'] = new_status + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + reason = _("_update_ofc_routes() failed due to %s") % exc + LOG.error(reason) + new_status = nconst.ROUTER_STATUS_ERROR + self.plugin._update_resource_status( + context, "router", router_id, new_status) + return new_router + + @call_log.log + def delete_router(self, context, router_id, router): + if not self.ofc.exists_ofc_router(context, router_id): + return + try: + self.ofc.delete_ofc_router(context, router_id, router) + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + LOG.error(_("delete_router() failed due to %s"), exc) + self.plugin._update_resource_status( + context, "router", router_id, nconst.ROUTER_STATUS_ERROR) + + @call_log.log + def add_interface(self, context, router_id, port): + port_id = port['id'] + # port['fixed_ips'] may be empty if ext_net has no subnet. + # Such port is invalid for a router port and we don't create a port + # on OFC. The port is removed in l3_db._create_router_gw_port. + if not port['fixed_ips']: + msg = _('RouterOpenFlowDriver.add_interface(): the requested port ' + 'has no subnet. add_interface() is skipped. 
' + 'router_id=%(id)s, port=%(port)s)') + LOG.warning(msg, {'id': router_id, 'port': port}) + return port + fixed_ip = port['fixed_ips'][0] + subnet = self.plugin._get_subnet(context, fixed_ip['subnet_id']) + port_info = {'network_id': port['network_id'], + 'ip_address': fixed_ip['ip_address'], + 'cidr': subnet['cidr'], + 'mac_address': port['mac_address']} + try: + self.ofc.add_ofc_router_interface(context, router_id, + port_id, port_info) + new_status = nconst.ROUTER_STATUS_ACTIVE + self.plugin._update_resource_status( + context, "port", port_id, new_status) + return port + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + reason = _("add_router_interface() failed due to %s") % exc + LOG.error(reason) + new_status = nconst.ROUTER_STATUS_ERROR + self.plugin._update_resource_status( + context, "port", port_id, new_status) + + @call_log.log + def delete_interface(self, context, router_id, port): + port_id = port['id'] + try: + self.ofc.delete_ofc_router_interface(context, router_id, port_id) + new_status = nconst.ROUTER_STATUS_ACTIVE + self.plugin._update_resource_status(context, "port", port_id, + new_status) + port['status'] = new_status + return port + except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: + with excutils.save_and_reraise_exception(): + reason = _("delete_router_interface() failed due to %s") % exc + LOG.error(reason) + new_status = nconst.ROUTER_STATUS_ERROR + self.plugin._update_resource_status(context, "port", port_id, + new_status) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/common/constants.py new file mode 100644 index 00000000..ff2680bf --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/common/constants.py @@ -0,0 +1,28 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. + +from neutron.common import constants + +AUTO_CREATE_PORT_OWNERS = [ + constants.DEVICE_OWNER_DHCP, + constants.DEVICE_OWNER_ROUTER_INTF, + constants.DEVICE_OWNER_ROUTER_GW, + constants.DEVICE_OWNER_FLOATINGIP +] + +NOVA_PORT_OWNER_PREF = 'compute:' + +SR_TYPE_FLOATING = "FLOATING" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/extensions/netpartition.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/extensions/netpartition.py new file mode 100644 index 00000000..c731e1de --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/extensions/netpartition.py @@ -0,0 +1,107 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. + +import abc + +from neutron.api import extensions +from neutron.api.v2 import base +from neutron import manager +from neutron import quota + + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + 'net_partitions': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': '', + 'validate': {'type:name_not_default': None}}, + 'description': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': '', + 'validate': {'type:string_or_none': None}}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'is_visible': True}, + }, +} + + +class Netpartition(object): + """Extension class supporting net_partition. + """ + + @classmethod + def get_name(cls): + return "NetPartition" + + @classmethod + def get_alias(cls): + return "net-partition" + + @classmethod + def get_description(cls): + return "NetPartition" + + @classmethod + def get_namespace(cls): + return "http://nuagenetworks.net/ext/net_partition/api/v1.0" + + @classmethod + def get_updated(cls): + return "2014-01-01T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + resource_name = 'net_partition' + collection_name = resource_name.replace('_', '-') + "s" + params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) + quota.QUOTAS.register_resource_by_name(resource_name) + controller = base.create_resource(collection_name, + resource_name, + plugin, params, allow_bulk=True) + ex = extensions.ResourceExtension(collection_name, + controller) + exts.append(ex) + + return exts + + +class NetPartitionPluginBase(object): + + @abc.abstractmethod + def create_net_partition(self, context, router): + pass + + @abc.abstractmethod + def update_net_partition(self, context, id, router): + pass + + @abc.abstractmethod + def get_net_partition(self, context, id, fields=None): + pass + + @abc.abstractmethod + def delete_net_partition(self, context, id): + pass + + @abc.abstractmethod + def get_net_partitions(self, context, filters=None, fields=None): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/nuage_models.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/nuage_models.py new file mode 100644 index 00000000..f3ebcffa --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/nuage_models.py @@ -0,0 +1,102 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. + +from sqlalchemy import Boolean, Column, ForeignKey, String + +from neutron.db import model_base +from neutron.db import models_v2 + + +class NetPartition(model_base.BASEV2, models_v2.HasId): + __tablename__ = 'nuage_net_partitions' + name = Column(String(64)) + l3dom_tmplt_id = Column(String(36)) + l2dom_tmplt_id = Column(String(36)) + + +class NetPartitionRouter(model_base.BASEV2): + __tablename__ = "nuage_net_partition_router_mapping" + net_partition_id = Column(String(36), + ForeignKey('nuage_net_partitions.id', + ondelete="CASCADE"), + primary_key=True) + router_id = Column(String(36), + ForeignKey('routers.id', ondelete="CASCADE"), + primary_key=True) + nuage_router_id = Column(String(36)) + + +class RouterZone(model_base.BASEV2): + __tablename__ = "nuage_router_zone_mapping" + router_id = Column(String(36), + ForeignKey('routers.id', ondelete="CASCADE"), + primary_key=True) + nuage_zone_id = Column(String(36)) + nuage_user_id = Column(String(36)) + nuage_group_id = Column(String(36)) + + +class SubnetL2Domain(model_base.BASEV2): + __tablename__ = 'nuage_subnet_l2dom_mapping' + subnet_id = Column(String(36), + ForeignKey('subnets.id', ondelete="CASCADE"), + primary_key=True) + net_partition_id = Column(String(36), + ForeignKey('nuage_net_partitions.id', + ondelete="CASCADE")) + nuage_subnet_id = Column(String(36)) + nuage_l2dom_tmplt_id = Column(String(36)) + nuage_user_id = Column(String(36)) + nuage_group_id = Column(String(36)) + + +class PortVPortMapping(model_base.BASEV2): + __tablename__ = 'nuage_port_mapping' + port_id = Column(String(36), + ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + nuage_vport_id = Column(String(36)) + nuage_vif_id = Column(String(36)) + static_ip = Column(Boolean()) + + +class RouterRoutesMapping(model_base.BASEV2, models_v2.Route): + __tablename__ = 'nuage_routerroutes_mapping' + router_id = Column(String(36), + ForeignKey('routers.id', + ondelete="CASCADE"), + primary_key=True, + nullable=False) + nuage_route_id = Column(String(36)) + + +class FloatingIPPoolMapping(model_base.BASEV2): + __tablename__ = "nuage_floatingip_pool_mapping" + fip_pool_id = Column(String(36), primary_key=True) + net_id = Column(String(36), + ForeignKey('networks.id', ondelete="CASCADE")) + router_id = Column(String(36)) + + +class FloatingIPMapping(model_base.BASEV2): + __tablename__ = 'nuage_floatingip_mapping' + fip_id = Column(String(36), + ForeignKey('floatingips.id', + ondelete="CASCADE"), + primary_key=True) + router_id = Column(String(36)) + nuage_fip_id = Column(String(36)) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/nuagedb.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/nuagedb.py new file mode 100644 index 00000000..bd1b2f3d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/nuagedb.py @@ -0,0 +1,202 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. + +from neutron.db import db_base_plugin_v2 +from neutron.plugins.nuage import nuage_models + + +def add_entrouter_mapping(session, np_id, + router_id, + n_l3id): + ent_rtr_mapping = nuage_models.NetPartitionRouter(net_partition_id=np_id, + router_id=router_id, + nuage_router_id=n_l3id) + session.add(ent_rtr_mapping) + + +def add_rtrzone_mapping(session, neutron_router_id, + nuage_zone_id, + nuage_user_id=None, + nuage_group_id=None): + rtr_zone_mapping = nuage_models.RouterZone(router_id=neutron_router_id, + nuage_zone_id=nuage_zone_id, + nuage_user_id=nuage_user_id, + nuage_group_id=nuage_group_id) + session.add(rtr_zone_mapping) + + +def add_subnetl2dom_mapping(session, neutron_subnet_id, + nuage_sub_id, + np_id, + l2dom_id=None, + nuage_user_id=None, + nuage_group_id=None): + subnet_l2dom = nuage_models.SubnetL2Domain(subnet_id=neutron_subnet_id, + nuage_subnet_id=nuage_sub_id, + net_partition_id=np_id, + nuage_l2dom_tmplt_id=l2dom_id, + nuage_user_id=nuage_user_id, + nuage_group_id=nuage_group_id) + session.add(subnet_l2dom) + + +def update_subnetl2dom_mapping(subnet_l2dom, + new_dict): + subnet_l2dom.update(new_dict) + + +def delete_subnetl2dom_mapping(session, subnet_l2dom): + session.delete(subnet_l2dom) + + +def add_port_vport_mapping(session, port_id, nuage_vport_id, + nuage_vif_id, static_ip): + port_mapping = nuage_models.PortVPortMapping(port_id=port_id, + nuage_vport_id=nuage_vport_id, + nuage_vif_id=nuage_vif_id, + static_ip=static_ip) + session.add(port_mapping) + return port_mapping + + +def update_port_vport_mapping(port_mapping, + new_dict): + port_mapping.update(new_dict) + + +def get_port_mapping_by_id(session, id): + query = session.query(nuage_models.PortVPortMapping) + return query.filter_by(port_id=id).first() + + +def get_ent_rtr_mapping_by_rtrid(session, rtrid): + query = session.query(nuage_models.NetPartitionRouter) + return query.filter_by(router_id=rtrid).first() + + +def get_rtr_zone_mapping(session, router_id): + query = session.query(nuage_models.RouterZone) + return query.filter_by(router_id=router_id).first() + + +def get_subnet_l2dom_by_id(session, id): + query = session.query(nuage_models.SubnetL2Domain) + return query.filter_by(subnet_id=id).first() + + +def add_net_partition(session, netpart_id, + l3dom_id, l2dom_id, + ent_name): + net_partitioninst = nuage_models.NetPartition(id=netpart_id, + name=ent_name, + l3dom_tmplt_id=l3dom_id, + l2dom_tmplt_id=l2dom_id) + session.add(net_partitioninst) + return net_partitioninst + + +def delete_net_partition(session, net_partition): + session.delete(net_partition) + + +def get_ent_rtr_mapping_by_entid(session, + entid): + query = session.query(nuage_models.NetPartitionRouter) + return query.filter_by(net_partition_id=entid).all() + + +def get_net_partition_by_name(session, name): + query = session.query(nuage_models.NetPartition) + return query.filter_by(name=name).first() + + +def get_net_partition_by_id(session, id): + query = session.query(nuage_models.NetPartition) + return query.filter_by(id=id).first() + + +def get_net_partitions(session, filters=None, fields=None): + query = session.query(nuage_models.NetPartition) + common_db = db_base_plugin_v2.CommonDbMixin() + query = common_db._apply_filters_to_query(query, + nuage_models.NetPartition, + filters) + return query + + +def delete_static_route(session, static_route): + 
session.delete(static_route) + + +def get_router_route_mapping(session, id, route): + qry = session.query(nuage_models.RouterRoutesMapping) + return qry.filter_by(router_id=id, + destination=route['destination'], + nexthop=route['nexthop']).one() + + +def add_static_route(session, router_id, nuage_rtr_id, + destination, nexthop): + staticrt = nuage_models.RouterRoutesMapping(router_id=router_id, + nuage_route_id=nuage_rtr_id, + destination=destination, + nexthop=nexthop) + session.add(staticrt) + return staticrt + + +def add_fip_mapping(session, neutron_fip_id, router_id, nuage_fip_id): + fip = nuage_models.FloatingIPMapping(fip_id=neutron_fip_id, + router_id=router_id, + nuage_fip_id=nuage_fip_id) + session.add(fip) + return fip + + +def delete_fip_mapping(session, fip_mapping): + session.delete(fip_mapping) + + +def add_fip_pool_mapping(session, fip_pool_id, net_id, router_id=None): + fip_pool_mapping = nuage_models.FloatingIPPoolMapping( + fip_pool_id=fip_pool_id, + net_id=net_id, + router_id=router_id) + session.add(fip_pool_mapping) + return fip_pool_mapping + + +def delete_fip_pool_mapping(session, fip_pool_mapping): + session.delete(fip_pool_mapping) + + +def get_fip_pool_by_id(session, id): + query = session.query(nuage_models.FloatingIPPoolMapping) + return query.filter_by(fip_pool_id=id).first() + + +def get_fip_pool_from_netid(session, net_id): + query = session.query(nuage_models.FloatingIPPoolMapping) + return query.filter_by(net_id=net_id).first() + + +def get_fip_mapping_by_id(session, id): + qry = session.query(nuage_models.FloatingIPMapping) + return qry.filter_by(fip_id=id).first() + + +def update_fip_pool_mapping(fip_pool_mapping, new_dict): + fip_pool_mapping.update(new_dict) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/plugin.py new file mode 100644 index 00000000..bf95c1ee --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/nuage/plugin.py @@ -0,0 +1,1006 @@ +# Copyright 2014 Alcatel-Lucent USA Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
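+#
+# This module implements the monolithic Nuage core plugin.  NuagePlugin
+# extends the Neutron v2 DB plugin with external-net, extraroute and L3
+# support and forwards network, subnet, port, router and floating-IP
+# operations to the Nuage VSD through nuagenetlib, recording the
+# Neutron-to-VSD id mappings via the helpers in nuagedb.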
+ + +import re + +import netaddr +from oslo.config import cfg +from sqlalchemy.orm import exc + +from neutron.api import extensions as neutron_extensions +from neutron.api.v2 import attributes +from neutron.common import constants as os_constants +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.db import quota_db # noqa +from neutron.extensions import external_net +from neutron.extensions import l3 +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.plugins.nuage.common import config +from neutron.plugins.nuage.common import constants +from neutron.plugins.nuage.common import exceptions as nuage_exc +from neutron.plugins.nuage import extensions +from neutron.plugins.nuage.extensions import netpartition +from neutron.plugins.nuage import nuagedb +from neutron import policy + + +class NuagePlugin(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_db.L3_NAT_db_mixin, + netpartition.NetPartitionPluginBase): + """Class that implements Nuage Networks' plugin functionality.""" + supported_extension_aliases = ["router", "binding", "external-net", + "net-partition", "nuage-router", + "nuage-subnet", "quotas", "extraroute"] + + binding_view = "extension:port_binding:view" + + def __init__(self): + super(NuagePlugin, self).__init__() + neutron_extensions.append_api_extensions_path(extensions.__path__) + config.nuage_register_cfg_opts() + self.nuageclient_init() + net_partition = cfg.CONF.RESTPROXY.default_net_partition_name + self._create_default_net_partition(net_partition) + + def nuageclient_init(self): + server = cfg.CONF.RESTPROXY.server + serverauth = cfg.CONF.RESTPROXY.serverauth + serverssl = cfg.CONF.RESTPROXY.serverssl + base_uri = cfg.CONF.RESTPROXY.base_uri + auth_resource = cfg.CONF.RESTPROXY.auth_resource + organization = cfg.CONF.RESTPROXY.organization + nuageclient = importutils.import_module('nuagenetlib.nuageclient') + self.nuageclient = nuageclient.NuageClient(server, base_uri, + serverssl, serverauth, + auth_resource, + organization) + + def _resource_finder(self, context, for_resource, resource, user_req): + match = re.match(attributes.UUID_PATTERN, user_req[resource]) + if match: + obj_lister = getattr(self, "get_%s" % resource) + found_resource = obj_lister(context, user_req[resource]) + if not found_resource: + msg = (_("%(resource)s with id %(resource_id)s does not " + "exist") % {'resource': resource, + 'resource_id': user_req[resource]}) + raise n_exc.BadRequest(resource=for_resource, msg=msg) + else: + filter = {'name': [user_req[resource]]} + obj_lister = getattr(self, "get_%ss" % resource) + found_resource = obj_lister(context, filters=filter) + if not found_resource: + msg = (_("Either %(resource)s %(req_resource)s not found " + "or you dont have credential to access it") + % {'resource': resource, + 'req_resource': user_req[resource]}) + raise n_exc.BadRequest(resource=for_resource, msg=msg) + if len(found_resource) > 1: + msg = (_("More than one entry found for %(resource)s " + "%(req_resource)s. 
Use id instead") + % {'resource': resource, + 'req_resource': user_req[resource]}) + raise n_exc.BadRequest(resource=for_resource, msg=msg) + found_resource = found_resource[0] + return found_resource + + def _update_port_ip(self, context, port, new_ip): + subid = port['fixed_ips'][0]['subnet_id'] + new_fixed_ips = {} + new_fixed_ips['subnet_id'] = subid + new_fixed_ips['ip_address'] = new_ip + ips, prev_ips = self._update_ips_for_port(context, + port["network_id"], + port['id'], + port["fixed_ips"], + [new_fixed_ips]) + + # Update ips if necessary + for ip in ips: + allocated = models_v2.IPAllocation( + network_id=port['network_id'], port_id=port['id'], + ip_address=ip['ip_address'], subnet_id=ip['subnet_id']) + context.session.add(allocated) + + def _create_update_port(self, context, port, + port_mapping, subnet_mapping): + filters = {'device_id': [port['device_id']]} + ports = self.get_ports(context, filters) + netpart_id = subnet_mapping['net_partition_id'] + net_partition = nuagedb.get_net_partition_by_id(context.session, + netpart_id) + params = { + 'id': port['device_id'], + 'mac': port['mac_address'], + 'parent_id': subnet_mapping['nuage_subnet_id'], + 'net_partition': net_partition, + 'ip': None, + 'no_of_ports': len(ports), + 'tenant': port['tenant_id'] + } + if port_mapping['static_ip']: + params['ip'] = port['fixed_ips'][0]['ip_address'] + + nuage_vm = self.nuageclient.create_vms(params) + if nuage_vm: + if port['fixed_ips'][0]['ip_address'] != str(nuage_vm['ip']): + self._update_port_ip(context, port, nuage_vm['ip']) + port_dict = { + 'nuage_vport_id': nuage_vm['vport_id'], + 'nuage_vif_id': nuage_vm['vif_id'] + } + nuagedb.update_port_vport_mapping(port_mapping, + port_dict) + + def create_port(self, context, port): + session = context.session + with session.begin(subtransactions=True): + p = port['port'] + port = super(NuagePlugin, self).create_port(context, port) + device_owner = port.get('device_owner', None) + if (device_owner and + device_owner not in constants.AUTO_CREATE_PORT_OWNERS): + if 'fixed_ips' not in port or len(port['fixed_ips']) == 0: + return self._extend_port_dict_binding(context, port) + subnet_id = port['fixed_ips'][0]['subnet_id'] + subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session, + subnet_id) + if subnet_mapping: + static_ip = False + if (attributes.is_attr_set(p['fixed_ips']) and + 'ip_address' in p['fixed_ips'][0]): + static_ip = True + nuage_vport_id = None + nuage_vif_id = None + port_mapping = nuagedb.add_port_vport_mapping( + session, + port['id'], + nuage_vport_id, + nuage_vif_id, + static_ip) + port_prefix = constants.NOVA_PORT_OWNER_PREF + if port['device_owner'].startswith(port_prefix): + #This request is coming from nova + try: + self._create_update_port(context, port, + port_mapping, + subnet_mapping) + except Exception: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, self).delete_port( + context, + port['id']) + return self._extend_port_dict_binding(context, port) + + def update_port(self, context, id, port): + p = port['port'] + if p.get('device_owner', '').startswith( + constants.NOVA_PORT_OWNER_PREF): + session = context.session + with session.begin(subtransactions=True): + port = self._get_port(context, id) + port.update(p) + if 'fixed_ips' not in port or len(port['fixed_ips']) == 0: + return self._make_port_dict(port) + subnet_id = port['fixed_ips'][0]['subnet_id'] + subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session, + subnet_id) + if not subnet_mapping: + msg = (_("Subnet %s not found on VSD") % 
subnet_id) + raise n_exc.BadRequest(resource='port', msg=msg) + port_mapping = nuagedb.get_port_mapping_by_id(session, + id) + if not port_mapping: + msg = (_("Port-Mapping for port %s not " + " found on VSD") % id) + raise n_exc.BadRequest(resource='port', msg=msg) + if not port_mapping['nuage_vport_id']: + self._create_update_port(context, port, + port_mapping, subnet_mapping) + updated_port = self._make_port_dict(port) + else: + updated_port = super(NuagePlugin, self).update_port(context, id, + port) + return updated_port + + def delete_port(self, context, id, l3_port_check=True): + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + port = self._get_port(context, id) + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + id) + # This is required for to pass ut test_floatingip_port_delete + self.disassociate_floatingips(context, id) + if not port['fixed_ips']: + return super(NuagePlugin, self).delete_port(context, id) + + sub_id = port['fixed_ips'][0]['subnet_id'] + subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session, + sub_id) + if not subnet_mapping: + return super(NuagePlugin, self).delete_port(context, id) + + netpart_id = subnet_mapping['net_partition_id'] + net_partition = nuagedb.get_net_partition_by_id(context.session, + netpart_id) + # Need to call this explicitly to delete vport_vporttag_mapping + if constants.NOVA_PORT_OWNER_PREF in port['device_owner']: + # This was a VM Port + filters = {'device_id': [port['device_id']]} + ports = self.get_ports(context, filters) + params = { + 'no_of_ports': len(ports), + 'net_partition': net_partition, + 'tenant': port['tenant_id'], + 'mac': port['mac_address'], + 'nuage_vif_id': port_mapping['nuage_vif_id'], + 'id': port['device_id'] + } + self.nuageclient.delete_vms(params) + super(NuagePlugin, self).delete_port(context, id) + + def _check_view_auth(self, context, resource, action): + return policy.check(context, action, resource) + + def _extend_port_dict_binding(self, context, port): + if self._check_view_auth(context, port, self.binding_view): + port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS + port[portbindings.VIF_DETAILS] = { + portbindings.CAP_PORT_FILTER: False + } + return port + + def get_port(self, context, id, fields=None): + port = super(NuagePlugin, self).get_port(context, id, fields) + return self._fields(self._extend_port_dict_binding(context, port), + fields) + + def get_ports(self, context, filters=None, fields=None): + ports = super(NuagePlugin, self).get_ports(context, filters, fields) + return [self._fields(self._extend_port_dict_binding(context, port), + fields) for port in ports] + + def _check_router_subnet_for_tenant(self, context): + # Search router and subnet tables. 
+ # If no entry left delete user and group from VSD + filters = {'tenant_id': [context.tenant]} + routers = self.get_routers(context, filters=filters) + subnets = self.get_subnets(context, filters=filters) + return bool(routers or subnets) + + def create_network(self, context, network): + net = network['network'] + with context.session.begin(subtransactions=True): + net = super(NuagePlugin, self).create_network(context, + network) + self._process_l3_create(context, net, network['network']) + return net + + def _validate_update_network(self, context, id, network): + req_data = network['network'] + is_external_set = req_data.get(external_net.EXTERNAL) + if not attributes.is_attr_set(is_external_set): + return (None, None) + neutron_net = self.get_network(context, id) + if neutron_net.get(external_net.EXTERNAL) == is_external_set: + return (None, None) + subnet = self._validate_nuage_sharedresource(context, 'network', id) + if subnet and not is_external_set: + msg = _('External network with subnets can not be ' + 'changed to non-external network') + raise nuage_exc.OperationNotSupported(msg=msg) + return (is_external_set, subnet) + + def update_network(self, context, id, network): + with context.session.begin(subtransactions=True): + is_external_set, subnet = self._validate_update_network(context, + id, + network) + net = super(NuagePlugin, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + if subnet and is_external_set: + subn = subnet[0] + subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session, + subn['id']) + if subnet_l2dom: + nuage_subnet_id = subnet_l2dom['nuage_subnet_id'] + nuage_l2dom_tid = subnet_l2dom['nuage_l2dom_tmplt_id'] + user_id = subnet_l2dom['nuage_user_id'] + group_id = subnet_l2dom['nuage_group_id'] + self.nuageclient.delete_subnet(nuage_subnet_id, + nuage_l2dom_tid) + self.nuageclient.delete_user(user_id) + self.nuageclient.delete_group(group_id) + nuagedb.delete_subnetl2dom_mapping(context.session, + subnet_l2dom) + self._add_nuage_sharedresource(context, + subnet[0], + id, + constants.SR_TYPE_FLOATING) + return net + + def delete_network(self, context, id): + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, id) + filter = {'network_id': [id]} + subnets = self.get_subnets(context, filters=filter) + for subnet in subnets: + self.delete_subnet(context, subnet['id']) + super(NuagePlugin, self).delete_network(context, id) + + def _get_net_partition_for_subnet(self, context, subnet): + subn = subnet['subnet'] + ent = subn.get('net_partition', None) + if not ent: + def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name + net_partition = nuagedb.get_net_partition_by_name(context.session, + def_net_part) + else: + net_partition = self._resource_finder(context, 'subnet', + 'net_partition', subn) + if not net_partition: + msg = _('Either net_partition is not provided with subnet OR ' + 'default net_partition is not created at the start') + raise n_exc.BadRequest(resource='subnet', msg=msg) + return net_partition + + def _validate_create_subnet(self, subnet): + if ('host_routes' in subnet and + attributes.is_attr_set(subnet['host_routes'])): + msg = 'host_routes extensions not supported for subnets' + raise nuage_exc.OperationNotSupported(msg=msg) + if subnet['gateway_ip'] is None: + msg = "no-gateway option not supported with subnets" + raise nuage_exc.OperationNotSupported(msg=msg) + + def _delete_nuage_sharedresource(self, context, net_id): + sharedresource_id = 
self.nuageclient.delete_nuage_sharedresource( + net_id) + if sharedresource_id: + fip_pool_mapping = nuagedb.get_fip_pool_by_id(context.session, + sharedresource_id) + if fip_pool_mapping: + with context.session.begin(subtransactions=True): + nuagedb.delete_fip_pool_mapping(context.session, + fip_pool_mapping) + + def _validate_nuage_sharedresource(self, context, resource, net_id): + filter = {'network_id': [net_id]} + existing_subn = self.get_subnets(context, filters=filter) + if len(existing_subn) > 1: + msg = _('Only one subnet is allowed per ' + 'external network %s') % net_id + raise nuage_exc.OperationNotSupported(msg=msg) + return existing_subn + + def _add_nuage_sharedresource(self, context, subnet, net_id, type): + net = netaddr.IPNetwork(subnet['cidr']) + params = { + 'neutron_subnet': subnet, + 'net': net, + 'type': type + } + fip_pool_id = self.nuageclient.create_nuage_sharedresource(params) + nuagedb.add_fip_pool_mapping(context.session, fip_pool_id, net_id) + + def _create_nuage_sharedresource(self, context, subnet, type): + subn = subnet['subnet'] + net_id = subn['network_id'] + self._validate_nuage_sharedresource(context, 'subnet', net_id) + with context.session.begin(subtransactions=True): + subn = super(NuagePlugin, self).create_subnet(context, subnet) + self._add_nuage_sharedresource(context, subn, net_id, type) + return subn + + def _create_nuage_subnet(self, context, neutron_subnet, net_partition): + net = netaddr.IPNetwork(neutron_subnet['cidr']) + params = { + 'net_partition': net_partition, + 'tenant_id': neutron_subnet['tenant_id'], + 'net': net + } + try: + nuage_subnet = self.nuageclient.create_subnet(neutron_subnet, + params) + except Exception: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, self).delete_subnet(context, + neutron_subnet['id']) + + if nuage_subnet: + l2dom_id = str(nuage_subnet['nuage_l2template_id']) + user_id = nuage_subnet['nuage_userid'] + group_id = nuage_subnet['nuage_groupid'] + id = nuage_subnet['nuage_l2domain_id'] + with context.session.begin(subtransactions=True): + nuagedb.add_subnetl2dom_mapping(context.session, + neutron_subnet['id'], + id, + net_partition['id'], + l2dom_id=l2dom_id, + nuage_user_id=user_id, + nuage_group_id=group_id) + + def create_subnet(self, context, subnet): + subn = subnet['subnet'] + net_id = subn['network_id'] + + if self._network_is_external(context, net_id): + return self._create_nuage_sharedresource( + context, subnet, constants.SR_TYPE_FLOATING) + + self._validate_create_subnet(subn) + + net_partition = self._get_net_partition_for_subnet(context, subnet) + neutron_subnet = super(NuagePlugin, self).create_subnet(context, + subnet) + self._create_nuage_subnet(context, neutron_subnet, net_partition) + return neutron_subnet + + def delete_subnet(self, context, id): + subnet = self.get_subnet(context, id) + if self._network_is_external(context, subnet['network_id']): + super(NuagePlugin, self).delete_subnet(context, id) + return self._delete_nuage_sharedresource(context, id) + + subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session, id) + if subnet_l2dom: + template_id = subnet_l2dom['nuage_l2dom_tmplt_id'] + try: + self.nuageclient.delete_subnet(subnet_l2dom['nuage_subnet_id'], + template_id) + except Exception: + msg = (_('Unable to complete operation on subnet %s.' 
+ 'One or more ports have an IP allocation ' + 'from this subnet.') % id) + raise n_exc.BadRequest(resource='subnet', msg=msg) + super(NuagePlugin, self).delete_subnet(context, id) + if subnet_l2dom and not self._check_router_subnet_for_tenant(context): + self.nuageclient.delete_user(subnet_l2dom['nuage_user_id']) + self.nuageclient.delete_group(subnet_l2dom['nuage_group_id']) + + def add_router_interface(self, context, router_id, interface_info): + session = context.session + with session.begin(subtransactions=True): + rtr_if_info = super(NuagePlugin, + self).add_router_interface(context, + router_id, + interface_info) + subnet_id = rtr_if_info['subnet_id'] + subn = self.get_subnet(context, subnet_id) + + rtr_zone_mapping = nuagedb.get_rtr_zone_mapping(session, + router_id) + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, + router_id) + subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, + subnet_id) + if not rtr_zone_mapping or not ent_rtr_mapping: + super(NuagePlugin, + self).remove_router_interface(context, + router_id, + interface_info) + msg = (_("Router %s does not hold default zone OR " + "net_partition mapping. Router-IF add failed") + % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + + if not subnet_l2dom: + super(NuagePlugin, + self).remove_router_interface(context, + router_id, + interface_info) + msg = (_("Subnet %s does not hold Nuage VSD reference. " + "Router-IF add failed") % subnet_id) + raise n_exc.BadRequest(resource='subnet', msg=msg) + + if (subnet_l2dom['net_partition_id'] != + ent_rtr_mapping['net_partition_id']): + super(NuagePlugin, + self).remove_router_interface(context, + router_id, + interface_info) + msg = (_("Subnet %(subnet)s and Router %(router)s belong to " + "different net_partition Router-IF add " + "not permitted") % {'subnet': subnet_id, + 'router': router_id}) + raise n_exc.BadRequest(resource='subnet', msg=msg) + nuage_subnet_id = subnet_l2dom['nuage_subnet_id'] + nuage_l2dom_tmplt_id = subnet_l2dom['nuage_l2dom_tmplt_id'] + if self.nuageclient.vms_on_l2domain(nuage_subnet_id): + super(NuagePlugin, + self).remove_router_interface(context, + router_id, + interface_info) + msg = (_("Subnet %s has one or more active VMs " + "Router-IF add not permitted") % subnet_id) + raise n_exc.BadRequest(resource='subnet', msg=msg) + self.nuageclient.delete_subnet(nuage_subnet_id, + nuage_l2dom_tmplt_id) + net = netaddr.IPNetwork(subn['cidr']) + params = { + 'net': net, + 'zone_id': rtr_zone_mapping['nuage_zone_id'] + } + if not attributes.is_attr_set(subn['gateway_ip']): + subn['gateway_ip'] = str(netaddr.IPAddress(net.first + 1)) + try: + nuage_subnet = self.nuageclient.create_domain_subnet(subn, + params) + except Exception: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, + self).remove_router_interface(context, + router_id, + interface_info) + if nuage_subnet: + ns_dict = {} + ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_subnetid'] + ns_dict['nuage_l2dom_tmplt_id'] = None + nuagedb.update_subnetl2dom_mapping(subnet_l2dom, + ns_dict) + return rtr_if_info + + def remove_router_interface(self, context, router_id, interface_info): + if 'subnet_id' in interface_info: + subnet_id = interface_info['subnet_id'] + subnet = self.get_subnet(context, subnet_id) + found = False + try: + filters = {'device_id': [router_id], + 'device_owner': + [os_constants.DEVICE_OWNER_ROUTER_INTF], + 'network_id': [subnet['network_id']]} + ports = self.get_ports(context, filters) + + for p in ports: + if 
p['fixed_ips'][0]['subnet_id'] == subnet_id: + found = True + break + except exc.NoResultFound: + msg = (_("No router interface found for Router %s. " + "Router-IF delete failed") % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + + if not found: + msg = (_("No router interface found for Router %s. " + "Router-IF delete failed") % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + elif 'port_id' in interface_info: + port_db = self._get_port(context, interface_info['port_id']) + if not port_db: + msg = (_("No router interface found for Router %s. " + "Router-IF delete failed") % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + subnet_id = port_db['fixed_ips'][0]['subnet_id'] + + session = context.session + with session.begin(subtransactions=True): + subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, + subnet_id) + if not subnet_l2dom: + return super(NuagePlugin, + self).remove_router_interface(context, + router_id, + interface_info) + nuage_subn_id = subnet_l2dom['nuage_subnet_id'] + if self.nuageclient.vms_on_l2domain(nuage_subn_id): + msg = (_("Subnet %s has one or more active VMs " + "Router-IF delete not permitted") % subnet_id) + raise n_exc.BadRequest(resource='subnet', msg=msg) + + neutron_subnet = self.get_subnet(context, subnet_id) + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( + context.session, + router_id) + if not ent_rtr_mapping: + msg = (_("Router %s does not hold net_partition " + "assoc on Nuage VSD. Router-IF delete failed") + % router_id) + raise n_exc.BadRequest(resource='router', msg=msg) + net = netaddr.IPNetwork(neutron_subnet['cidr']) + net_part_id = ent_rtr_mapping['net_partition_id'] + net_partition = self.get_net_partition(context, + net_part_id) + params = { + 'net_partition': net_partition, + 'tenant_id': neutron_subnet['tenant_id'], + 'net': net + } + nuage_subnet = self.nuageclient.create_subnet(neutron_subnet, + params) + self.nuageclient.delete_domain_subnet(nuage_subn_id) + info = super(NuagePlugin, + self).remove_router_interface(context, router_id, + interface_info) + if nuage_subnet: + tmplt_id = str(nuage_subnet['nuage_l2template_id']) + ns_dict = {} + ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_l2domain_id'] + ns_dict['nuage_l2dom_tmplt_id'] = tmplt_id + nuagedb.update_subnetl2dom_mapping(subnet_l2dom, + ns_dict) + return info + + def _get_net_partition_for_router(self, context, router): + rtr = router['router'] + ent = rtr.get('net_partition', None) + if not ent: + def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name + net_partition = nuagedb.get_net_partition_by_name(context.session, + def_net_part) + else: + net_partition = self._resource_finder(context, 'router', + 'net_partition', rtr) + if not net_partition: + msg = _("Either net_partition is not provided with router OR " + "default net_partition is not created at the start") + raise n_exc.BadRequest(resource='router', msg=msg) + return net_partition + + def create_router(self, context, router): + net_partition = self._get_net_partition_for_router(context, router) + neutron_router = super(NuagePlugin, self).create_router(context, + router) + params = { + 'net_partition': net_partition, + 'tenant_id': neutron_router['tenant_id'] + } + try: + nuage_router = self.nuageclient.create_router(neutron_router, + router['router'], + params) + except Exception: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, self).delete_router(context, + neutron_router['id']) + if nuage_router: + user_id = 
nuage_router['nuage_userid'] + group_id = nuage_router['nuage_groupid'] + with context.session.begin(subtransactions=True): + nuagedb.add_entrouter_mapping(context.session, + net_partition['id'], + neutron_router['id'], + nuage_router['nuage_domain_id']) + nuagedb.add_rtrzone_mapping(context.session, + neutron_router['id'], + nuage_router['nuage_def_zone_id'], + nuage_user_id=user_id, + nuage_group_id=group_id) + return neutron_router + + def _validate_nuage_staticroutes(self, old_routes, added, removed): + cidrs = [] + for old in old_routes: + if old not in removed: + ip = netaddr.IPNetwork(old['destination']) + cidrs.append(ip) + for route in added: + ip = netaddr.IPNetwork(route['destination']) + matching = netaddr.all_matching_cidrs(ip.ip, cidrs) + if matching: + msg = _('for same subnet, multiple static routes not allowed') + raise n_exc.BadRequest(resource='router', msg=msg) + cidrs.append(ip) + + def update_router(self, context, id, router): + r = router['router'] + with context.session.begin(subtransactions=True): + if 'routes' in r: + old_routes = self._get_extra_routes_by_router_id(context, + id) + added, removed = utils.diff_list_of_dict(old_routes, + r['routes']) + self._validate_nuage_staticroutes(old_routes, added, removed) + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( + context.session, id) + if not ent_rtr_mapping: + msg = (_("Router %s does not hold net-partition " + "assoc on VSD. extra-route failed") % id) + raise n_exc.BadRequest(resource='router', msg=msg) + # Let it do internal checks first and verify it. + router_updated = super(NuagePlugin, + self).update_router(context, + id, + router) + for route in removed: + rtr_rt_mapping = nuagedb.get_router_route_mapping( + context.session, id, route) + if rtr_rt_mapping: + self.nuageclient.delete_nuage_staticroute( + rtr_rt_mapping['nuage_route_id']) + nuagedb.delete_static_route(context.session, + rtr_rt_mapping) + for route in added: + params = { + 'parent_id': ent_rtr_mapping['nuage_router_id'], + 'net': netaddr.IPNetwork(route['destination']), + 'nexthop': route['nexthop'] + } + nuage_rt_id = self.nuageclient.create_nuage_staticroute( + params) + nuagedb.add_static_route(context.session, + id, nuage_rt_id, + route['destination'], + route['nexthop']) + else: + router_updated = super(NuagePlugin, self).update_router( + context, id, router) + return router_updated + + def delete_router(self, context, id): + session = context.session + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, + id) + if ent_rtr_mapping: + filters = { + 'device_id': [id], + 'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF] + } + ports = self.get_ports(context, filters) + if ports: + raise l3.RouterInUse(router_id=id) + nuage_router_id = ent_rtr_mapping['nuage_router_id'] + self.nuageclient.delete_router(nuage_router_id) + router_zone = nuagedb.get_rtr_zone_mapping(session, id) + super(NuagePlugin, self).delete_router(context, id) + if router_zone and not self._check_router_subnet_for_tenant(context): + self.nuageclient.delete_user(router_zone['nuage_user_id']) + self.nuageclient.delete_group(router_zone['nuage_group_id']) + + def _make_net_partition_dict(self, net_partition, fields=None): + res = { + 'id': net_partition['id'], + 'name': net_partition['name'], + 'l3dom_tmplt_id': net_partition['l3dom_tmplt_id'], + 'l2dom_tmplt_id': net_partition['l2dom_tmplt_id'], + } + return self._fields(res, fields) + + def _create_net_partition(self, session, net_part_name): + fip_quota = cfg.CONF.RESTPROXY.default_floatingip_quota 
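+        # Ask the Nuage VSD to create the net-partition with the configured
+        # default floating-ip quota, then persist the returned id together
+        # with the default L2/L3 domain template ids in the local mapping
+        # table (see nuagedb.add_net_partition).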
+ params = { + "name": net_part_name, + "fp_quota": str(fip_quota) + } + nuage_net_partition = self.nuageclient.create_net_partition(params) + net_partitioninst = None + if nuage_net_partition: + nuage_entid = nuage_net_partition['nuage_entid'] + l3dom_id = nuage_net_partition['l3dom_id'] + l2dom_id = nuage_net_partition['l2dom_id'] + with session.begin(): + net_partitioninst = nuagedb.add_net_partition(session, + nuage_entid, + l3dom_id, + l2dom_id, + net_part_name) + if not net_partitioninst: + return {} + return self._make_net_partition_dict(net_partitioninst) + + def _create_default_net_partition(self, default_net_part): + def_netpart = self.nuageclient.get_def_netpartition_data( + default_net_part) + session = db.get_session() + if def_netpart: + net_partition = nuagedb.get_net_partition_by_name( + session, default_net_part) + with session.begin(subtransactions=True): + if net_partition: + nuagedb.delete_net_partition(session, net_partition) + net_part = nuagedb.add_net_partition(session, + def_netpart['np_id'], + def_netpart['l3dom_tid'], + def_netpart['l2dom_tid'], + default_net_part) + return self._make_net_partition_dict(net_part) + else: + return self._create_net_partition(session, default_net_part) + + def create_net_partition(self, context, net_partition): + ent = net_partition['net_partition'] + session = context.session + return self._create_net_partition(session, ent["name"]) + + def delete_net_partition(self, context, id): + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_entid( + context.session, + id) + if ent_rtr_mapping: + msg = (_("One or more router still attached to " + "net_partition %s.") % id) + raise n_exc.BadRequest(resource='net_partition', msg=msg) + net_partition = nuagedb.get_net_partition_by_id(context.session, id) + if not net_partition: + msg = (_("NetPartition with %s does not exist") % id) + raise n_exc.BadRequest(resource='net_partition', msg=msg) + l3dom_tmplt_id = net_partition['l3dom_tmplt_id'] + l2dom_tmplt_id = net_partition['l2dom_tmplt_id'] + self.nuageclient.delete_net_partition(net_partition['id'], + l3dom_id=l3dom_tmplt_id, + l2dom_id=l2dom_tmplt_id) + with context.session.begin(subtransactions=True): + nuagedb.delete_net_partition(context.session, + net_partition) + + def get_net_partition(self, context, id, fields=None): + net_partition = nuagedb.get_net_partition_by_id(context.session, + id) + return self._make_net_partition_dict(net_partition) + + def get_net_partitions(self, context, filters=None, fields=None): + net_partitions = nuagedb.get_net_partitions(context.session, + filters=filters, + fields=fields) + return [self._make_net_partition_dict(net_partition, fields) + for net_partition in net_partitions] + + def _check_floatingip_update(self, context, port): + filter = {'fixed_port_id': [port['id']]} + local_fip = self.get_floatingips(context, + filters=filter) + if local_fip: + fip = local_fip[0] + self._create_update_floatingip(context, + fip, port['id']) + + def _create_update_floatingip(self, context, + neutron_fip, port_id): + rtr_id = neutron_fip['router_id'] + net_id = neutron_fip['floating_network_id'] + + fip_pool_mapping = nuagedb.get_fip_pool_from_netid(context.session, + net_id) + fip_mapping = nuagedb.get_fip_mapping_by_id(context.session, + neutron_fip['id']) + + if not fip_mapping: + ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( + context.session, rtr_id) + if not ent_rtr_mapping: + msg = _('router %s is not associated with ' + 'any net-partition') % rtr_id + raise n_exc.BadRequest(resource='floatingip', + 
msg=msg) + params = { + 'nuage_rtr_id': ent_rtr_mapping['nuage_router_id'], + 'nuage_fippool_id': fip_pool_mapping['fip_pool_id'], + 'neutron_fip_ip': neutron_fip['floating_ip_address'] + } + nuage_fip_id = self.nuageclient.create_nuage_floatingip(params) + nuagedb.add_fip_mapping(context.session, + neutron_fip['id'], + rtr_id, nuage_fip_id) + else: + if rtr_id != fip_mapping['router_id']: + msg = _('Floating IP can not be associated to VM in ' + 'different router context') + raise nuage_exc.OperationNotSupported(msg=msg) + nuage_fip_id = fip_mapping['nuage_fip_id'] + + fip_pool_dict = {'router_id': neutron_fip['router_id']} + nuagedb.update_fip_pool_mapping(fip_pool_mapping, + fip_pool_dict) + + # Update VM if required + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + port_id) + if port_mapping: + params = { + 'nuage_vport_id': port_mapping['nuage_vport_id'], + 'nuage_fip_id': nuage_fip_id + } + self.nuageclient.update_nuage_vm_vport(params) + + def create_floatingip(self, context, floatingip): + fip = floatingip['floatingip'] + with context.session.begin(subtransactions=True): + neutron_fip = super(NuagePlugin, self).create_floatingip( + context, floatingip) + if not neutron_fip['router_id']: + return neutron_fip + try: + self._create_update_floatingip(context, neutron_fip, + fip['port_id']) + except (nuage_exc.OperationNotSupported, n_exc.BadRequest): + with excutils.save_and_reraise_exception(): + super(NuagePlugin, self).delete_floatingip( + context, neutron_fip['id']) + return neutron_fip + + def disassociate_floatingips(self, context, port_id): + super(NuagePlugin, self).disassociate_floatingips(context, port_id) + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + port_id) + if port_mapping: + params = { + 'nuage_vport_id': port_mapping['nuage_vport_id'], + 'nuage_fip_id': None + } + self.nuageclient.update_nuage_vm_vport(params) + + def update_floatingip(self, context, id, floatingip): + fip = floatingip['floatingip'] + orig_fip = self._get_floatingip(context, id) + port_id = orig_fip['fixed_port_id'] + with context.session.begin(subtransactions=True): + neutron_fip = super(NuagePlugin, self).update_floatingip( + context, id, floatingip) + if fip['port_id'] is not None: + if not neutron_fip['router_id']: + ret_msg = 'floating-ip is not associated yet' + raise n_exc.BadRequest(resource='floatingip', + msg=ret_msg) + + try: + self._create_update_floatingip(context, + neutron_fip, + fip['port_id']) + except nuage_exc.OperationNotSupported: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, + self).disassociate_floatingips(context, + fip['port_id']) + except n_exc.BadRequest: + with excutils.save_and_reraise_exception(): + super(NuagePlugin, self).delete_floatingip(context, + id) + else: + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + port_id) + if port_mapping: + params = { + 'nuage_vport_id': port_mapping['nuage_vport_id'], + 'nuage_fip_id': None + } + self.nuageclient.update_nuage_vm_vport(params) + return neutron_fip + + def delete_floatingip(self, context, id): + fip = self._get_floatingip(context, id) + port_id = fip['fixed_port_id'] + with context.session.begin(subtransactions=True): + if port_id: + port_mapping = nuagedb.get_port_mapping_by_id(context.session, + port_id) + if (port_mapping and + port_mapping['nuage_vport_id'] is not None): + params = { + 'nuage_vport_id': port_mapping['nuage_vport_id'], + 'nuage_fip_id': None + } + self.nuageclient.update_nuage_vm_vport(params) + fip_mapping = 
nuagedb.get_fip_mapping_by_id(context.session, + id) + if fip_mapping: + self.nuageclient.delete_nuage_floatingip( + fip_mapping['nuage_fip_id']) + nuagedb.delete_fip_mapping(context.session, fip_mapping) + super(NuagePlugin, self).delete_floatingip(context, id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/main.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/main.py new file mode 100644 index 00000000..df000f05 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/main.py @@ -0,0 +1,39 @@ +# Copyright (C) 2014 VA Linux Systems Japan K.K. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K. +# @author: YAMAMOTO Takashi, VA Linux Systems Japan K.K. + +from ryu.lib import hub +hub.patch() + +import sys + +from oslo.config import cfg +from ryu.base.app_manager import AppManager +from ryu import cfg as ryu_cfg + +from neutron.common import config as common_config + + +def main(): + common_config.init(sys.argv[1:]) + # the following check is a transitional workaround to make this work + # with different versions of ryu. + # TODO(yamamoto) remove this later + if ryu_cfg.CONF is not cfg.CONF: + ryu_cfg.CONF(project='ryu', args=[]) + common_config.setup_logging(cfg.CONF) + AppManager.run_apps(['neutron.plugins.ofagent.agent.ofa_neutron_agent']) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/ofa_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/ofa_neutron_agent.py new file mode 100644 index 00000000..28223a5e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/ofa_neutron_agent.py @@ -0,0 +1,1460 @@ +# Copyright (C) 2014 VA Linux Systems Japan K.K. +# Based on openvswitch agent. +# +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K. 
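+#
+# This module implements the OpenFlow Agent (OFA) L2 agent.  The agent runs
+# as a Ryu application, acts as an OpenFlow 1.3 controller for the local
+# Open vSwitch bridges (integration, tunnel and physical), programs the
+# flows needed for local VLAN provisioning and tunnelling, and reports its
+# state to the ML2 plugin over RPC.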
+ +import time + +import netaddr +from oslo.config import cfg +from ryu.app.ofctl import api as ryu_api +from ryu.base import app_manager +from ryu.lib import hub +from ryu.ofproto import ofproto_v1_3 as ryu_ofp13 + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import polling +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import constants as n_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils as n_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.ofagent.agent import ports +from neutron.plugins.ofagent.common import config # noqa +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + +# A placeholder for dead vlans. +DEAD_VLAN_TAG = str(n_const.MAX_VLAN_TAG + 1) + + +# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac' +# attributes set). +class LocalVLANMapping: + def __init__(self, vlan, network_type, physical_network, segmentation_id, + vif_ports=None): + if vif_ports is None: + vif_ports = {} + self.vlan = vlan + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + self.vif_ports = vif_ports + # set of tunnel ports on which packets should be flooded + self.tun_ofports = set() + + def __str__(self): + return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % + (self.vlan, self.network_type, self.physical_network, + self.segmentation_id)) + + +class OVSBridge(ovs_lib.OVSBridge): + def __init__(self, br_name, root_helper, ryuapp): + super(OVSBridge, self).__init__(br_name, root_helper) + self.datapath_id = None + self.datapath = None + self.ofparser = None + self.ryuapp = ryuapp + + def find_datapath_id(self): + self.datapath_id = self.get_datapath_id() + + def get_datapath(self, retry_max=cfg.CONF.AGENT.get_datapath_retry_times): + retry = 0 + while self.datapath is None: + self.datapath = ryu_api.get_datapath(self.ryuapp, + int(self.datapath_id, 16)) + retry += 1 + if retry >= retry_max: + LOG.error(_('Agent terminated!: Failed to get a datapath.')) + raise SystemExit(1) + time.sleep(1) + self.ofparser = self.datapath.ofproto_parser + + def setup_ofp(self, controller_names=None, + protocols='OpenFlow13', + retry_max=cfg.CONF.AGENT.get_datapath_retry_times): + if not controller_names: + host = cfg.CONF.ofp_listen_host + if not host: + # 127.0.0.1 is a default for agent style of controller + host = '127.0.0.1' + controller_names = ["tcp:%s:%d" % (host, + cfg.CONF.ofp_tcp_listen_port)] + try: + self.set_protocols(protocols) + self.set_controller(controller_names) + except RuntimeError: + LOG.exception(_("Agent terminated")) + raise SystemExit(1) + self.find_datapath_id() + self.get_datapath(retry_max) + + +class OFAPluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class OFASecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall(defer_refresh_firewall=True) + + +class OFANeutronAgentRyuApp(app_manager.RyuApp): + OFP_VERSIONS = [ryu_ofp13.OFP_VERSION] + + def 
start(self): + + super(OFANeutronAgentRyuApp, self).start() + return hub.spawn(self._agent_main, self) + + def _agent_main(self, ryuapp): + cfg.CONF.register_opts(ip_lib.OPTS) + n_utils.log_opt_values(LOG) + + try: + agent_config = create_agent_config_map(cfg.CONF) + except ValueError: + LOG.exception(_("Agent failed to create agent config map")) + raise SystemExit(1) + + is_xen_compute_host = ('rootwrap-xen-dom0' in + agent_config['root_helper']) + if is_xen_compute_host: + # Force ip_lib to always use the root helper to ensure that ip + # commands target xen dom0 rather than domU. + cfg.CONF.set_default('ip_lib_force_root', True) + + agent = OFANeutronAgent(ryuapp, **agent_config) + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + agent.daemon_loop() + + +class OFANeutronAgent(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + """A agent for OpenFlow Agent ML2 mechanism driver. + + OFANeutronAgent is a OpenFlow Agent agent for a ML2 plugin. + This is as a ryu application thread. + - An agent acts as an OpenFlow controller on each compute nodes. + - OpenFlow 1.3 (vendor agnostic unlike OVS extensions). + """ + + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, ryuapp, integ_br, tun_br, local_ip, + bridge_mappings, root_helper, + polling_interval, tunnel_types=None, + veth_mtu=None, l2_population=False, + minimize_polling=False, + ovsdb_monitor_respawn_interval=( + constants.DEFAULT_OVSDBMON_RESPAWN)): + """Constructor. + + :param ryuapp: object of the ryu app. + :param integ_br: name of the integration bridge. + :param tun_br: name of the tunnel bridge. + :param local_ip: local IP address of this hypervisor. + :param bridge_mappings: mappings from physical network name to bridge. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to poll DB. + :param tunnel_types: A list of tunnel types to enable support for in + the agent. If set, will automatically set enable_tunneling to + True. + :param veth_mtu: MTU size for veth interfaces. + :param minimize_polling: Optional, whether to minimize polling by + monitoring ovsdb for interface changes. + :param ovsdb_monitor_respawn_interval: Optional, when using polling + minimization, the number of seconds to wait before respawning + the ovsdb monitor. 
+ """ + super(OFANeutronAgent, self).__init__() + self.ryuapp = ryuapp + self.veth_mtu = veth_mtu + self.root_helper = root_helper + self.available_local_vlans = set(xrange(n_const.MIN_VLAN_TAG, + n_const.MAX_VLAN_TAG)) + self.tunnel_types = tunnel_types or [] + self.l2_pop = l2_population + self.agent_state = { + 'binary': 'neutron-ofa-agent', + 'host': cfg.CONF.host, + 'topic': n_const.L2_AGENT_TOPIC, + 'configurations': {'bridge_mappings': bridge_mappings, + 'tunnel_types': self.tunnel_types, + 'tunneling_ip': local_ip, + 'l2_population': self.l2_pop}, + 'agent_type': n_const.AGENT_TYPE_OFA, + 'start_flag': True} + + # Keep track of int_br's device count for use by _report_state() + self.int_br_device_count = 0 + + self.int_br = OVSBridge(integ_br, self.root_helper, self.ryuapp) + # Stores port update notifications for processing in main loop + self.updated_ports = set() + self.setup_rpc() + self.setup_integration_br() + self.setup_physical_bridges(bridge_mappings) + self.local_vlan_map = {} + self.tun_br_ofports = {p_const.TYPE_GRE: {}, + p_const.TYPE_VXLAN: {}} + + self.polling_interval = polling_interval + self.minimize_polling = minimize_polling + self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval + + self.enable_tunneling = bool(self.tunnel_types) + self.local_ip = local_ip + self.tunnel_count = 0 + self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port + self.dont_fragment = cfg.CONF.AGENT.dont_fragment + if self.enable_tunneling: + self.setup_tunnel_br(tun_br) + # Collect additional bridges to monitor + self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br) + + # Security group agent support + self.sg_agent = OFASecurityGroupAgent(self.context, + self.plugin_rpc, + self.root_helper) + # Initialize iteration counter + self.iter_num = 0 + + def _report_state(self): + # How many devices are likely used by a VM + self.agent_state.get('configurations')['devices'] = ( + self.int_br_device_count) + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def _create_tunnel_port_name(self, tunnel_type, ip_address): + try: + ip_hex = '%08x' % netaddr.IPAddress(ip_address, version=4) + return '%s-%s' % (tunnel_type, ip_hex) + except Exception: + LOG.warn(_("Unable to create tunnel port. 
Invalid remote IP: %s"), + ip_address) + + def ryu_send_msg(self, msg): + result = ryu_api.send_msg(self.ryuapp, msg) + LOG.info(_("ryu send_msg() result: %s"), result) + + def setup_rpc(self): + mac = self.int_br.get_local_port_mac() + self.agent_id = '%s%s' % ('ovs', (mac.replace(":", ""))) + self.topic = topics.AGENT + self.plugin_rpc = OFAPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [self] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [constants.TUNNEL, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + @staticmethod + def _get_ofport_name(interface_id): + """Convert from neutron device id (uuid) to OpenFlow port name. + + This needs to be synced with ML2 plugin's _device_to_port_id(). + + An assumption: The switch uses an OS's interface name as the + corresponding OpenFlow port name. + NOTE(yamamoto): While it's true for Open vSwitch, it isn't + necessarily true everywhere. For example, LINC uses something + like "LogicalSwitch0-Port2". + """ + return "tap" + interface_id[0:11] + + def _get_ports(self, br): + """Generate ports.Port instances for the given bridge.""" + datapath = br.datapath + ofpp = datapath.ofproto_parser + msg = ofpp.OFPPortDescStatsRequest(datapath=datapath) + descs = ryu_api.send_msg(app=self.ryuapp, msg=msg, + reply_cls=ofpp.OFPPortDescStatsReply, + reply_multi=True) + for d in descs: + for p in d.body: + yield ports.Port.from_ofp_port(p) + + def _get_ofport_names(self, br): + """Return a set of OpenFlow port names for the given bridge.""" + return set(p.port_name for p in self._get_ports(br)) + + def get_net_uuid(self, vif_id): + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vif_id in vlan_mapping.vif_ports: + return network_id + + def network_delete(self, context, **kwargs): + network_id = kwargs.get('network_id') + LOG.debug(_("network_delete received network %s"), network_id) + # The network may not be defined on this agent + lvm = self.local_vlan_map.get(network_id) + if lvm: + self.reclaim_local_vlan(network_id) + else: + LOG.debug(_("Network %s not used on agent."), network_id) + + def port_update(self, context, **kwargs): + port = kwargs.get('port') + # Put the port identifier in the updated_ports set. 
+ # Even if full port details might be provided to this call, + # they are not used since there is no guarantee the notifications + # are processed in the same order as the relevant API requests + self.updated_ports.add(self._get_ofport_name(port['id'])) + LOG.debug(_("port_update received port %s"), port['id']) + + def tunnel_update(self, context, **kwargs): + LOG.debug(_("tunnel_update received")) + if not self.enable_tunneling: + return + tunnel_ip = kwargs.get('tunnel_ip') + tunnel_type = kwargs.get('tunnel_type') + if not tunnel_type: + LOG.error(_("No tunnel_type specified, cannot create tunnels")) + return + if tunnel_type not in self.tunnel_types: + LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type) + return + if tunnel_ip == self.local_ip: + return + tun_name = self._create_tunnel_port_name(tunnel_type, tunnel_ip) + if not tun_name: + return + self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type) + + def _provision_local_vlan_outbound_for_tunnel(self, lvid, + segmentation_id, ofports): + br = self.tun_br + match = br.ofparser.OFPMatch( + vlan_vid=int(lvid) | ryu_ofp13.OFPVID_PRESENT) + actions = [br.ofparser.OFPActionPopVlan(), + br.ofparser.OFPActionSetField( + tunnel_id=int(segmentation_id))] + for ofport in ofports: + actions.append(br.ofparser.OFPActionOutput(ofport, 0)) + instructions = [br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, actions)] + msg = br.ofparser.OFPFlowMod( + br.datapath, + table_id=constants.FLOOD_TO_TUN, + priority=1, + match=match, instructions=instructions) + self.ryu_send_msg(msg) + + def _provision_local_vlan_inbound_for_tunnel(self, lvid, network_type, + segmentation_id): + br = self.tun_br + match = br.ofparser.OFPMatch( + tunnel_id=int(segmentation_id)) + actions = [ + br.ofparser.OFPActionPushVlan(), + br.ofparser.OFPActionSetField( + vlan_vid=int(lvid) | ryu_ofp13.OFPVID_PRESENT)] + instructions = [ + br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, actions), + br.ofparser.OFPInstructionGotoTable( + table_id=constants.LEARN_FROM_TUN)] + msg = br.ofparser.OFPFlowMod( + br.datapath, + table_id=constants.TUN_TABLE[network_type], + priority=1, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _local_vlan_for_tunnel(self, lvid, network_type, segmentation_id): + ofports = [int(ofport) for ofport in + self.tun_br_ofports[network_type].values()] + if ofports: + self._provision_local_vlan_outbound_for_tunnel( + lvid, segmentation_id, ofports) + self._provision_local_vlan_inbound_for_tunnel(lvid, network_type, + segmentation_id) + + def _provision_local_vlan_outbound(self, lvid, vlan_vid, physical_network): + br = self.phys_brs[physical_network] + datapath = br.datapath + ofp = datapath.ofproto + ofpp = datapath.ofproto_parser + match = ofpp.OFPMatch(in_port=int(self.phys_ofports[physical_network]), + vlan_vid=int(lvid) | ofp.OFPVID_PRESENT) + if vlan_vid == ofp.OFPVID_NONE: + actions = [ofpp.OFPActionPopVlan()] + else: + actions = [ofpp.OFPActionSetField(vlan_vid=vlan_vid)] + actions += [ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)] + instructions = [ + ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), + ] + msg = ofpp.OFPFlowMod(datapath, priority=4, match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _provision_local_vlan_inbound(self, lvid, vlan_vid, physical_network): + datapath = self.int_br.datapath + ofp = datapath.ofproto + ofpp = datapath.ofproto_parser + match = ofpp.OFPMatch(in_port=int(self.int_ofports[physical_network]), + 
vlan_vid=vlan_vid) + if vlan_vid == ofp.OFPVID_NONE: + actions = [ofpp.OFPActionPushVlan()] + else: + actions = [] + actions += [ + ofpp.OFPActionSetField(vlan_vid=int(lvid) | ofp.OFPVID_PRESENT), + ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), + ] + instructions = [ + ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), + ] + msg = ofpp.OFPFlowMod(datapath, priority=3, match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _local_vlan_for_flat(self, lvid, physical_network): + vlan_vid = ryu_ofp13.OFPVID_NONE + self._provision_local_vlan_outbound(lvid, vlan_vid, physical_network) + self._provision_local_vlan_inbound(lvid, vlan_vid, physical_network) + + def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id): + vlan_vid = int(segmentation_id) | ryu_ofp13.OFPVID_PRESENT + self._provision_local_vlan_outbound(lvid, vlan_vid, physical_network) + self._provision_local_vlan_inbound(lvid, vlan_vid, physical_network) + + def provision_local_vlan(self, net_uuid, network_type, physical_network, + segmentation_id): + """Provisions a local VLAN. + + :param net_uuid: the uuid of the network associated with this vlan. + :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', + 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + """ + + if not self.available_local_vlans: + LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) + return + lvid = self.available_local_vlans.pop() + LOG.info(_("Assigning %(vlan_id)s as local vlan for " + "net-id=%(net_uuid)s"), + {'vlan_id': lvid, 'net_uuid': net_uuid}) + self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type, + physical_network, + segmentation_id) + + if network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + self._local_vlan_for_tunnel(lvid, network_type, + segmentation_id) + else: + LOG.error(_("Cannot provision %(network_type)s network for " + "net-id=%(net_uuid)s - tunneling disabled"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + elif network_type == p_const.TYPE_FLAT: + if physical_network in self.phys_brs: + self._local_vlan_for_flat(lvid, physical_network) + else: + LOG.error(_("Cannot provision flat network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_VLAN: + if physical_network in self.phys_brs: + self._local_vlan_for_vlan(lvid, physical_network, + segmentation_id) + else: + LOG.error(_("Cannot provision VLAN network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot provision unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + + def _reclaim_local_vlan_outbound(self, lvm): + br = self.phys_brs[lvm.physical_network] + datapath = br.datapath + ofp = datapath.ofproto + ofpp = datapath.ofproto_parser + match = ofpp.OFPMatch( + in_port=int(self.phys_ofports[lvm.physical_network]), + vlan_vid=int(lvm.vlan) | ofp.OFPVID_PRESENT) + msg = ofpp.OFPFlowMod(datapath, table_id=ofp.OFPTT_ALL, + command=ofp.OFPFC_DELETE, out_group=ofp.OFPG_ANY, + out_port=ofp.OFPP_ANY, match=match) + self.ryu_send_msg(msg) + 
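+ # Counterpart of _reclaim_local_vlan_outbound(): remove the inbound + # translation flows from the integration bridge for this mapping. + # Flat networks were matched untagged (OFPVID_NONE), VLAN networks on + # the provider VLAN tag.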
+ def _reclaim_local_vlan_inbound(self, lvm): + datapath = self.int_br.datapath + ofp = datapath.ofproto + ofpp = datapath.ofproto_parser + if lvm.network_type == p_const.TYPE_FLAT: + vid = ofp.OFPVID_NONE + else: # p_const.TYPE_VLAN + vid = lvm.segmentation_id | ofp.OFPVID_PRESENT + match = ofpp.OFPMatch( + in_port=int(self.int_ofports[lvm.physical_network]), + vlan_vid=vid) + msg = ofpp.OFPFlowMod(datapath, table_id=ofp.OFPTT_ALL, + command=ofp.OFPFC_DELETE, out_group=ofp.OFPG_ANY, + out_port=ofp.OFPP_ANY, match=match) + self.ryu_send_msg(msg) + + def reclaim_local_vlan(self, net_uuid): + """Reclaim a local VLAN. + + :param net_uuid: the network uuid associated with this vlan. + :param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id, + vif_ids) mapping. + """ + lvm = self.local_vlan_map.pop(net_uuid, None) + if lvm is None: + LOG.debug(_("Network %s not used on agent."), net_uuid) + return + + LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"), + {'vlan_id': lvm.vlan, + 'net_uuid': net_uuid}) + + if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + match = self.tun_br.ofparser.OFPMatch( + tunnel_id=int(lvm.segmentation_id)) + msg = self.tun_br.ofparser.OFPFlowMod( + self.tun_br.datapath, + table_id=constants.TUN_TABLE[lvm.network_type], + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY, + match=match) + self.ryu_send_msg(msg) + match = self.tun_br.ofparser.OFPMatch( + vlan_vid=int(lvm.vlan) | ryu_ofp13.OFPVID_PRESENT) + msg = self.tun_br.ofparser.OFPFlowMod( + self.tun_br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY, + match=match) + self.ryu_send_msg(msg) + elif lvm.network_type in (p_const.TYPE_FLAT, p_const.TYPE_VLAN): + if lvm.physical_network in self.phys_brs: + self._reclaim_local_vlan_outbound(lvm) + self._reclaim_local_vlan_inbound(lvm) + elif lvm.network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot reclaim unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': lvm.network_type, + 'net_uuid': net_uuid}) + + self.available_local_vlans.add(lvm.vlan) + + def port_bound(self, port, net_uuid, + network_type, physical_network, segmentation_id): + """Bind port to net_uuid/lsw_id and install flow for inbound traffic + to vm. + + :param port: a ovs_lib.VifPort object. + :param net_uuid: the net_uuid this port is to be associated with. 
+ :param network_type: the network type ('gre', 'vlan', 'flat', 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + """ + if net_uuid not in self.local_vlan_map: + self.provision_local_vlan(net_uuid, network_type, + physical_network, segmentation_id) + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports[port.port_name] = port + # Do not bind a port if it's already bound + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != str(lvm.vlan): + self.int_br.set_db_attribute("Port", port.port_name, "tag", + str(lvm.vlan)) + if port.ofport != -1: + match = self.int_br.ofparser.OFPMatch(in_port=port.ofport) + msg = self.int_br.ofparser.OFPFlowMod( + self.int_br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY, + match=match) + self.ryu_send_msg(msg) + + def port_unbound(self, vif_id, net_uuid=None): + """Unbind port. + + Removes corresponding local vlan mapping object if this is its last + VIF. + + :param vif_id: the id of the vif + :param net_uuid: the net_uuid this port is associated with. + """ + net_uuid = net_uuid or self.get_net_uuid(vif_id) + + if not self.local_vlan_map.get(net_uuid): + LOG.info(_('port_unbound() net_uuid %s not in local_vlan_map'), + net_uuid) + return + + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports.pop(vif_id, None) + + if not lvm.vif_ports: + self.reclaim_local_vlan(net_uuid) + + def port_dead(self, port): + """Once a port has no binding, put it on the "dead vlan". + + :param port: a ovs_lib.VifPort object. + """ + # Don't kill a port if it's already dead + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != DEAD_VLAN_TAG: + self.int_br.set_db_attribute("Port", port.port_name, "tag", + DEAD_VLAN_TAG) + match = self.int_br.ofparser.OFPMatch(in_port=port.ofport) + msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, + priority=2, match=match) + self.ryu_send_msg(msg) + + def setup_integration_br(self): + """Setup the integration bridge. + + Create patch ports and remove all existing flows. + + :param bridge_name: the name of the integration bridge. 
+ :returns: the integration bridge + """ + self.int_br.setup_ofp() + self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) + msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY) + self.ryu_send_msg(msg) + # switch all traffic using L2 learning + actions = [self.int_br.ofparser.OFPActionOutput( + ryu_ofp13.OFPP_NORMAL, 0)] + instructions = [self.int_br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, + actions)] + msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, + priority=1, + instructions=instructions) + self.ryu_send_msg(msg) + + def setup_ancillary_bridges(self, integ_br, tun_br): + """Setup ancillary bridges - for example br-ex.""" + ovs_bridges = set(ovs_lib.get_bridges(self.root_helper)) + # Remove all known bridges + ovs_bridges.remove(integ_br) + if self.enable_tunneling: + ovs_bridges.remove(tun_br) + br_names = [self.phys_brs[physical_network].br_name for + physical_network in self.phys_brs] + ovs_bridges.difference_update(br_names) + # Filter list of bridges to those that have external + # bridge-id's configured + br_names = [ + bridge for bridge in ovs_bridges + if bridge != ovs_lib.get_bridge_external_bridge_id( + self.root_helper, bridge) + ] + ovs_bridges.difference_update(br_names) + ancillary_bridges = [] + for bridge in ovs_bridges: + br = OVSBridge(bridge, self.root_helper, self.ryuapp) + ancillary_bridges.append(br) + LOG.info(_('ancillary bridge list: %s.'), ancillary_bridges) + return ancillary_bridges + + def _tun_br_sort_incoming_traffic_depend_in_port(self, br): + match = br.ofparser.OFPMatch( + in_port=int(self.patch_int_ofport)) + instructions = [br.ofparser.OFPInstructionGotoTable( + table_id=constants.PATCH_LV_TO_TUN)] + msg = br.ofparser.OFPFlowMod(br.datapath, + priority=1, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + msg = br.ofparser.OFPFlowMod(br.datapath, priority=0) + self.ryu_send_msg(msg) + + def _tun_br_goto_table_ucast_unicast(self, br): + match = br.ofparser.OFPMatch(eth_dst=('00:00:00:00:00:00', + '01:00:00:00:00:00')) + instructions = [br.ofparser.OFPInstructionGotoTable( + table_id=constants.UCAST_TO_TUN)] + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=constants.PATCH_LV_TO_TUN, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _tun_br_goto_table_flood_broad_multi_cast(self, br): + match = br.ofparser.OFPMatch(eth_dst=('01:00:00:00:00:00', + '01:00:00:00:00:00')) + instructions = [br.ofparser.OFPInstructionGotoTable( + table_id=constants.FLOOD_TO_TUN)] + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=constants.PATCH_LV_TO_TUN, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + def _tun_br_set_table_tun_by_tunnel_type(self, br): + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + msg = br.ofparser.OFPFlowMod( + br.datapath, + table_id=constants.TUN_TABLE[tunnel_type], + priority=0) + self.ryu_send_msg(msg) + + def _tun_br_output_patch_int(self, br): + actions = [br.ofparser.OFPActionOutput( + int(self.patch_int_ofport), 0)] + instructions = [br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, + actions)] + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=constants.LEARN_FROM_TUN, + priority=1, + instructions=instructions) + self.ryu_send_msg(msg) + + def _tun_br_goto_table_flood_unknown_unicast(self, br): + instructions = [br.ofparser.OFPInstructionGotoTable( + 
table_id=constants.FLOOD_TO_TUN)] + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=constants.UCAST_TO_TUN, + priority=0, + instructions=instructions) + self.ryu_send_msg(msg) + + def _tun_br_default_drop(self, br): + msg = br.ofparser.OFPFlowMod( + br.datapath, + table_id=constants.FLOOD_TO_TUN, + priority=0) + self.ryu_send_msg(msg) + + def setup_tunnel_br(self, tun_br): + """Setup the tunnel bridge. + + Creates tunnel bridge, and links it to the integration bridge + using a patch port. + + :param tun_br: the name of the tunnel bridge. + """ + self.tun_br = OVSBridge(tun_br, self.root_helper, self.ryuapp) + self.tun_br.reset_bridge() + self.tun_br.setup_ofp() + self.patch_tun_ofport = self.int_br.add_patch_port( + cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) + self.patch_int_ofport = self.tun_br.add_patch_port( + cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) + if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0: + LOG.error(_("Failed to create OVS patch port. Cannot have " + "tunneling enabled on this agent, since this version " + "of OVS does not support tunnels or patch ports. " + "Agent terminated!")) + raise SystemExit(1) + msg = self.tun_br.ofparser.OFPFlowMod(self.tun_br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY) + self.ryu_send_msg(msg) + + self._tun_br_sort_incoming_traffic_depend_in_port(self.tun_br) + self._tun_br_goto_table_ucast_unicast(self.tun_br) + self._tun_br_goto_table_flood_broad_multi_cast(self.tun_br) + self._tun_br_set_table_tun_by_tunnel_type(self.tun_br) + self._tun_br_output_patch_int(self.tun_br) + self._tun_br_goto_table_flood_unknown_unicast(self.tun_br) + self._tun_br_default_drop(self.tun_br) + + def _phys_br_prepare_create_veth(self, br, int_veth_name, phys_veth_name): + self.int_br.delete_port(int_veth_name) + br.delete_port(phys_veth_name) + if ip_lib.device_exists(int_veth_name, self.root_helper): + ip_lib.IPDevice(int_veth_name, self.root_helper).link.delete() + # Give udev a chance to process its rules here, to avoid + # race conditions between commands launched by udev rules + # and the subsequent call to ip_wrapper.add_veth + utils.execute(['/sbin/udevadm', 'settle', '--timeout=10']) + + def _phys_br_create_veth(self, br, int_veth_name, + phys_veth_name, physical_network, ip_wrapper): + int_veth, phys_veth = ip_wrapper.add_veth(int_veth_name, + phys_veth_name) + self.int_ofports[physical_network] = self.int_br.add_port(int_veth) + self.phys_ofports[physical_network] = br.add_port(phys_veth) + return (int_veth, phys_veth) + + def _phys_br_block_untranslated_traffic(self, br, physical_network): + match = self.int_br.ofparser.OFPMatch(in_port=int( + self.int_ofports[physical_network])) + msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, + priority=2, match=match) + self.ryu_send_msg(msg) + match = br.ofparser.OFPMatch(in_port=int( + self.phys_ofports[physical_network])) + msg = br.ofparser.OFPFlowMod(br.datapath, priority=2, match=match) + self.ryu_send_msg(msg) + + def _phys_br_enable_veth_to_pass_traffic(self, int_veth, phys_veth): + # enable veth to pass traffic + int_veth.link.set_up() + phys_veth.link.set_up() + + if self.veth_mtu: + # set up mtu size for veth interfaces + int_veth.link.set_mtu(self.veth_mtu) + phys_veth.link.set_mtu(self.veth_mtu) + + def _phys_br_patch_physical_bridge_with_integration_bridge( + self, br, physical_network, bridge, ip_wrapper): + int_veth_name = 
constants.PEER_INTEGRATION_PREFIX + bridge + phys_veth_name = constants.PEER_PHYSICAL_PREFIX + bridge + self._phys_br_prepare_create_veth(br, int_veth_name, phys_veth_name) + int_veth, phys_veth = self._phys_br_create_veth(br, int_veth_name, + phys_veth_name, + physical_network, + ip_wrapper) + self._phys_br_block_untranslated_traffic(br, physical_network) + self._phys_br_enable_veth_to_pass_traffic(int_veth, phys_veth) + + def setup_physical_bridges(self, bridge_mappings): + """Setup the physical network bridges. + + Creates physical network bridges and links them to the + integration bridge using veths. + + :param bridge_mappings: map physical network names to bridge names. + """ + self.phys_brs = {} + self.int_ofports = {} + self.phys_ofports = {} + ip_wrapper = ip_lib.IPWrapper(self.root_helper) + for physical_network, bridge in bridge_mappings.iteritems(): + LOG.info(_("Mapping physical network %(physical_network)s to " + "bridge %(bridge)s"), + {'physical_network': physical_network, + 'bridge': bridge}) + # setup physical bridge + if not ip_lib.device_exists(bridge, self.root_helper): + LOG.error(_("Bridge %(bridge)s for physical network " + "%(physical_network)s does not exist. Agent " + "terminated!"), + {'physical_network': physical_network, + 'bridge': bridge}) + raise SystemExit(1) + br = OVSBridge(bridge, self.root_helper, self.ryuapp) + br.setup_ofp() + msg = br.ofparser.OFPFlowMod(br.datapath, + table_id=ryu_ofp13.OFPTT_ALL, + command=ryu_ofp13.OFPFC_DELETE, + out_group=ryu_ofp13.OFPG_ANY, + out_port=ryu_ofp13.OFPP_ANY) + self.ryu_send_msg(msg) + actions = [br.ofparser.OFPActionOutput(ryu_ofp13.OFPP_NORMAL, 0)] + instructions = [br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, + actions)] + msg = br.ofparser.OFPFlowMod(br.datapath, + priority=1, + instructions=instructions) + self.ryu_send_msg(msg) + self.phys_brs[physical_network] = br + + self._phys_br_patch_physical_bridge_with_integration_bridge( + br, physical_network, bridge, ip_wrapper) + + def scan_ports(self, registered_ports, updated_ports=None): + cur_ports = self._get_ofport_names(self.int_br) + self.int_br_device_count = len(cur_ports) + port_info = {'current': cur_ports} + if updated_ports is None: + updated_ports = set() + updated_ports.update(self._find_lost_vlan_port(registered_ports)) + if updated_ports: + # Some updated ports might have been removed in the + # meanwhile, and therefore should not be processed. + # In this case the updated port won't be found among + # current ports. + updated_ports &= cur_ports + if updated_ports: + port_info['updated'] = updated_ports + + if cur_ports == registered_ports: + # No added or removed ports to set, just return here + return port_info + + port_info['added'] = cur_ports - registered_ports + # Remove all the known ports not found on the integration bridge + port_info['removed'] = registered_ports - cur_ports + return port_info + + def _find_lost_vlan_port(self, registered_ports): + """Return ports which have lost their vlan tag. + + The returned value is a set of port ids of the ports concerned by a + vlan tag loss. + """ + # TODO(yamamoto): stop using ovsdb + # an idea is to use metadata instead of tagged vlans. + # cf. 
blueprint ofagent-merge-bridges + port_tags = self.int_br.get_port_tag_dict() + changed_ports = set() + for lvm in self.local_vlan_map.values(): + for port in registered_ports: + if ( + port in lvm.vif_ports + and port in port_tags + and port_tags[port] != lvm.vlan + ): + LOG.info( + _("Port '%(port_name)s' has lost " + "its vlan tag '%(vlan_tag)d'!"), + {'port_name': port, + 'vlan_tag': lvm.vlan} + ) + changed_ports.add(port) + return changed_ports + + def update_ancillary_ports(self, registered_ports): + # TODO(yamamoto): stop using ovsdb + # - do the same as scan_ports + # - or, find a way to update status of ancillary ports differently + # eg. let interface drivers mark ports up + ports = set() + for bridge in self.ancillary_brs: + ports |= bridge.get_vif_port_set() + + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def treat_vif_port(self, vif_port, port_id, network_id, network_type, + physical_network, segmentation_id, admin_state_up): + if vif_port: + # When this function is called for a port, the port should have + # an OVS ofport configured, as only these ports were considered + # for being treated. If that does not happen, it is a potential + # error condition of which operators should be aware + if not vif_port.ofport: + LOG.warn(_("VIF port: %s has no ofport configured, and might " + "not be able to transmit"), vif_port.port_name) + if admin_state_up: + self.port_bound(vif_port, network_id, network_type, + physical_network, segmentation_id) + else: + self.port_dead(vif_port) + else: + LOG.debug(_("No VIF port for port %s defined on agent."), port_id) + + def setup_tunnel_port(self, port_name, remote_ip, tunnel_type): + ofport = self.tun_br.add_tunnel_port(port_name, + remote_ip, + self.local_ip, + tunnel_type, + self.vxlan_udp_port, + self.dont_fragment) + ofport_int = -1 + try: + ofport_int = int(ofport) + except (TypeError, ValueError): + LOG.exception(_("ofport should have a value that can be " + "interpreted as an integer")) + if ofport_int < 0: + LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': tunnel_type, 'ip': remote_ip}) + return 0 + + self.tun_br_ofports[tunnel_type][remote_ip] = ofport + # Add flow in default table to resubmit to the right + # tunelling table (lvid will be set in the latter) + match = self.tun_br.ofparser.OFPMatch(in_port=int(ofport)) + instructions = [self.tun_br.ofparser.OFPInstructionGotoTable( + table_id=constants.TUN_TABLE[tunnel_type])] + msg = self.tun_br.ofparser.OFPFlowMod(self.tun_br.datapath, + priority=1, + match=match, + instructions=instructions) + self.ryu_send_msg(msg) + + ofports = [int(p) for p in self.tun_br_ofports[tunnel_type].values()] + if ofports: + # Update flooding flows to include the new tunnel + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vlan_mapping.network_type == tunnel_type: + match = self.tun_br.ofparser.OFPMatch( + vlan_vid=int(vlan_mapping.vlan) | + ryu_ofp13.OFPVID_PRESENT) + actions = [ + self.tun_br.ofparser.OFPActionPopVlan(), + self.tun_br.ofparser.OFPActionSetField( + tunnel_id=int(vlan_mapping.segmentation_id))] + actions.extend( + self.tun_br.ofparser.OFPActionOutput(p, 0) + for p in ofports + ) + instructions = [ + self.tun_br.ofparser.OFPInstructionActions( + ryu_ofp13.OFPIT_APPLY_ACTIONS, + actions)] + msg = self.tun_br.ofparser.OFPFlowMod( + self.tun_br.datapath, + table_id=constants.FLOOD_TO_TUN, + priority=1, + 
match=match, + instructions=instructions) + self.ryu_send_msg(msg) + return ofport + + def treat_devices_added_or_updated(self, devices): + resync = False + all_ports = dict((p.port_name, p) for p in self._get_ports(self.int_br)) + for device in devices: + LOG.debug(_("Processing port %s"), device) + if device not in all_ports: + # The port has disappeared and should not be processed + # There is no need to put the port DOWN in the plugin as + # it never went up in the first place + LOG.info(_("Port %s was not found on the integration bridge " + "and will therefore not be processed"), device) + continue + port = all_ports[device] + try: + details = self.plugin_rpc.get_device_details(self.context, + device, + self.agent_id) + except Exception as e: + LOG.debug(_("Unable to get port details for " + "%(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if 'port_id' in details: + LOG.info(_("Port %(device)s updated. Details: %(details)s"), + {'device': device, 'details': details}) + self.treat_vif_port(port, details['port_id'], + details['network_id'], + details['network_type'], + details['physical_network'], + details['segmentation_id'], + details['admin_state_up']) + + # update plugin about port status + if details.get('admin_state_up'): + LOG.debug(_("Setting status for %s to UP"), device) + self.plugin_rpc.update_device_up( + self.context, device, self.agent_id, cfg.CONF.host) + else: + LOG.debug(_("Setting status for %s to DOWN"), device) + self.plugin_rpc.update_device_down( + self.context, device, self.agent_id, cfg.CONF.host) + LOG.info(_("Configuration for device %s completed."), device) + else: + LOG.warn(_("Device %s not defined on plugin"), device) + if (port and port.ofport != -1): + self.port_dead(port) + return resync + + def treat_ancillary_devices_added(self, devices): + resync = False + for device in devices: + LOG.info(_("Ancillary Port %s added"), device) + try: + self.plugin_rpc.get_device_details(self.context, device, + self.agent_id) + except Exception as e: + LOG.debug(_("Unable to get port details for " + "%(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + + # update plugin about port status + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + return resync + + def treat_devices_removed(self, devices): + resync = False + self.sg_agent.remove_devices_filter(devices) + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + self.port_unbound(device) + return resync + + def treat_ancillary_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + details = self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if details['exists']: + LOG.info(_("Port %s updated."), device) + # Nothing to do regarding local networking + else: + LOG.debug(_("Device %s not defined on plugin"), device) + return resync + + def process_network_ports(self, port_info): + resync_add = False + resync_removed = False + # If there is an exception while processing security groups, ports + # will not be wired
anyway, and a resync will be triggered + self.sg_agent.setup_port_filters(port_info.get('added', set()), + port_info.get('updated', set())) + # VIF wiring needs to be performed always for 'new' devices. + # For updated ports, re-wiring is not needed in most cases, but needs + # to be performed anyway when the admin state of a device is changed. + # A device might be both in the 'added' and 'updated' + # list at the same time; avoid processing it twice. + devices_added_updated = (port_info.get('added', set()) | + port_info.get('updated', set())) + if devices_added_updated: + start = time.time() + resync_add = self.treat_devices_added_or_updated( + devices_added_updated) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d - " + "treat_devices_added_or_updated completed " + "in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_removed = self.treat_devices_removed(port_info['removed']) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d - " + "treat_devices_removed completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # If one of the above operations fails => resync with plugin + return (resync_add | resync_removed) + + def process_ancillary_network_ports(self, port_info): + resync_add = False + resync_removed = False + if 'added' in port_info: + start = time.time() + resync_add = self.treat_ancillary_devices_added(port_info['added']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_added " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_removed = self.treat_ancillary_devices_removed( + port_info['removed']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_removed " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + # If one of the above operations fails => resync with plugin + return (resync_add | resync_removed) + + def tunnel_sync(self): + resync = False + try: + for tunnel_type in self.tunnel_types: + details = self.plugin_rpc.tunnel_sync(self.context, + self.local_ip, + tunnel_type) + tunnels = details['tunnels'] + for tunnel in tunnels: + if self.local_ip != tunnel['ip_address']: + tun_name = self._create_tunnel_port_name( + tunnel_type, tunnel['ip_address']) + if not tun_name: + continue + self.setup_tunnel_port(tun_name, + tunnel['ip_address'], + tunnel_type) + except Exception as e: + LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"), + {'local_ip': self.local_ip, 'e': e}) + resync = True + return resync + + def _agent_has_updates(self, polling_manager): + return (polling_manager.is_polling_required or + self.updated_ports or + self.sg_agent.firewall_refresh_needed()) + + def _port_info_has_changes(self, port_info): + return (port_info.get('added') or + port_info.get('removed') or + port_info.get('updated')) + + def ovsdb_monitor_loop(self, polling_manager=None): + if not polling_manager: + polling_manager = polling.AlwaysPoll() + + sync = True + ports = set() + updated_ports_copy = set() + ancillary_ports = set() + tunnel_sync = True + while True: + start = time.time() + port_stats = {'regular': {'added': 0, 'updated': 0, 'removed': 0}, + 'ancillary': {'added': 0, 'removed': 0}} + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%d started"), +
self.iter_num) + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + ancillary_ports.clear() + sync = False + polling_manager.force_polling() + # Notify the plugin of tunnel IP + if self.enable_tunneling and tunnel_sync: + LOG.info(_("Agent tunnel out of sync with plugin!")) + try: + tunnel_sync = self.tunnel_sync() + except Exception: + LOG.exception(_("Error while synchronizing tunnels")) + tunnel_sync = True + if self._agent_has_updates(polling_manager): + try: + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%(iter_num)d - " + "starting polling. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Save updated ports dict to perform rollback in + # case resync would be needed, and then clear + # self.updated_ports. As the greenthread should not yield + # between these two statements, this will be thread-safe + updated_ports_copy = self.updated_ports + self.updated_ports = set() + port_info = self.scan_ports(ports, updated_ports_copy) + ports = port_info['current'] + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%(iter_num)d - " + "port information retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Secure and wire/unwire VIFs and update their status + # on Neutron server + if (self._port_info_has_changes(port_info) or + self.sg_agent.firewall_refresh_needed()): + LOG.debug(_("Starting to process devices in:%s"), + port_info) + # If treat devices fails - must resync with plugin + sync = self.process_network_ports(port_info) + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%(iter_num)d - " + "ports processed. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + port_stats['regular']['added'] = ( + len(port_info.get('added', []))) + port_stats['regular']['updated'] = ( + len(port_info.get('updated', []))) + port_stats['regular']['removed'] = ( + len(port_info.get('removed', []))) + # Treat ancillary devices if they exist + if self.ancillary_brs: + port_info = self.update_ancillary_ports( + ancillary_ports) + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:%(iter_num)d - " + "ancillary port info retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + if port_info: + rc = self.process_ancillary_network_ports( + port_info) + LOG.debug(_("Agent ovsdb_monitor_loop - " + "iteration:" + "%(iter_num)d - ancillary ports " + "processed. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + ancillary_ports = port_info['current'] + port_stats['ancillary']['added'] = ( + len(port_info.get('added', []))) + port_stats['ancillary']['removed'] = ( + len(port_info.get('removed', []))) + sync = sync | rc + + polling_manager.polling_completed() + except Exception: + LOG.exception(_("Error while processing VIF ports")) + # Put the ports back in self.updated_port + self.updated_ports |= updated_ports_copy + sync = True + + # sleep till end of polling interval + elapsed = (time.time() - start) + LOG.debug(_("Agent ovsdb_monitor_loop - iteration:%(iter_num)d " + "completed. Processed ports statistics:" + "%(port_stats)s. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'port_stats': port_stats, + 'elapsed': elapsed}) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. 
%(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + self.iter_num = self.iter_num + 1 + + def daemon_loop(self): + # TODO(yamamoto): make polling logic stop using ovsdb monitor + # - make it a dumb periodic polling + # - or, monitor port status async messages + with polling.get_polling_manager( + self.minimize_polling, + self.root_helper, + self.ovsdb_monitor_respawn_interval) as pm: + + self.ovsdb_monitor_loop(polling_manager=pm) + + +def create_agent_config_map(config): + """Create a map of agent config parameters. + + :param config: an instance of cfg.CONF + :returns: a map of agent configuration parameters + """ + try: + bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings) + except ValueError as e: + raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) + + kwargs = dict( + integ_br=config.OVS.integration_bridge, + tun_br=config.OVS.tunnel_bridge, + local_ip=config.OVS.local_ip, + bridge_mappings=bridge_mappings, + root_helper=config.AGENT.root_helper, + polling_interval=config.AGENT.polling_interval, + minimize_polling=config.AGENT.minimize_polling, + tunnel_types=config.AGENT.tunnel_types, + veth_mtu=config.AGENT.veth_mtu, + l2_population=False, + ovsdb_monitor_respawn_interval=constants.DEFAULT_OVSDBMON_RESPAWN, + ) + + # If enable_tunneling is TRUE, set tunnel_type to default to GRE + if config.OVS.enable_tunneling and not kwargs['tunnel_types']: + kwargs['tunnel_types'] = [p_const.TYPE_GRE] + + # Verify the tunnel_types specified are valid + for tun in kwargs['tunnel_types']: + if tun not in constants.TUNNEL_NETWORK_TYPES: + msg = _('Invalid tunnel type specificed: %s'), tun + raise ValueError(msg) + if not kwargs['local_ip']: + msg = _('Tunneling cannot be enabled without a valid local_ip.') + raise ValueError(msg) + + return kwargs diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/ports.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/ports.py new file mode 100644 index 00000000..c78c5cd6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ofagent/agent/ports.py @@ -0,0 +1,27 @@ +# Copyright (C) 2014 VA Linux Systems Japan K.K. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: YAMAMOTO Takashi, VA Linux Systems Japan K.K. + + +class Port(object): + def __init__(self, port_name, ofport): + self.port_name = port_name + self.ofport = ofport + + @classmethod + def from_ofp_port(cls, ofp_port): + """Convert from ryu OFPPort.""" + return cls(port_name=ofp_port.name, ofport=ofp_port.port_no) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py new file mode 100644 index 00000000..674cc197 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py @@ -0,0 +1,176 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. + +"""NVSD agent code for security group events.""" + +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from neutron.agent.linux import ovs_lib +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron import context as n_context +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import log as logging +from neutron.plugins.oneconvergence.lib import config + +LOG = logging.getLogger(__name__) + + +class NVSDAgentRpcCallback(n_rpc.RpcCallback): + + RPC_API_VERSION = '1.0' + + def __init__(self, context, agent, sg_agent): + super(NVSDAgentRpcCallback, self).__init__() + self.context = context + self.agent = agent + self.sg_agent = sg_agent + + def port_update(self, context, **kwargs): + LOG.debug(_("port_update received: %s"), kwargs) + port = kwargs.get('port') + # Validate that port is on OVS + vif_port = self.agent.int_br.get_vif_port_by_id(port['id']) + if not vif_port: + return + + if ext_sg.SECURITYGROUPS in port: + self.sg_agent.refresh_firewall() + + +class SecurityGroupServerRpcApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupServerRpcApiMixin): + def __init__(self, topic): + super(SecurityGroupServerRpcApi, self).__init__( + topic=topic, default_version=sg_rpc.SG_RPC_VERSION) + + +class SecurityGroupAgentRpcCallback( + n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + RPC_API_VERSION = sg_rpc.SG_RPC_VERSION + + def __init__(self, context, sg_agent): + super(SecurityGroupAgentRpcCallback, self).__init__() + self.context = context + self.sg_agent = sg_agent + + +class SecurityGroupAgentRpc(sg_rpc.SecurityGroupAgentRpcMixin): + + def __init__(self, context, root_helper): + self.context = context + + self.plugin_rpc = SecurityGroupServerRpcApi(topics.PLUGIN) + self.root_helper = root_helper + self.init_firewall() + + +class NVSDNeutronAgent(n_rpc.RpcCallback): + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, root_helper, polling_interval): + super(NVSDNeutronAgent, self).__init__() + self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) + self.polling_interval = polling_interval + self.root_helper = root_helper + self.setup_rpc() + self.ports = set() + + def setup_rpc(self): + + self.host = socket.gethostname() + self.agent_id = 'nvsd-q-agent.%s' % self.host + LOG.info(_("RPC agent_id: %s"), self.agent_id) + + self.topic = topics.AGENT + self.context = n_context.get_admin_context_without_session() + self.sg_agent = SecurityGroupAgentRpc(self.context, + self.root_helper) + + # RPC network init + # Handle updates from service + self.callback_oc = NVSDAgentRpcCallback(self.context, + self, self.sg_agent) + self.callback_sg = 
SecurityGroupAgentRpcCallback(self.context, + self.sg_agent) + self.endpoints = [self.callback_oc, self.callback_sg] + # Define the listening consumer for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + def _update_ports(self, registered_ports): + ports = self.int_br.get_vif_port_set() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def _process_devices_filter(self, port_info): + if 'added' in port_info: + self.sg_agent.prepare_devices_filter(port_info['added']) + if 'removed' in port_info: + self.sg_agent.remove_devices_filter(port_info['removed']) + + def daemon_loop(self): + """Main processing loop for OC Plugin Agent.""" + + ports = set() + while True: + try: + port_info = self._update_ports(ports) + if port_info: + LOG.debug(_("Port list is updated")) + self._process_devices_filter(port_info) + ports = port_info['current'] + self.ports = ports + except Exception: + LOG.exception(_("Error in agent event loop")) + + LOG.debug(_("AGENT looping.....")) + time.sleep(self.polling_interval) + + +def main(): + common_config.init(sys.argv[1:]) + common_config.setup_logging(config.CONF) + + integ_br = config.AGENT.integration_bridge + root_helper = config.AGENT.root_helper + polling_interval = config.AGENT.polling_interval + agent = NVSDNeutronAgent(integ_br, root_helper, polling_interval) + LOG.info(_("NVSD Agent initialized successfully, now running... ")) + + # Start everything. + agent.daemon_loop() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/lib/plugin_helper.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/lib/plugin_helper.py new file mode 100644 index 00000000..4158257f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/lib/plugin_helper.py @@ -0,0 +1,186 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. 
+ +"""Library to talk to NVSD controller.""" + +import httplib +import time + +from oslo.config import cfg +import requests +from six.moves.urllib import parse + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +import neutron.plugins.oneconvergence.lib.exception as exception + +LOG = logging.getLogger(__name__) + + +def initialize_plugin_helper(): + nvsdcontroller = NVSDController() + return nvsdcontroller + + +class NVSDController(object): + + """Encapsulates the NVSD Controller details.""" + + def __init__(self): + + self._host = cfg.CONF.nvsd.nvsd_ip + self._port = cfg.CONF.nvsd.nvsd_port + self._user = cfg.CONF.nvsd.nvsd_user + self._password = cfg.CONF.nvsd.nvsd_passwd + self._retries = cfg.CONF.nvsd.nvsd_retries + self._request_timeout = float(cfg.CONF.nvsd.request_timeout) + self.api_url = 'http://' + self._host + ':' + str(self._port) + + self.pool = requests.Session() + + self.auth_token = None + + def do_request(self, method, url=None, headers=None, data=None, + timeout=10): + response = self.pool.request(method, url=url, + headers=headers, data=data, + timeout=self._request_timeout) + return response + + def login(self): + """Login to NVSD Controller.""" + + headers = {"Content-Type": "application/json"} + + login_url = parse.urljoin(self.api_url, + "/pluginhandler/ocplugin/authmgmt/login") + + data = json.dumps({"user_name": self._user, "passwd": self._password}) + + attempts = 0 + + while True: + if attempts < self._retries: + attempts += 1 + elif self._retries == 0: + attempts = 0 + else: + msg = _("Unable to connect to NVSD controller. Exiting after " + "%(retries)s attempts") % {'retries': self._retries} + LOG.error(msg) + raise exception.ServerException(reason=msg) + try: + response = self.do_request("POST", url=login_url, + headers=headers, data=data, + timeout=self._request_timeout) + break + except Exception as e: + LOG.error(_("Login Failed: %s"), e) + LOG.error(_("Unable to establish connection" + " with Controller %s"), self.api_url) + LOG.error(_("Retrying after 1 second...")) + time.sleep(1) + + if response.status_code == requests.codes.ok: + LOG.debug(_("Login Successful %(uri)s " + "%(status)s"), {'uri': self.api_url, + 'status': response.status_code}) + self.auth_token = json.loads(response.content)["session_uuid"] + LOG.debug(_("AuthToken = %s"), self.auth_token) + else: + LOG.error(_("login failed")) + + return + + def request(self, method, url, body="", content_type="application/json"): + """Issue a request to NVSD controller.""" + + if self.auth_token is None: + LOG.warning(_("No Token, Re-login")) + self.login() + + headers = {"Content-Type": content_type} + + uri = parse.urljoin(url, "?authToken=%s" % self.auth_token) + + url = parse.urljoin(self.api_url, uri) + + request_ok = False + response = None + + try: + response = self.do_request(method, url=url, + headers=headers, data=body, + timeout=self._request_timeout) + + LOG.debug(_("request: %(method)s %(uri)s successful"), + {'method': method, 'uri': self.api_url + uri}) + request_ok = True + except httplib.IncompleteRead as e: + response = e.partial + request_ok = True + except Exception as e: + LOG.error(_("request: Request failed from " + "Controller side :%s"), e) + + if response is None: + # Timeout. 
+ LOG.error(_("Response is Null, Request timed out: %(method)s to " + "%(uri)s"), {'method': method, 'uri': uri}) + self.auth_token = None + raise exception.RequestTimeout() + + status = response.status_code + if status == requests.codes.unauthorized: + self.auth_token = None + # Raise an exception to inform that the request failed. + raise exception.UnAuthorizedException() + + if status in self.error_codes: + LOG.error(_("Request %(method)s %(uri)s body = %(body)s failed " + "with status %(status)s"), {'method': method, + 'uri': uri, 'body': body, + 'status': status}) + LOG.error(_("%s"), response.reason) + raise self.error_codes[status]() + elif status not in (requests.codes.ok, requests.codes.created, + requests.codes.no_content): + LOG.error(_("%(method)s to %(url)s, unexpected response code: " + "%(status)d"), {'method': method, 'url': url, + 'status': status}) + return + + if not request_ok: + LOG.error(_("Request failed from Controller side with " + "Status=%s"), status) + raise exception.ServerException() + else: + LOG.debug(_("Success: %(method)s %(url)s status=%(status)s"), + {'method': method, 'url': self.api_url + uri, + 'status': status}) + response.body = response.content + return response + + error_codes = { + 404: exception.NotFoundException, + 409: exception.BadRequestException, + 500: exception.InternalServerError, + 503: exception.ServerException, + 403: exception.ForbiddenException, + 301: exception.NVSDAPIException, + 307: exception.NVSDAPIException, + 400: exception.NVSDAPIException, + } diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/plugin.py new file mode 100644 index 00000000..1456007b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/oneconvergence/plugin.py @@ -0,0 +1,440 @@ +# Copyright 2014 OneConvergence, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Kedar Kulkarni, One Convergence, Inc. 
+ +"""Implementation of OneConvergence Neutron Plugin.""" + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.common import constants as q_const +from neutron.common import exceptions as nexception +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_base +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +import neutron.plugins.oneconvergence.lib.config # noqa +import neutron.plugins.oneconvergence.lib.exception as nvsdexception +import neutron.plugins.oneconvergence.lib.nvsd_db as nvsd_db +from neutron.plugins.oneconvergence.lib import nvsdlib as nvsd_lib + +LOG = logging.getLogger(__name__) +IPv6 = 6 + + +class NVSDPluginRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + @staticmethod + def get_port_from_device(device): + port = nvsd_db.get_port_from_device(device) + if port: + port['device'] = device + return port + + +class NVSDPluginV2AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(NVSDPluginV2AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_port_update = topics.get_topic_name(topic, topics.PORT, + topics.UPDATE) + + def port_update(self, context, port): + self.fanout_cast(context, + self.make_msg('port_update', port=port), + topic=self.topic_port_update) + + +class OneConvergencePluginV2(db_base_plugin_v2.NeutronDbPluginV2, + extraroute_db.ExtraRoute_db_mixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + external_net_db.External_net_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + portbindings_base.PortBindingBaseMixin, + sg_db_rpc.SecurityGroupServerRpcMixin): + + """L2 Virtual Network Plugin. + + OneConvergencePluginV2 is a Neutron plugin that provides L2 Virtual Network + functionality. 
+ """ + + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + _supported_extension_aliases = ['agent', + 'binding', + 'dhcp_agent_scheduler', + 'ext-gw-mode', + 'external-net', + 'extraroute', + 'l3_agent_scheduler', + 'quotas', + 'router', + 'security-group' + ] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + + super(OneConvergencePluginV2, self).__init__() + + self.oneconvergence_init() + + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + + portbindings_base.register_port_dict_function() + + self.setup_rpc() + + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver) + + def oneconvergence_init(self): + """Initialize the connections and set the log levels for the plugin.""" + + self.nvsdlib = nvsd_lib.NVSDApi() + self.nvsdlib.set_connection() + + def setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.notifier = NVSDPluginV2AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + self.endpoints = [NVSDPluginRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def create_network(self, context, network): + + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + net = self.nvsdlib.create_network(network['network']) + + network['network']['id'] = net['id'] + + with context.session.begin(subtransactions=True): + try: + neutron_net = super(OneConvergencePluginV2, + self).create_network(context, network) + + #following call checks whether the network is external or not + #and if it is external then adds this network to + #externalnetworks table of neutron db + self._process_l3_create(context, neutron_net, + network['network']) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + self.nvsdlib.delete_network(net) + + return neutron_net + + def update_network(self, context, net_id, network): + + with context.session.begin(subtransactions=True): + + neutron_net = super(OneConvergencePluginV2, + self).update_network(context, net_id, network) + + self.nvsdlib.update_network(neutron_net, network['network']) + # updates neutron database e.g. externalnetworks table. 
+ self._process_l3_update(context, neutron_net, network['network']) + + return neutron_net + + def delete_network(self, context, net_id): + + with context.session.begin(subtransactions=True): + network = self._get_network(context, net_id) + #get all the subnets under the network to delete them + subnets = self._get_subnets_by_network(context, net_id) + + self._process_l3_delete(context, net_id) + super(OneConvergencePluginV2, self).delete_network(context, + net_id) + + self.nvsdlib.delete_network(network, subnets) + + def create_subnet(self, context, subnet): + + if subnet['subnet']['ip_version'] == IPv6: + raise nexception.InvalidInput( + error_message="NVSDPlugin doesn't support IPv6.") + + neutron_subnet = super(OneConvergencePluginV2, + self).create_subnet(context, subnet) + + try: + self.nvsdlib.create_subnet(neutron_subnet) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + #Log the message and delete the subnet from the neutron + super(OneConvergencePluginV2, + self).delete_subnet(context, neutron_subnet['id']) + LOG.error(_("Failed to create subnet, " + "deleting it from neutron")) + + return neutron_subnet + + def delete_subnet(self, context, subnet_id): + + neutron_subnet = self._get_subnet(context, subnet_id) + + with context.session.begin(subtransactions=True): + + super(OneConvergencePluginV2, self).delete_subnet(context, + subnet_id) + + self.nvsdlib.delete_subnet(neutron_subnet) + + def update_subnet(self, context, subnet_id, subnet): + + with context.session.begin(subtransactions=True): + + neutron_subnet = super(OneConvergencePluginV2, + self).update_subnet(context, subnet_id, + subnet) + + self.nvsdlib.update_subnet(neutron_subnet, subnet) + return neutron_subnet + + def create_port(self, context, port): + + self._ensure_default_security_group_on_port(context, port) + + sgids = self._get_security_groups_on_port(context, port) + + network = {} + + network_id = port['port']['network_id'] + + with context.session.begin(subtransactions=True): + + # Invoke the Neutron API for creating port + neutron_port = super(OneConvergencePluginV2, + self).create_port(context, port) + + self._process_portbindings_create_and_update(context, + port['port'], + neutron_port) + + self._process_port_create_security_group(context, neutron_port, + sgids) + if port['port']['device_owner'] in ('network:router_gateway', + 'network:floatingip'): + # for l3 requests, tenant_id will be None/'' + network = self._get_network(context, network_id) + + tenant_id = network['tenant_id'] + else: + tenant_id = port['port']['tenant_id'] + + port_id = neutron_port['id'] + + try: + self.nvsdlib.create_port(tenant_id, neutron_port) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + LOG.error(_("Deleting newly created " + "neutron port %s"), port_id) + super(OneConvergencePluginV2, self).delete_port(context, + port_id) + + self.notify_security_groups_member_updated(context, neutron_port) + + return neutron_port + + def update_port(self, context, port_id, port): + + with context.session.begin(subtransactions=True): + + old_port = super(OneConvergencePluginV2, self).get_port(context, + port_id) + + neutron_port = super(OneConvergencePluginV2, + self).update_port(context, port_id, port) + + if neutron_port['tenant_id'] == '': + network = self._get_network(context, + neutron_port['network_id']) + tenant_id = network['tenant_id'] + else: + tenant_id = neutron_port['tenant_id'] + + self.nvsdlib.update_port(tenant_id, neutron_port, port['port']) + + 
self._process_portbindings_create_and_update(context, + port['port'], + neutron_port) + need_port_update_notify = self.update_security_group_on_port( + context, port_id, port, old_port, neutron_port) + + if need_port_update_notify: + self.notifier.port_update(context, neutron_port) + + return neutron_port + + def delete_port(self, context, port_id, l3_port_check=True): + + if l3_port_check: + self.prevent_l3_port_deletion(context, port_id) + + with context.session.begin(subtransactions=True): + neutron_port = super(OneConvergencePluginV2, + self).get_port(context, port_id) + + self._delete_port_security_group_bindings(context, port_id) + + self.disassociate_floatingips(context, port_id) + + super(OneConvergencePluginV2, self).delete_port(context, port_id) + + network = self._get_network(context, neutron_port['network_id']) + neutron_port['tenant_id'] = network['tenant_id'] + + self.nvsdlib.delete_port(port_id, neutron_port) + + self.notify_security_groups_member_updated(context, neutron_port) + + def create_floatingip(self, context, floatingip): + + neutron_floatingip = super(OneConvergencePluginV2, + self).create_floatingip(context, + floatingip) + try: + self.nvsdlib.create_floatingip(neutron_floatingip) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to create floatingip")) + super(OneConvergencePluginV2, + self).delete_floatingip(context, + neutron_floatingip['id']) + + return neutron_floatingip + + def update_floatingip(self, context, fip_id, floatingip): + + with context.session.begin(subtransactions=True): + + neutron_floatingip = super(OneConvergencePluginV2, + self).update_floatingip(context, + fip_id, + floatingip) + + self.nvsdlib.update_floatingip(neutron_floatingip, floatingip) + + return neutron_floatingip + + def delete_floatingip(self, context, floating_ip_id): + + with context.session.begin(subtransactions=True): + + floating_ip = self._get_floatingip(context, floating_ip_id) + + super(OneConvergencePluginV2, + self).delete_floatingip(context, floating_ip_id) + + self.nvsdlib.delete_floatingip(floating_ip) + + def create_router(self, context, router): + + neutron_router = super(OneConvergencePluginV2, + self).create_router(context, router) + try: + self.nvsdlib.create_router(neutron_router) + except nvsdexception.NVSDAPIException: + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to create router")) + super(OneConvergencePluginV2, + self).delete_router(context, neutron_router['id']) + + return neutron_router + + def update_router(self, context, router_id, router): + + with context.session.begin(subtransactions=True): + + neutron_router = super(OneConvergencePluginV2, + self).update_router(context, router_id, + router) + + self.nvsdlib.update_router(neutron_router) + + return neutron_router + + def delete_router(self, context, router_id): + + tenant_id = self._get_router(context, router_id)['tenant_id'] + + with context.session.begin(subtransactions=True): + + super(OneConvergencePluginV2, self).delete_router(context, + router_id) + + self.nvsdlib.delete_router(tenant_id, router_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py new file mode 100644 index 00000000..2f025c80 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -0,0 +1,745 @@ +# Copyright 2014, Hewlett Packard, Inc. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Vivekanandan Narasimhan, Hewlett-Packard Inc + + +from neutron.api.rpc import dvr_rpc +from neutron.common import constants as q_const +from neutron.openstack.common import log as logging +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + + +# A class to represent a DVR-hosted subnet including vif_ports resident on +# that subnet +class LocalDVRSubnetMapping: + def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID): + # set of commpute ports on on this dvr subnet + self.compute_ports = {} + self.subnet = subnet + self.csnat_ofport = csnat_ofport + self.dvr_owned = False + + def __str__(self): + return ("subnet = %s compute_ports = %s csnat_port = %s" + " is_dvr_owned = %s" % + (self.subnet, self.get_compute_ofports(), + self.get_csnat_ofport(), self.is_dvr_owned())) + + def get_subnet_info(self): + return self.subnet + + def set_dvr_owned(self, owned): + self.dvr_owned = owned + + def is_dvr_owned(self): + return self.dvr_owned + + def add_compute_ofport(self, vif_id, ofport): + self.compute_ports[vif_id] = ofport + + def remove_compute_ofport(self, vif_id): + self.compute_ports.pop(vif_id, 0) + + def remove_all_compute_ofports(self): + self.compute_ports.clear() + + def get_compute_ofports(self): + return self.compute_ports + + def set_csnat_ofport(self, ofport): + self.csnat_ofport = ofport + + def get_csnat_ofport(self): + return self.csnat_ofport + + +class OVSPort: + def __init__(self, id, ofport, mac, device_owner): + self.id = id + self.mac = mac + self.ofport = ofport + self.subnets = set() + self.device_owner = device_owner + + def __str__(self): + return ("OVSPort: id = %s, ofport = %s, mac = %s," + "device_owner = %s, subnets = %s" % + (self.id, self.ofport, self.mac, + self.device_owner, self.subnets)) + + def add_subnet(self, subnet_id): + self.subnets.add(subnet_id) + + def remove_subnet(self, subnet_id): + self.subnets.remove(subnet_id) + + def remove_all_subnets(self): + self.subnets.clear() + + def get_subnets(self): + return self.subnets + + def get_device_owner(self): + return self.device_owner + + def get_mac(self): + return self.mac + + def get_ofport(self): + return self.ofport + + +class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): + '''Implements OVS-based DVR(Distributed Virtual Router), for + overlay networks. 
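+
+    This class keeps a per-subnet view of locally hosted DVR state
+    (local_dvr_map, local_csnat_map, local_ports) and programs the
+    DVR-specific flows: source-MAC rewriting towards local VMs in the
+    integration bridge table DVR_TO_SRC_MAC, and handling of
+    distributed router traffic in the DVR_PROCESS and DVR_NOT_LEARN
+    tables of the tunnel bridge.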
+ + ''' + + # history + # 1.0 Initial version + + def __init__(self, context, plugin_rpc, integ_br, tun_br, + patch_int_ofport=constants.OFPORT_INVALID, + patch_tun_ofport=constants.OFPORT_INVALID, + host=None, enable_tunneling=False, + enable_distributed_routing=False): + self.context = context + self.plugin_rpc = plugin_rpc + self.int_br = integ_br + self.tun_br = tun_br + self.patch_int_ofport = patch_int_ofport + self.patch_tun_ofport = patch_tun_ofport + self.host = host + self.enable_tunneling = enable_tunneling + self.enable_distributed_routing = enable_distributed_routing + + def reset_ovs_parameters(self, integ_br, tun_br, + patch_int_ofport, patch_tun_ofport): + '''Reset the openvswitch parameters + ''' + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + self.int_br = integ_br + self.tun_br = tun_br + self.patch_int_ofport = patch_int_ofport + self.patch_tun_ofport = patch_tun_ofport + + def setup_dvr_flows_on_integ_tun_br(self): + '''Setup up initial dvr flows into integration bridge and tunnel + bridge. + ''' + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + # get the local DVR MAC Address + try: + LOG.debug("L2 Agent operating in DVR Mode") + self.dvr_mac_address = None + self.local_dvr_map = {} + self.local_csnat_map = {} + self.local_ports = {} + self.registered_dvr_macs = set() + details = self.plugin_rpc.\ + get_dvr_mac_address_by_host(self.context, self.host) + LOG.debug("L2 Agent DVR: Received response for " + "get_dvr_mac_address_by_host() from " + "plugin: %r", details) + self.dvr_mac_address = details['mac_address'] + except Exception: + LOG.exception(_("DVR: Failed to obtain local DVR Mac address")) + self.enable_distributed_routing = False + # switch all traffic using L2 learning + self.int_br.add_flow(priority=1, actions="normal") + return + + # Remove existing flows in integration bridge + self.int_br.remove_all_flows() + + # Insert 'drop' action as the default for Table 2 + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=1, + actions="drop") + + # Insert 'normal' action as the default for Table 1 + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=1, + actions="normal") + + dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context) + LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs) + for mac in dvr_macs: + if mac['mac_address'] == self.dvr_mac_address: + continue + # Table 0 (default) will now sort DVR traffic from other + # traffic depending on in_port + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=2, + in_port=self.patch_tun_ofport, + dl_src=mac['mac_address'], + actions="resubmit(,%s)" % + constants.DVR_TO_SRC_MAC) + # Table DVR_NOT_LEARN ensures unique dvr macs in the cloud + # are not learnt, as they may + # result in flow explosions + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=1, + dl_src=mac['mac_address'], + actions="output:%s" % self.patch_int_ofport) + + self.registered_dvr_macs.add(mac['mac_address']) + + self.tun_br.add_flow(priority=1, + in_port=self.patch_int_ofport, + actions="resubmit(,%s)" % + constants.DVR_PROCESS) + # table-miss should be sent to learning table + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=0, + actions="resubmit(,%s)" % + constants.LEARN_FROM_TUN) + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=0, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + + def dvr_mac_address_update(self, dvr_macs): + if not 
self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + LOG.debug("DVR Mac address update with host-mac: %s", dvr_macs) + + if not self.dvr_mac_address: + LOG.debug("Self mac unknown, ignoring this" + " dvr_mac_address_update() ") + return + + dvr_host_macs = set() + for entry in dvr_macs: + if entry['mac_address'] == self.dvr_mac_address: + continue + dvr_host_macs.add(entry['mac_address']) + + if dvr_host_macs == self.registered_dvr_macs: + LOG.debug("DVR Mac address already up to date") + return + + dvr_macs_added = dvr_host_macs - self.registered_dvr_macs + dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs + + for oldmac in dvr_macs_removed: + self.int_br.delete_flows(table=constants.LOCAL_SWITCHING, + in_port=self.patch_tun_ofport, + dl_src=oldmac) + self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN, + dl_src=oldmac) + LOG.debug("Removed DVR MAC flow for %s", oldmac) + self.registered_dvr_macs.remove(oldmac) + + for newmac in dvr_macs_added: + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=2, + in_port=self.patch_tun_ofport, + dl_src=newmac, + actions="resubmit(,%s)" % + constants.DVR_TO_SRC_MAC) + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=1, + dl_src=newmac, + actions="output:%s" % self.patch_int_ofport) + LOG.debug("Added DVR MAC flow for %s", newmac) + self.registered_dvr_macs.add(newmac) + + def is_dvr_router_interface(self, device_owner): + return (device_owner == q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED) + + def process_tunneled_network(self, network_type, lvid, segmentation_id): + if not self.enable_tunneling: + return + if not self.enable_distributed_routing: + return + self.tun_br.add_flow(table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + actions="mod_vlan_vid:%s," + "resubmit(,%s)" % + (lvid, constants.DVR_NOT_LEARN)) + + def _bind_distributed_router_interface_port(self, port, fixed_ips, + device_owner, local_vlan): + # since router port must have only one fixed IP, directly + # use fixed_ips[0] + subnet_uuid = fixed_ips[0]['subnet_id'] + csnat_ofport = constants.OFPORT_INVALID + ldm = None + if subnet_uuid in self.local_dvr_map: + ldm = self.local_dvr_map[subnet_uuid] + csnat_ofport = ldm.get_csnat_ofport() + if csnat_ofport == constants.OFPORT_INVALID: + LOG.error(_("DVR: Duplicate DVR router interface detected " + "for subnet %s"), subnet_uuid) + return + else: + # set up LocalDVRSubnetMapping available for this subnet + subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, + subnet_uuid) + if not subnet_info: + LOG.error(_("DVR: Unable to retrieve subnet information" + " for subnet_id %s"), subnet_uuid) + return + LOG.debug("get_subnet_for_dvr for subnet %s returned with %s" % + (subnet_uuid, subnet_info)) + ldm = LocalDVRSubnetMapping(subnet_info) + self.local_dvr_map[subnet_uuid] = ldm + + # DVR takes over + ldm.set_dvr_owned(True) + + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + local_compute_ports = self.plugin_rpc.\ + get_compute_ports_on_host_by_subnet(self.context, + self.host, + subnet_uuid) + LOG.debug("DVR: List of ports received from " + "get_compute_ports_on_host_by_subnet %r", + local_compute_ports) + for prt in local_compute_ports: + vif = self.int_br.get_vif_port_by_id(prt['id']) + if not vif: + continue + ldm.add_compute_ofport(vif.vif_id, vif.ofport) + if vif.vif_id in self.local_ports: + # ensure if a compute port is already on + # a different dvr routed subnet + # if yes, queue this subnet to 
that port + ovsport = self.local_ports[vif.vif_id] + ovsport.add_subnet(subnet_uuid) + else: + # the compute port is discovered first here that its on + # a dvr routed subnet queue this subnet to that port + ovsport = OVSPort(vif.vif_id, vif.ofport, + vif.vif_mac, prt['device_owner']) + + ovsport.add_subnet(subnet_uuid) + self.local_ports[vif.vif_id] = ovsport + + # create rule for just this vm port + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + "output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + + # create rule in Table LOCAL_SWITCHING to forward + # broadcast/multicast frames from dvr router interface to + # appropriate local tenant ports + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + if csnat_ofport != constants.OFPORT_INVALID: + ofports = str(csnat_ofport) + ',' + ofports + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s, " + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=3, + dl_vlan=local_vlan, + proto='arp', + nw_dst=subnet_info['gateway_ip'], + actions="drop") + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=2, + dl_vlan=local_vlan, + dl_dst=port.vif_mac, + actions="drop") + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=1, + dl_vlan=local_vlan, + dl_src=port.vif_mac, + actions="mod_dl_src:%s,resubmit(,%s)" % + (self.dvr_mac_address, + constants.PATCH_LV_TO_TUN)) + + # the dvr router interface is itself a port, so capture it + # queue this subnet to that port. A subnet appears only once as + # a router interface on any given router + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + + def _bind_compute_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): + # Handle new compute port added use-case + subnet_uuid = None + for ips in fixed_ips: + if ips['subnet_id'] not in self.local_dvr_map: + continue + subnet_uuid = ips['subnet_id'] + ldm = self.local_dvr_map[subnet_uuid] + if not ldm.is_dvr_owned(): + # well this is csnat stuff, let dvr come in + # and do plumbing for this vm later + continue + + # This confirms that this compute port belongs + # to a dvr hosted subnet. 
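+            # (Concretely, the flows below are a priority-4
+            # DVR_TO_SRC_MAC rule per VM port - strip the local VLAN,
+            # rewrite the source MAC to the subnet gateway MAC and
+            # output to the VIF - plus a priority-2 subnet-wide rule
+            # listing every local compute/csnat ofport.)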
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Plumbing compute port %s", port.vif_id) + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + csnat_ofport = ldm.get_csnat_ofport() + ldm.add_compute_ofport(port.vif_id, port.ofport) + if port.vif_id in self.local_ports: + # ensure if a compute port is already on a different + # dvr routed subnet + # if yes, queue this subnet to that port + ovsport = self.local_ports[port.vif_id] + ovsport.add_subnet(subnet_uuid) + else: + # the compute port is discovered first here that its + # on a dvr routed subnet, queue this subnet to that port + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + # create a rule for this vm port + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + "output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + + if csnat_ofport != constants.OFPORT_INVALID: + ofports = str(csnat_ofport) + ',' + ofports + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): + if port.vif_id in self.local_ports: + # throw an error if CSNAT port is already on a different + # dvr routed subnet + ovsport = self.local_ports[port.vif_id] + subs = list(ovsport.get_subnets()) + LOG.error(_("Centralized-SNAT port %s already seen on "), + port.vif_id) + LOG.error(_("a different subnet %s"), subs[0]) + return + # since centralized-SNAT (CSNAT) port must have only one fixed + # IP, directly use fixed_ips[0] + subnet_uuid = fixed_ips[0]['subnet_id'] + ldm = None + subnet_info = None + if subnet_uuid not in self.local_dvr_map: + # no csnat ports seen on this subnet - create csnat state + # for this subnet + subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, + subnet_uuid) + ldm = LocalDVRSubnetMapping(subnet_info, port.ofport) + self.local_dvr_map[subnet_uuid] = ldm + else: + ldm = self.local_dvr_map[subnet_uuid] + subnet_info = ldm.get_subnet_info() + # Store csnat OF Port in the existing DVRSubnetMap + ldm.set_csnat_ofport(port.ofport) + + # create ovsPort footprint for csnat port + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + ofports = str(ldm.get_csnat_ofport()) + ',' + ofports + ip_subnet = subnet_info['cidr'] + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + def bind_port_to_dvr(self, port, network_type, fixed_ips, + device_owner, local_vlan_id): + # a port coming up as distributed router interface + if not self.enable_tunneling: + return + + if not 
self.enable_distributed_routing: + return + + if network_type not in constants.TUNNEL_NETWORK_TYPES: + return + + if device_owner == q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED: + self._bind_distributed_router_interface_port(port, fixed_ips, + device_owner, + local_vlan_id) + + if 'compute' in device_owner: + self._bind_compute_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) + + if device_owner == q_const.DEVICE_OWNER_ROUTER_SNAT: + self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) + + def _unbind_distributed_router_interface_port(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + + # removal of distributed router interface + subnet_ids = ovsport.get_subnets() + subnet_set = set(subnet_ids) + # ensure we process for all the subnets laid on this removed port + for sub_uuid in subnet_set: + if sub_uuid not in self.local_dvr_map: + continue + + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + + # DVR is no more owner + ldm.set_dvr_owned(False) + + # remove all vm rules for this dvr subnet + # clear of compute_ports altogether + compute_ports = ldm.get_compute_ofports() + for vif_id in compute_ports: + ovsport = self.local_ports[vif_id] + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + ldm.remove_all_compute_ofports() + + if ldm.get_csnat_ofport() != -1: + # If there is a csnat port on this agent, preserve + # the local_dvr_map state + ofports = str(ldm.get_csnat_ofport()) + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + # removed port is a distributed router interface + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', dl_vlan=local_vlan, + nw_dst=ip_subnet) + # remove subnet from local_dvr_map as no dvr (or) csnat + # ports available on this agent anymore + self.local_dvr_map.pop(sub_uuid, None) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + proto='arp', + nw_dst=subnet_info['gateway_ip']) + ovsport.remove_subnet(sub_uuid) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + dl_dst=port.vif_mac) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + dl_src=port.vif_mac) + # release port state + self.local_ports.pop(port.vif_id, None) + + def _unbind_compute_port_on_dvr_subnet(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + # This confirms that this compute port being removed belonged + # to a dvr hosted subnet. 
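+        # (The per-VM DVR_TO_SRC_MAC rule is deleted below and the
+        # subnet-wide rule is re-installed with the remaining
+        # compute/csnat ofports, or removed entirely once no local
+        # port is left on the subnet.)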
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Removing plumbing for compute port %s", port) + subnet_ids = ovsport.get_subnets() + # ensure we process for all the subnets laid on this port + for sub_uuid in subnet_ids: + if sub_uuid not in self.local_dvr_map: + continue + + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ldm.remove_compute_ofport(port.vif_id) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + ip_subnet = subnet_info['cidr'] + + # first remove this vm port rule + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + if ldm.get_csnat_ofport() != -1: + # If there is a csnat port on this agent, preserve + # the local_dvr_map state + ofports = str(ldm.get_csnat_ofport()) + ',' + ofports + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], + ofports)) + else: + # remove the flow altogether, as no ports (both csnat/ + # compute) are available on this subnet in this + # agent + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet) + # release port state + self.local_ports.pop(port.vif_id, None) + + def _unbind_centralized_snat_port_on_dvr_subnet(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + # This comfirms that this compute port being removed belonged + # to a dvr hosted subnet. 
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Removing plumbing for csnat port %s", port) + sub_uuid = list(ovsport.get_subnets())[0] + # ensure we process for all the subnets laid on this port + if sub_uuid not in self.local_dvr_map: + return + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + ldm.set_csnat_ofport(constants.OFPORT_INVALID) + # then remove csnat port rule + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet) + if not ldm.is_dvr_owned(): + # if not owned by DVR (only used for csnat), remove this + # subnet state altogether + self.local_dvr_map.pop(sub_uuid, None) + + # release port state + self.local_ports.pop(port.vif_id, None) + + def unbind_port_from_dvr(self, vif_port, local_vlan_id): + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + if not vif_port: + LOG.debug("DVR: VIF Port not available for delete %s", vif_port) + return + + # Handle port removed use-case + if vif_port.vif_id not in self.local_ports: + LOG.debug("DVR: Non distributed port, ignoring %s", vif_port) + return + + ovsport = self.local_ports[vif_port.vif_id] + + if ovsport.get_device_owner() == \ + q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED: + self._unbind_distributed_router_interface_port(vif_port, + local_vlan_id) + + if 'compute' in ovsport.get_device_owner(): + self._unbind_compute_port_on_dvr_subnet(vif_port, + local_vlan_id) + + if ovsport.get_device_owner() == q_const.DEVICE_OWNER_ROUTER_SNAT: + self._unbind_centralized_snat_port_on_dvr_subnet(vif_port, + local_vlan_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py new file mode 100644 index 00000000..70ff36c5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -0,0 +1,1634 @@ +#!/usr/bin/env python +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import hashlib +import signal +import sys +import time + +import eventlet +eventlet.monkey_patch() + +import netaddr +from neutron.plugins.openvswitch.agent import ovs_dvr_neutron_agent +from oslo.config import cfg +from six import moves + +from neutron.agent import l2population_rpc +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import polling +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc import dvr_rpc +from neutron.common import config as common_config +from neutron.common import constants as q_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils as q_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.openvswitch.common import config # noqa +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + +# A placeholder for dead vlans. +DEAD_VLAN_TAG = str(q_const.MAX_VLAN_TAG + 1) + + +# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac' +# attributes set). +class LocalVLANMapping: + def __init__(self, vlan, network_type, physical_network, segmentation_id, + vif_ports=None): + if vif_ports is None: + vif_ports = {} + self.vlan = vlan + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + self.vif_ports = vif_ports + # set of tunnel ports on which packets should be flooded + self.tun_ofports = set() + + def __str__(self): + return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % + (self.vlan, self.network_type, self.physical_network, + self.segmentation_id)) + + +class OVSPluginApi(agent_rpc.PluginApi, + dvr_rpc.DVRServerRpcApiMixin, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall(defer_refresh_firewall=True) + + +class OVSNeutronAgent(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin, + l2population_rpc.L2populationRpcCallBackMixin, + dvr_rpc.DVRAgentRpcCallbackMixin): + '''Implements OVS-based tunneling, VLANs and flat networks. + + Two local bridges are created: an integration bridge (defaults to + 'br-int') and a tunneling bridge (defaults to 'br-tun'). An + additional bridge is created for each physical network interface + used for VLANs and/or flat networks. + + All VM VIFs are plugged into the integration bridge. VM VIFs on a + given virtual network share a common "local" VLAN (i.e. not + propagated externally). The VLAN id of this local VLAN is mapped + to the physical networking details realizing that virtual network. + + For virtual networks realized as GRE tunnels, a Logical Switch + (LS) identifier is used to differentiate tenant traffic on + inter-HV tunnels. A mesh of tunnels is created to other + Hypervisors in the cloud. These tunnels originate and terminate on + the tunneling bridge of each hypervisor. Port patching is done to + connect local VLANs on the integration bridge to inter-hypervisor + tunnels on the tunnel bridge. 
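+
+    When distributed routing is enabled, the DVR-specific flow
+    programming on both bridges is delegated to an OVSDVRNeutronAgent
+    instance (self.dvr_agent), which is consulted whenever a port is
+    bound to or unbound from its local VLAN.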
+ + For each virtual network realized as a VLAN or flat network, a + veth or a pair of patch ports is used to connect the local VLAN on + the integration bridge with the physical network bridge, with flow + rules adding, modifying, or stripping VLAN tags as necessary. + ''' + + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + # 1.2 Support DVR (Distributed Virtual Router) RPC + RPC_API_VERSION = '1.2' + + def __init__(self, integ_br, tun_br, local_ip, + bridge_mappings, root_helper, + polling_interval, tunnel_types=None, + veth_mtu=None, l2_population=False, + enable_distributed_routing=False, + minimize_polling=False, + ovsdb_monitor_respawn_interval=( + constants.DEFAULT_OVSDBMON_RESPAWN), + arp_responder=False, + use_veth_interconnection=False): + '''Constructor. + + :param integ_br: name of the integration bridge. + :param tun_br: name of the tunnel bridge. + :param local_ip: local IP address of this hypervisor. + :param bridge_mappings: mappings from physical network name to bridge. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to poll DB. + :param tunnel_types: A list of tunnel types to enable support for in + the agent. If set, will automatically set enable_tunneling to + True. + :param veth_mtu: MTU size for veth interfaces. + :param l2_population: Optional, whether L2 population is turned on + :param minimize_polling: Optional, whether to minimize polling by + monitoring ovsdb for interface changes. + :param ovsdb_monitor_respawn_interval: Optional, when using polling + minimization, the number of seconds to wait before respawning + the ovsdb monitor. + :param arp_responder: Optional, enable local ARP responder if it is + supported. + :param use_veth_interconnection: use veths instead of patch ports to + interconnect the integration bridge to physical bridges. + ''' + super(OVSNeutronAgent, self).__init__() + self.use_veth_interconnection = use_veth_interconnection + self.veth_mtu = veth_mtu + self.root_helper = root_helper + self.available_local_vlans = set(moves.xrange(q_const.MIN_VLAN_TAG, + q_const.MAX_VLAN_TAG)) + self.tunnel_types = tunnel_types or [] + self.l2_pop = l2_population + # TODO(ethuleau): Initially, local ARP responder is be dependent to the + # ML2 l2 population mechanism driver. 
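+        # The local ARP responder is enabled only when all three hold:
+        # the arp_responder option is set, the installed OVS supports
+        # the required ARP header manipulation, and l2_population is on.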
+ self.arp_responder_enabled = (arp_responder and + self._check_arp_responder_support() and + self.l2_pop) + self.enable_distributed_routing = enable_distributed_routing + self.agent_state = { + 'binary': 'neutron-openvswitch-agent', + 'host': cfg.CONF.host, + 'topic': q_const.L2_AGENT_TOPIC, + 'configurations': {'bridge_mappings': bridge_mappings, + 'tunnel_types': self.tunnel_types, + 'tunneling_ip': local_ip, + 'l2_population': self.l2_pop, + 'arp_responder_enabled': + self.arp_responder_enabled, + 'enable_distributed_routing': + self.enable_distributed_routing}, + 'agent_type': q_const.AGENT_TYPE_OVS, + 'start_flag': True} + + # Keep track of int_br's device count for use by _report_state() + self.int_br_device_count = 0 + + self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper) + self.setup_integration_br() + # Stores port update notifications for processing in main rpc loop + self.updated_ports = set() + self.setup_rpc() + self.bridge_mappings = bridge_mappings + self.setup_physical_bridges(self.bridge_mappings) + self.local_vlan_map = {} + self.tun_br_ofports = {p_const.TYPE_GRE: {}, + p_const.TYPE_VXLAN: {}} + + self.polling_interval = polling_interval + self.minimize_polling = minimize_polling + self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval + + if tunnel_types: + self.enable_tunneling = True + else: + self.enable_tunneling = False + self.local_ip = local_ip + self.tunnel_count = 0 + self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port + self.dont_fragment = cfg.CONF.AGENT.dont_fragment + self.tun_br = None + self.patch_int_ofport = constants.OFPORT_INVALID + self.patch_tun_ofport = constants.OFPORT_INVALID + if self.enable_tunneling: + # The patch_int_ofport and patch_tun_ofport are updated + # here inside the call to setup_tunnel_br + self.setup_tunnel_br(tun_br) + + self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent( + self.context, + self.plugin_rpc, + self.int_br, + self.tun_br, + self.patch_int_ofport, + self.patch_tun_ofport, + cfg.CONF.host, + self.enable_tunneling, + self.enable_distributed_routing) + + self.dvr_agent.setup_dvr_flows_on_integ_tun_br() + + # Collect additional bridges to monitor + self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br) + + # Security group agent support + self.sg_agent = OVSSecurityGroupAgent(self.context, + self.plugin_rpc, + root_helper) + # Initialize iteration counter + self.iter_num = 0 + self.run_daemon_loop = True + + def _check_arp_responder_support(self): + '''Check if OVS supports to modify ARP headers. + + This functionality is only available since the development branch 2.1. 
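+
+        The check asks ovs_lib whether ovs-ofctl accepts an add-flow
+        argument that loads and moves the ARP header fields
+        (NXM_OF_ARP_OP, NXM_NX_ARP_SHA/THA, NXM_OF_ARP_SPA/TPA); if it
+        does not, a warning is logged and the responder is left
+        disabled.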
+ ''' + args = ['arp,action=load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[]'] + supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'add-flow', + args) + if not supported: + LOG.warning(_('OVS version can not support ARP responder.')) + return supported + + def _report_state(self): + # How many devices are likely used by a VM + self.agent_state.get('configurations')['devices'] = ( + self.int_br_device_count) + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def setup_rpc(self): + self.agent_id = 'ovs-agent-%s' % cfg.CONF.host + self.topic = topics.AGENT + self.plugin_rpc = OVSPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [self] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [constants.TUNNEL, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE], + [topics.DVR, topics.UPDATE]] + if self.l2_pop: + consumers.append([topics.L2POPULATION, + topics.UPDATE, cfg.CONF.host]) + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def get_net_uuid(self, vif_id): + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vif_id in vlan_mapping.vif_ports: + return network_id + + def network_delete(self, context, **kwargs): + LOG.debug(_("network_delete received")) + network_id = kwargs.get('network_id') + LOG.debug(_("Delete %s"), network_id) + # The network may not be defined on this agent + lvm = self.local_vlan_map.get(network_id) + if lvm: + self.reclaim_local_vlan(network_id) + else: + LOG.debug(_("Network %s not used on agent."), network_id) + + def port_update(self, context, **kwargs): + port = kwargs.get('port') + # Put the port identifier in the updated_ports set. 
+ # Even if full port details might be provided to this call, + # they are not used since there is no guarantee the notifications + # are processed in the same order as the relevant API requests + self.updated_ports.add(port['id']) + LOG.debug(_("port_update message processed for port %s"), port['id']) + + def tunnel_update(self, context, **kwargs): + LOG.debug(_("tunnel_update received")) + if not self.enable_tunneling: + return + tunnel_ip = kwargs.get('tunnel_ip') + tunnel_id = kwargs.get('tunnel_id', self.get_ip_in_hex(tunnel_ip)) + if not tunnel_id: + return + tunnel_type = kwargs.get('tunnel_type') + if not tunnel_type: + LOG.error(_("No tunnel_type specified, cannot create tunnels")) + return + if tunnel_type not in self.tunnel_types: + LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type) + return + if tunnel_ip == self.local_ip: + return + tun_name = '%s-%s' % (tunnel_type, tunnel_id) + if not self.l2_pop: + self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type) + + def fdb_add(self, context, fdb_entries): + LOG.debug(_("fdb_add received")) + for network_id, values in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + # Agent doesn't manage any port in this network + continue + agent_ports = values.get('ports') + agent_ports.pop(self.local_ip, None) + if len(agent_ports): + if not self.enable_distributed_routing: + # TODO(vivek): when defer_apply_on is used DVR + # flows are only getting partially configured when + # run concurrently with l2-pop ON. + # Will need make ovs_lib flow API context sensitive + # and then use the same across this file, which will + # address the race issue here. + self.tun_br.defer_apply_on() + for agent_ip, ports in agent_ports.items(): + # Ensure we have a tunnel port with this remote agent + ofport = self.tun_br_ofports[ + lvm.network_type].get(agent_ip) + if not ofport: + remote_ip_hex = self.get_ip_in_hex(agent_ip) + if not remote_ip_hex: + continue + port_name = '%s-%s' % (lvm.network_type, remote_ip_hex) + ofport = self.setup_tunnel_port(port_name, agent_ip, + lvm.network_type) + if ofport == 0: + continue + for port in ports: + self._add_fdb_flow(port, agent_ip, lvm, ofport) + if not self.enable_distributed_routing: + # TODO(vivek): when defer_apply_on is used DVR + # flows are only getting partiall configured when + # run concurrently with l2-pop ON. + # Will need make ovs_lib flow API context sensitive + # and then use the same across this file, which will + # address the race issue here. + self.tun_br.defer_apply_off() + + def fdb_remove(self, context, fdb_entries): + LOG.debug(_("fdb_remove received")) + for network_id, values in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + # Agent doesn't manage any more ports in this network + continue + agent_ports = values.get('ports') + agent_ports.pop(self.local_ip, None) + if len(agent_ports): + if not self.enable_distributed_routing: + # TODO(vivek): when defer_apply_on is used DVR + # flows are only getting partially configured when + # run concurrently with l2-pop ON. 
+ # Will need make ovs_lib flow API context sensitive + # and then use the same across this file, which will + # address the race issue he + self.tun_br.defer_apply_on() + for agent_ip, ports in agent_ports.items(): + ofport = self.tun_br_ofports[ + lvm.network_type].get(agent_ip) + if not ofport: + continue + for port in ports: + self._del_fdb_flow(port, agent_ip, lvm, ofport) + if not self.enable_distributed_routing: + # TODO(vivek): when defer_apply_on is used DVR + # flows are only getting partially configured when + # run concurrently with l2-pop ON. + # Will need make ovs_lib flow API context sensitive + # and then use the same across this file, which will + # address the race issue here. + self.tun_br.defer_apply_off() + + def _add_fdb_flow(self, port_info, agent_ip, lvm, ofport): + if port_info == q_const.FLOODING_ENTRY: + lvm.tun_ofports.add(ofport) + ofports = ','.join(lvm.tun_ofports) + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan, + actions="strip_vlan,set_tunnel:%s," + "output:%s" % (lvm.segmentation_id, ofports)) + else: + self._set_arp_responder('add', lvm.vlan, port_info[0], + port_info[2]) + if not self.dvr_agent.is_dvr_router_interface(port_info[1]): + self.tun_br.add_flow(table=constants.UCAST_TO_TUN, + priority=2, + dl_vlan=lvm.vlan, + dl_dst=port_info[0], + actions="strip_vlan,set_tunnel:%s," + "output:%s" % + (lvm.segmentation_id, ofport)) + + def _del_fdb_flow(self, port_info, agent_ip, lvm, ofport): + if port_info == q_const.FLOODING_ENTRY: + lvm.tun_ofports.remove(ofport) + if len(lvm.tun_ofports) > 0: + ofports = ','.join(lvm.tun_ofports) + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (lvm.segmentation_id, ofports)) + else: + # This local vlan doesn't require any more tunnelling + self.tun_br.delete_flows(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan) + # Check if this tunnel port is still used + self.cleanup_tunnel_port(ofport, lvm.network_type) + else: + self._set_arp_responder('remove', lvm.vlan, port_info[0], + port_info[1]) + self.tun_br.delete_flows(table=constants.UCAST_TO_TUN, + dl_vlan=lvm.vlan, + dl_dst=port_info[0]) + + def _fdb_chg_ip(self, context, fdb_entries): + '''fdb update when an IP of a port is updated. + + The ML2 l2-pop mechanism driver send an fdb update rpc message when an + IP of a port is updated. + + :param context: RPC context. + :param fdb_entries: fdb dicts that contain all mac/IP informations per + agent and network. + {'net1': + {'agent_ip': + {'before': [[mac, ip]], + 'after': [[mac, ip]] + } + } + 'net2': + ... + } + ''' + LOG.debug(_("update chg_ip received")) + + # TODO(ethuleau): Use OVS defer apply flows for all rules will be an + # interesting improvement here. But actually, OVS lib defer apply flows + # methods doesn't ensure the add flows will be applied before delete. 
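+        # Each entry is processed per network and per remote agent:
+        # ARP responder rules for the 'after' mac/ip pairs are
+        # installed first, then the stale 'before' pairs are removed.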
+ for network_id, agent_ports in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + continue + + for agent_ip, state in agent_ports.items(): + if agent_ip == self.local_ip: + continue + + after = state.get('after') + for mac, ip in after: + self._set_arp_responder('add', lvm.vlan, mac, ip) + + before = state.get('before') + for mac, ip in before: + self._set_arp_responder('remove', lvm.vlan, mac, ip) + + def fdb_update(self, context, fdb_entries): + LOG.debug(_("fdb_update received")) + for action, values in fdb_entries.items(): + method = '_fdb_' + action + if not hasattr(self, method): + raise NotImplementedError() + + getattr(self, method)(context, values) + + def _set_arp_responder(self, action, lvid, mac_str, ip_str): + '''Set the ARP respond entry. + + When the l2 population mechanism driver and OVS supports to edit ARP + fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the + tunnel bridge. + + :param action: add or remove ARP entry. + :param lvid: local VLAN map of network's ARP entry. + :param mac_str: MAC string value. + :param ip_str: IP string value. + ''' + if not self.arp_responder_enabled: + return + + mac = netaddr.EUI(mac_str, dialect=netaddr.mac_unix) + ip = netaddr.IPAddress(ip_str) + + if action == 'add': + actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' + 'mod_dl_src:%(mac)s,' + 'load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' + 'load:%(mac)#x->NXM_NX_ARP_SHA[],' + 'load:%(ip)#x->NXM_OF_ARP_SPA[],' + 'in_port' % {'mac': mac, 'ip': ip}) + self.tun_br.add_flow(table=constants.ARP_RESPONDER, + priority=1, + proto='arp', + dl_vlan=lvid, + nw_dst='%s' % ip, + actions=actions) + elif action == 'remove': + self.tun_br.delete_flows(table=constants.ARP_RESPONDER, + proto='arp', + dl_vlan=lvid, + nw_dst='%s' % ip) + else: + LOG.warning(_('Action %s not supported'), action) + + def provision_local_vlan(self, net_uuid, network_type, physical_network, + segmentation_id): + '''Provisions a local VLAN. + + :param net_uuid: the uuid of the network associated with this vlan. + :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', + 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + ''' + + # On a restart or crash of OVS, the network associated with this VLAN + # will already be assigned, so check for that here before assigning a + # new one. 
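+        # Otherwise an unused tag is popped from available_local_vlans;
+        # e.g. (illustrative values only) a vxlan network with
+        # segmentation_id 1001 might map to local vlan 5 on this host
+        # and to a different local vlan on another host.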
+ lvm = self.local_vlan_map.get(net_uuid) + if lvm: + lvid = lvm.vlan + else: + if not self.available_local_vlans: + LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) + return + lvid = self.available_local_vlans.pop() + self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, + network_type, + physical_network, + segmentation_id) + + LOG.info(_("Assigning %(vlan_id)s as local vlan for " + "net-id=%(net_uuid)s"), + {'vlan_id': lvid, 'net_uuid': net_uuid}) + + if network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + # outbound broadcast/multicast + ofports = ','.join(self.tun_br_ofports[network_type].values()) + if ofports: + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvid, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (segmentation_id, ofports)) + # inbound from tunnels: set lvid in the right table + # and resubmit to Table LEARN_FROM_TUN for mac learning + if self.enable_distributed_routing: + self.dvr_agent.process_tunneled_network( + network_type, lvid, segmentation_id) + else: + self.tun_br.add_flow( + table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + actions="mod_vlan_vid:%s," + "resubmit(,%s)" % + (lvid, constants.LEARN_FROM_TUN)) + + else: + LOG.error(_("Cannot provision %(network_type)s network for " + "net-id=%(net_uuid)s - tunneling disabled"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + elif network_type == p_const.TYPE_FLAT: + if physical_network in self.phys_brs: + # outbound + br = self.phys_brs[physical_network] + br.add_flow(priority=4, + in_port=self.phys_ofports[physical_network], + dl_vlan=lvid, + actions="strip_vlan,normal") + # inbound + self.int_br.add_flow( + priority=3, + in_port=self.int_ofports[physical_network], + dl_vlan=0xffff, + actions="mod_vlan_vid:%s,normal" % lvid) + else: + LOG.error(_("Cannot provision flat network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_VLAN: + if physical_network in self.phys_brs: + # outbound + br = self.phys_brs[physical_network] + br.add_flow(priority=4, + in_port=self.phys_ofports[physical_network], + dl_vlan=lvid, + actions="mod_vlan_vid:%s,normal" % segmentation_id) + # inbound + self.int_br.add_flow(priority=3, + in_port=self. + int_ofports[physical_network], + dl_vlan=segmentation_id, + actions="mod_vlan_vid:%s,normal" % lvid) + else: + LOG.error(_("Cannot provision VLAN network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot provision unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + + def reclaim_local_vlan(self, net_uuid): + '''Reclaim a local VLAN. + + :param net_uuid: the network uuid associated with this vlan. + :param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id, + vif_ids) mapping. 
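+
+        The mapping is removed from local_vlan_map, the flows installed
+        for the network type (tunnel, flat or vlan) are deleted from
+        the relevant bridges, and the vlan id is returned to
+        available_local_vlans.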
+ ''' + lvm = self.local_vlan_map.pop(net_uuid, None) + if lvm is None: + LOG.debug(_("Network %s not used on agent."), net_uuid) + return + + LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"), + {'vlan_id': lvm.vlan, + 'net_uuid': net_uuid}) + + if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + self.tun_br.delete_flows( + table=constants.TUN_TABLE[lvm.network_type], + tun_id=lvm.segmentation_id) + self.tun_br.delete_flows(dl_vlan=lvm.vlan) + if self.l2_pop: + # Try to remove tunnel ports if not used by other networks + for ofport in lvm.tun_ofports: + self.cleanup_tunnel_port(ofport, lvm.network_type) + elif lvm.network_type == p_const.TYPE_FLAT: + if lvm.physical_network in self.phys_brs: + # outbound + br = self.phys_brs[lvm.physical_network] + br.delete_flows(in_port=self.phys_ofports[lvm. + physical_network], + dl_vlan=lvm.vlan) + # inbound + br = self.int_br + br.delete_flows(in_port=self.int_ofports[lvm.physical_network], + dl_vlan=0xffff) + elif lvm.network_type == p_const.TYPE_VLAN: + if lvm.physical_network in self.phys_brs: + # outbound + br = self.phys_brs[lvm.physical_network] + br.delete_flows(in_port=self.phys_ofports[lvm. + physical_network], + dl_vlan=lvm.vlan) + # inbound + br = self.int_br + br.delete_flows(in_port=self.int_ofports[lvm.physical_network], + dl_vlan=lvm.segmentation_id) + elif lvm.network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot reclaim unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': lvm.network_type, + 'net_uuid': net_uuid}) + + self.available_local_vlans.add(lvm.vlan) + + def port_bound(self, port, net_uuid, + network_type, physical_network, + segmentation_id, fixed_ips, device_owner, + ovs_restarted): + '''Bind port to net_uuid/lsw_id and install flow for inbound traffic + to vm. + + :param port: a ovslib.VifPort object. + :param net_uuid: the net_uuid this port is to be associated with. + :param network_type: the network type ('gre', 'vlan', 'flat', 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + :param fixed_ips: the ip addresses assigned to this port + :param device_owner: the string indicative of owner of this port + :param ovs_restarted: indicates if this is called for an OVS restart. + ''' + if net_uuid not in self.local_vlan_map or ovs_restarted: + self.provision_local_vlan(net_uuid, network_type, + physical_network, segmentation_id) + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports[port.vif_id] = port + + self.dvr_agent.bind_port_to_dvr(port, network_type, fixed_ips, + device_owner, + local_vlan_id=lvm.vlan) + + # Do not bind a port if it's already bound + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != str(lvm.vlan): + self.int_br.set_db_attribute("Port", port.port_name, "tag", + str(lvm.vlan)) + if port.ofport != -1: + self.int_br.delete_flows(in_port=port.ofport) + + def port_unbound(self, vif_id, net_uuid=None): + '''Unbind port. + + Removes corresponding local vlan mapping object if this is its last + VIF. + + :param vif_id: the id of the vif + :param net_uuid: the net_uuid this port is associated with. 
+ ''' + if net_uuid is None: + net_uuid = self.get_net_uuid(vif_id) + + if not self.local_vlan_map.get(net_uuid): + LOG.info(_('port_unbound(): net_uuid %s not' + ' in local_vlan_map'), net_uuid) + return + + lvm = self.local_vlan_map[net_uuid] + + if vif_id in lvm.vif_ports: + vif_port = lvm.vif_ports[vif_id] + self.dvr_agent.unbind_port_from_dvr(vif_port, + local_vlan_id=lvm.vlan) + lvm.vif_ports.pop(vif_id, None) + + if not lvm.vif_ports: + self.reclaim_local_vlan(net_uuid) + + def port_dead(self, port): + '''Once a port has no binding, put it on the "dead vlan". + + :param port: a ovs_lib.VifPort object. + ''' + # Don't kill a port if it's already dead + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != DEAD_VLAN_TAG: + self.int_br.set_db_attribute("Port", port.port_name, "tag", + DEAD_VLAN_TAG) + self.int_br.add_flow(priority=2, in_port=port.ofport, + actions="drop") + + def setup_integration_br(self): + '''Setup the integration bridge. + + Create patch ports and remove all existing flows. + + :param bridge_name: the name of the integration bridge. + :returns: the integration bridge + ''' + # Ensure the integration bridge is created. + # ovs_lib.OVSBridge.create() will run + # ovs-vsctl -- --may-exist add-br BRIDGE_NAME + # which does nothing if bridge already exists. + self.int_br.create() + self.int_br.set_secure_mode() + + self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) + self.int_br.remove_all_flows() + # switch all traffic using L2 learning + self.int_br.add_flow(priority=1, actions="normal") + # Add a canary flow to int_br to track OVS restarts + self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0, + actions="drop") + + def setup_ancillary_bridges(self, integ_br, tun_br): + '''Setup ancillary bridges - for example br-ex.''' + ovs_bridges = set(ovs_lib.get_bridges(self.root_helper)) + # Remove all known bridges + ovs_bridges.remove(integ_br) + if self.enable_tunneling: + ovs_bridges.remove(tun_br) + br_names = [self.phys_brs[physical_network].br_name for + physical_network in self.phys_brs] + ovs_bridges.difference_update(br_names) + # Filter list of bridges to those that have external + # bridge-id's configured + br_names = [] + for bridge in ovs_bridges: + id = ovs_lib.get_bridge_external_bridge_id(self.root_helper, + bridge) + if id != bridge: + br_names.append(bridge) + ovs_bridges.difference_update(br_names) + ancillary_bridges = [] + for bridge in ovs_bridges: + br = ovs_lib.OVSBridge(bridge, self.root_helper) + LOG.info(_('Adding %s to list of bridges.'), bridge) + ancillary_bridges.append(br) + return ancillary_bridges + + def setup_tunnel_br(self, tun_br=None): + '''Setup the tunnel bridge. + + Creates tunnel bridge, and links it to the integration bridge + using a patch port. + + :param tun_br: the name of the tunnel bridge. + ''' + if not self.tun_br: + self.tun_br = ovs_lib.OVSBridge(tun_br, self.root_helper) + + self.tun_br.reset_bridge() + self.patch_tun_ofport = self.int_br.add_patch_port( + cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) + self.patch_int_ofport = self.tun_br.add_patch_port( + cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) + if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0: + LOG.error(_("Failed to create OVS patch port. Cannot have " + "tunneling enabled on this agent, since this version " + "of OVS does not support tunnels or patch ports. 
" + "Agent terminated!")) + exit(1) + self.tun_br.remove_all_flows() + + # Table 0 (default) will sort incoming traffic depending on in_port + self.tun_br.add_flow(priority=1, + in_port=self.patch_int_ofport, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + self.tun_br.add_flow(priority=0, actions="drop") + if self.arp_responder_enabled: + # ARP broadcast-ed request go to the local ARP_RESPONDER table to + # be locally resolved + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=1, + proto='arp', + dl_dst="ff:ff:ff:ff:ff:ff", + actions=("resubmit(,%s)" % + constants.ARP_RESPONDER)) + # PATCH_LV_TO_TUN table will handle packets coming from patch_int + # unicasts go to table UCAST_TO_TUN where remote addresses are learnt + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.UCAST_TO_TUN) + # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) + # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id + # for each tunnel type, and resubmit to table LEARN_FROM_TUN where + # remote mac addresses will be learnt + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + self.tun_br.add_flow(table=constants.TUN_TABLE[tunnel_type], + priority=0, + actions="drop") + # LEARN_FROM_TUN table will have a single flow using a learn action to + # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac + # addresses (assumes that lvid has already been set by a previous flow) + learned_flow = ("table=%s," + "priority=1," + "hard_timeout=300," + "NXM_OF_VLAN_TCI[0..11]," + "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," + "load:0->NXM_OF_VLAN_TCI[]," + "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," + "output:NXM_OF_IN_PORT[]" % + constants.UCAST_TO_TUN) + # Once remote mac addresses are learnt, output packet to patch_int + self.tun_br.add_flow(table=constants.LEARN_FROM_TUN, + priority=1, + actions="learn(%s),output:%s" % + (learned_flow, self.patch_int_ofport)) + # Egress unicast will be handled in table UCAST_TO_TUN, where remote + # mac addresses will be learned. For now, just add a default flow that + # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them + # as broadcasts/multicasts + self.tun_br.add_flow(table=constants.UCAST_TO_TUN, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + if self.arp_responder_enabled: + # If none of the ARP entries correspond to the requested IP, the + # broadcast-ed packet is resubmitted to the flooding table + self.tun_br.add_flow(table=constants.ARP_RESPONDER, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, + # for now, add a default drop action + self.tun_br.add_flow(table=constants.FLOOD_TO_TUN, + priority=0, + actions="drop") + + def get_peer_name(self, prefix, name): + """Construct a peer name based on the prefix and name. + + The peer name can not exceed the maximum length allowed for a linux + device. Longer names are hashed to help ensure uniqueness. + """ + if len(prefix + name) <= q_const.DEVICE_NAME_MAX_LEN: + return prefix + name + # We can't just truncate because bridges may be distinguished + # by an ident at the end. A hash over the name should be unique. 
+ # Leave part of the bridge name on for easier identification + hashlen = 6 + namelen = q_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen + new_name = ('%(prefix)s%(truncated)s%(hash)s' % + {'prefix': prefix, 'truncated': name[0:namelen], + 'hash': hashlib.sha1(name).hexdigest()[0:hashlen]}) + LOG.warning(_("Creating an interface named %(name)s exceeds the " + "%(limit)d character limitation. It was shortened to " + "%(new_name)s to fit."), + {'name': name, 'limit': q_const.DEVICE_NAME_MAX_LEN, + 'new_name': new_name}) + return new_name + + def setup_physical_bridges(self, bridge_mappings): + '''Setup the physical network bridges. + + Creates physical network bridges and links them to the + integration bridge using veths. + + :param bridge_mappings: map physical network names to bridge names. + ''' + self.phys_brs = {} + self.int_ofports = {} + self.phys_ofports = {} + ip_wrapper = ip_lib.IPWrapper(self.root_helper) + ovs_bridges = ovs_lib.get_bridges(self.root_helper) + for physical_network, bridge in bridge_mappings.iteritems(): + LOG.info(_("Mapping physical network %(physical_network)s to " + "bridge %(bridge)s"), + {'physical_network': physical_network, + 'bridge': bridge}) + # setup physical bridge + if bridge not in ovs_bridges: + LOG.error(_("Bridge %(bridge)s for physical network " + "%(physical_network)s does not exist. Agent " + "terminated!"), + {'physical_network': physical_network, + 'bridge': bridge}) + sys.exit(1) + br = ovs_lib.OVSBridge(bridge, self.root_helper) + br.remove_all_flows() + br.add_flow(priority=1, actions="normal") + self.phys_brs[physical_network] = br + + # interconnect physical and integration bridges using veth/patchs + int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX, + bridge) + phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX, + bridge) + self.int_br.delete_port(int_if_name) + br.delete_port(phys_if_name) + if self.use_veth_interconnection: + if ip_lib.device_exists(int_if_name, self.root_helper): + ip_lib.IPDevice(int_if_name, + self.root_helper).link.delete() + # Give udev a chance to process its rules here, to avoid + # race conditions between commands launched by udev rules + # and the subsequent call to ip_wrapper.add_veth + utils.execute(['/sbin/udevadm', 'settle', '--timeout=10']) + int_veth, phys_veth = ip_wrapper.add_veth(int_if_name, + phys_if_name) + int_ofport = self.int_br.add_port(int_veth) + phys_ofport = br.add_port(phys_veth) + else: + # Create patch ports without associating them in order to block + # untranslated traffic before association + int_ofport = self.int_br.add_patch_port( + int_if_name, constants.NONEXISTENT_PEER) + phys_ofport = br.add_patch_port( + phys_if_name, constants.NONEXISTENT_PEER) + + self.int_ofports[physical_network] = int_ofport + self.phys_ofports[physical_network] = phys_ofport + + # block all untranslated traffic between bridges + self.int_br.add_flow(priority=2, in_port=int_ofport, + actions="drop") + br.add_flow(priority=2, in_port=phys_ofport, actions="drop") + + if self.use_veth_interconnection: + # enable veth to pass traffic + int_veth.link.set_up() + phys_veth.link.set_up() + if self.veth_mtu: + # set up mtu size for veth interfaces + int_veth.link.set_mtu(self.veth_mtu) + phys_veth.link.set_mtu(self.veth_mtu) + else: + # associate patch ports to pass traffic + self.int_br.set_db_attribute('Interface', int_if_name, + 'options:peer', phys_if_name) + br.set_db_attribute('Interface', phys_if_name, + 'options:peer', int_if_name) + + def scan_ports(self, 
registered_ports, updated_ports=None): + cur_ports = self.int_br.get_vif_port_set() + self.int_br_device_count = len(cur_ports) + port_info = {'current': cur_ports} + if updated_ports is None: + updated_ports = set() + updated_ports.update(self.check_changed_vlans(registered_ports)) + if updated_ports: + # Some updated ports might have been removed in the + # meanwhile, and therefore should not be processed. + # In this case the updated port won't be found among + # current ports. + updated_ports &= cur_ports + if updated_ports: + port_info['updated'] = updated_ports + + # FIXME(salv-orlando): It's not really necessary to return early + # if nothing has changed. + if cur_ports == registered_ports: + # No added or removed ports to set, just return here + return port_info + + port_info['added'] = cur_ports - registered_ports + # Remove all the known ports not found on the integration bridge + port_info['removed'] = registered_ports - cur_ports + return port_info + + def check_changed_vlans(self, registered_ports): + """Return ports which have lost their vlan tag. + + The returned value is a set of port ids of the ports concerned by a + vlan tag loss. + """ + port_tags = self.int_br.get_port_tag_dict() + changed_ports = set() + for lvm in self.local_vlan_map.values(): + for port in registered_ports: + if ( + port in lvm.vif_ports + and lvm.vif_ports[port].port_name in port_tags + and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan + ): + LOG.info( + _("Port '%(port_name)s' has lost " + "its vlan tag '%(vlan_tag)d'!"), + {'port_name': lvm.vif_ports[port].port_name, + 'vlan_tag': lvm.vlan} + ) + changed_ports.add(port) + return changed_ports + + def update_ancillary_ports(self, registered_ports): + ports = set() + for bridge in self.ancillary_brs: + ports |= bridge.get_vif_port_set() + + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def treat_vif_port(self, vif_port, port_id, network_id, network_type, + physical_network, segmentation_id, admin_state_up, + fixed_ips, device_owner, ovs_restarted): + # When this function is called for a port, the port should have + # an OVS ofport configured, as only these ports were considered + # for being treated. 
If that does not happen, it is a potential + # error condition of which operators should be aware + if not vif_port.ofport: + LOG.warn(_("VIF port: %s has no ofport configured, and might not " + "be able to transmit"), vif_port.vif_id) + if vif_port: + if admin_state_up: + self.port_bound(vif_port, network_id, network_type, + physical_network, segmentation_id, + fixed_ips, device_owner, ovs_restarted) + else: + self.port_dead(vif_port) + else: + LOG.debug(_("No VIF port for port %s defined on agent."), port_id) + + def setup_tunnel_port(self, port_name, remote_ip, tunnel_type): + ofport = self.tun_br.add_tunnel_port(port_name, + remote_ip, + self.local_ip, + tunnel_type, + self.vxlan_udp_port, + self.dont_fragment) + ofport_int = -1 + try: + ofport_int = int(ofport) + except (TypeError, ValueError): + LOG.exception(_("ofport should have a value that can be " + "interpreted as an integer")) + if ofport_int < 0: + LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': tunnel_type, 'ip': remote_ip}) + return 0 + + self.tun_br_ofports[tunnel_type][remote_ip] = ofport + # Add flow in default table to resubmit to the right + # tunnelling table (lvid will be set in the latter) + self.tun_br.add_flow(priority=1, + in_port=ofport, + actions="resubmit(,%s)" % + constants.TUN_TABLE[tunnel_type]) + + ofports = ','.join(self.tun_br_ofports[tunnel_type].values()) + if ofports and not self.l2_pop: + # Update flooding flows to include the new tunnel + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vlan_mapping.network_type == tunnel_type: + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=vlan_mapping.vlan, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (vlan_mapping.segmentation_id, + ofports)) + return ofport + + def cleanup_tunnel_port(self, tun_ofport, tunnel_type): + # Check if this tunnel port is still used + for lvm in self.local_vlan_map.values(): + if tun_ofport in lvm.tun_ofports: + break + # If not, remove it + else: + for remote_ip, ofport in self.tun_br_ofports[tunnel_type].items(): + if ofport == tun_ofport: + port_name = '%s-%s' % (tunnel_type, + self.get_ip_in_hex(remote_ip)) + self.tun_br.delete_port(port_name) + self.tun_br.delete_flows(in_port=ofport) + self.tun_br_ofports[tunnel_type].pop(remote_ip, None) + + def treat_devices_added_or_updated(self, devices, ovs_restarted): + try: + devices_details_list = self.plugin_rpc.get_devices_details_list( + self.context, + devices, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug("Unable to get port details for %(devices)s: %(e)s", + {'devices': devices, 'e': e}) + # resync is needed + return True + for details in devices_details_list: + device = details['device'] + LOG.debug("Processing port: %s", device) + port = self.int_br.get_vif_port_by_id(device) + if not port: + # The port has disappeared and should not be processed + # There is no need to put the port DOWN in the plugin as + # it never went up in the first place + LOG.info(_("Port %s was not found on the integration bridge " + "and will therefore not be processed"), device) + continue + + if 'port_id' in details: + LOG.info(_("Port %(device)s updated. 
Details: %(details)s"), + {'device': device, 'details': details}) + self.treat_vif_port(port, details['port_id'], + details['network_id'], + details['network_type'], + details['physical_network'], + details['segmentation_id'], + details['admin_state_up'], + details['fixed_ips'], + details['device_owner'], + ovs_restarted) + # update plugin about port status + if details.get('admin_state_up'): + LOG.debug(_("Setting status for %s to UP"), device) + self.plugin_rpc.update_device_up( + self.context, device, self.agent_id, cfg.CONF.host) + else: + LOG.debug(_("Setting status for %s to DOWN"), device) + self.plugin_rpc.update_device_down( + self.context, device, self.agent_id, cfg.CONF.host) + LOG.info(_("Configuration for device %s completed."), device) + else: + LOG.warn(_("Device %s not defined on plugin"), device) + if (port and port.ofport != -1): + self.port_dead(port) + return False + + def treat_ancillary_devices_added(self, devices): + try: + devices_details_list = self.plugin_rpc.get_devices_details_list( + self.context, + devices, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug("Unable to get port details for " + "%(devices)s: %(e)s", {'devices': devices, 'e': e}) + # resync is needed + return True + + for details in devices_details_list: + device = details['device'] + LOG.info(_("Ancillary Port %s added"), device) + + # update plugin about port status + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + return False + + def treat_devices_removed(self, devices): + resync = False + self.sg_agent.remove_devices_filter(devices) + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + self.port_unbound(device) + return resync + + def treat_ancillary_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + details = self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if details['exists']: + LOG.info(_("Port %s updated."), device) + # Nothing to do regarding local networking + else: + LOG.debug(_("Device %s not defined on plugin"), device) + return resync + + def process_network_ports(self, port_info, ovs_restarted): + resync_a = False + resync_b = False + # TODO(salv-orlando): consider a solution for ensuring notifications + # are processed exactly in the same order in which they were + # received. This is tricky because there are two notification + # sources: the neutron server, and the ovs db monitor process + # If there is an exception while processing security groups ports + # will not be wired anyway, and a resync will be triggered + # TODO(salv-orlando): Optimize avoiding applying filters unnecessarily + # (eg: when there are no IP address changes) + self.sg_agent.setup_port_filters(port_info.get('added', set()), + port_info.get('updated', set())) + # VIF wiring needs to be performed always for 'new' devices. + # For updated ports, re-wiring is not needed in most cases, but needs + # to be performed anyway when the admin state of a device is changed. 
+ # A device might be both in the 'added' and 'updated' + # list at the same time; avoid processing it twice. + devices_added_updated = (port_info.get('added', set()) | + port_info.get('updated', set())) + if devices_added_updated: + start = time.time() + resync_a = self.treat_devices_added_or_updated( + devices_added_updated, ovs_restarted) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" + "treat_devices_added_or_updated completed " + "in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_b = self.treat_devices_removed(port_info['removed']) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" + "treat_devices_removed completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def process_ancillary_network_ports(self, port_info): + resync_a = False + resync_b = False + if 'added' in port_info: + start = time.time() + resync_a = self.treat_ancillary_devices_added(port_info['added']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_added " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_b = self.treat_ancillary_devices_removed( + port_info['removed']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_removed " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def get_ip_in_hex(self, ip_address): + try: + return '%08x' % netaddr.IPAddress(ip_address, version=4) + except Exception: + LOG.warn(_("Unable to create tunnel port. Invalid remote IP: %s"), + ip_address) + return + + def tunnel_sync(self): + resync = False + try: + for tunnel_type in self.tunnel_types: + details = self.plugin_rpc.tunnel_sync(self.context, + self.local_ip, + tunnel_type) + if not self.l2_pop: + tunnels = details['tunnels'] + for tunnel in tunnels: + if self.local_ip != tunnel['ip_address']: + tunnel_id = tunnel.get('id') + # Unlike the OVS plugin, ML2 doesn't return an id + # key. So use ip_address to form port name instead. + # Port name must be <=15 chars, so use shorter hex. 
+ remote_ip = tunnel['ip_address'] + remote_ip_hex = self.get_ip_in_hex(remote_ip) + if not tunnel_id and not remote_ip_hex: + continue + tun_name = '%s-%s' % (tunnel_type, + tunnel_id or remote_ip_hex) + self.setup_tunnel_port(tun_name, + tunnel['ip_address'], + tunnel_type) + except Exception as e: + LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"), + {'local_ip': self.local_ip, 'e': e}) + resync = True + return resync + + def _agent_has_updates(self, polling_manager): + return (polling_manager.is_polling_required or + self.updated_ports or + self.sg_agent.firewall_refresh_needed()) + + def _port_info_has_changes(self, port_info): + return (port_info.get('added') or + port_info.get('removed') or + port_info.get('updated')) + + def check_ovs_restart(self): + # Check for the canary flow + canary_flow = self.int_br.dump_flows_for_table(constants.CANARY_TABLE) + return not canary_flow + + def rpc_loop(self, polling_manager=None): + if not polling_manager: + polling_manager = polling.AlwaysPoll() + + sync = True + ports = set() + updated_ports_copy = set() + ancillary_ports = set() + tunnel_sync = True + ovs_restarted = False + while self.run_daemon_loop: + start = time.time() + port_stats = {'regular': {'added': 0, + 'updated': 0, + 'removed': 0}, + 'ancillary': {'added': 0, + 'removed': 0}} + LOG.debug(_("Agent rpc_loop - iteration:%d started"), + self.iter_num) + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + ancillary_ports.clear() + sync = False + polling_manager.force_polling() + ovs_restarted = self.check_ovs_restart() + if ovs_restarted: + self.setup_integration_br() + self.setup_physical_bridges(self.bridge_mappings) + if self.enable_tunneling: + self.setup_tunnel_br() + tunnel_sync = True + self.dvr_agent.reset_ovs_parameters(self.int_br, + self.tun_br, + self.patch_int_ofport, + self.patch_tun_ofport) + self.dvr_agent.setup_dvr_flows_on_integ_tun_br() + # Notify the plugin of tunnel IP + if self.enable_tunneling and tunnel_sync: + LOG.info(_("Agent tunnel out of sync with plugin!")) + try: + tunnel_sync = self.tunnel_sync() + except Exception: + LOG.exception(_("Error while synchronizing tunnels")) + tunnel_sync = True + if self._agent_has_updates(polling_manager) or ovs_restarted: + try: + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " + "starting polling. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Save updated ports dict to perform rollback in + # case resync would be needed, and then clear + # self.updated_ports. As the greenthread should not yield + # between these two statements, this will be thread-safe + updated_ports_copy = self.updated_ports + self.updated_ports = set() + reg_ports = (set() if ovs_restarted else ports) + port_info = self.scan_ports(reg_ports, updated_ports_copy) + ports = port_info['current'] + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " + "port information retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Secure and wire/unwire VIFs and update their status + # on Neutron server + if (self._port_info_has_changes(port_info) or + self.sg_agent.firewall_refresh_needed() or + ovs_restarted): + LOG.debug(_("Starting to process devices in:%s"), + port_info) + # If treat devices fails - must resync with plugin + sync = self.process_network_ports(port_info, + ovs_restarted) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" + "ports processed. 
Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + port_stats['regular']['added'] = ( + len(port_info.get('added', []))) + port_stats['regular']['updated'] = ( + len(port_info.get('updated', []))) + port_stats['regular']['removed'] = ( + len(port_info.get('removed', []))) + # Treat ancillary devices if they exist + if self.ancillary_brs: + port_info = self.update_ancillary_ports( + ancillary_ports) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" + "ancillary port info retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + if port_info: + rc = self.process_ancillary_network_ports( + port_info) + LOG.debug(_("Agent rpc_loop - iteration:" + "%(iter_num)d - ancillary ports " + "processed. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + ancillary_ports = port_info['current'] + port_stats['ancillary']['added'] = ( + len(port_info.get('added', []))) + port_stats['ancillary']['removed'] = ( + len(port_info.get('removed', []))) + sync = sync | rc + + polling_manager.polling_completed() + except Exception: + LOG.exception(_("Error while processing VIF ports")) + # Put the ports back in self.updated_port + self.updated_ports |= updated_ports_copy + sync = True + + # sleep till end of polling interval + elapsed = (time.time() - start) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d " + "completed. Processed ports statistics: " + "%(port_stats)s. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'port_stats': port_stats, + 'elapsed': elapsed}) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + self.iter_num = self.iter_num + 1 + + def daemon_loop(self): + with polling.get_polling_manager( + self.minimize_polling, + self.root_helper, + self.ovsdb_monitor_respawn_interval) as pm: + + self.rpc_loop(polling_manager=pm) + + def _handle_sigterm(self, signum, frame): + LOG.debug("Agent caught SIGTERM, quitting daemon loop.") + self.run_daemon_loop = False + + +def create_agent_config_map(config): + """Create a map of agent config parameters. 
+ + :param config: an instance of cfg.CONF + :returns: a map of agent configuration parameters + """ + try: + bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings) + except ValueError as e: + raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) + + kwargs = dict( + integ_br=config.OVS.integration_bridge, + tun_br=config.OVS.tunnel_bridge, + local_ip=config.OVS.local_ip, + bridge_mappings=bridge_mappings, + root_helper=config.AGENT.root_helper, + polling_interval=config.AGENT.polling_interval, + minimize_polling=config.AGENT.minimize_polling, + tunnel_types=config.AGENT.tunnel_types, + veth_mtu=config.AGENT.veth_mtu, + enable_distributed_routing=config.AGENT.enable_distributed_routing, + l2_population=config.AGENT.l2_population, + arp_responder=config.AGENT.arp_responder, + use_veth_interconnection=config.OVS.use_veth_interconnection, + ) + + # If enable_tunneling is TRUE, set tunnel_type to default to GRE + if config.OVS.enable_tunneling and not kwargs['tunnel_types']: + kwargs['tunnel_types'] = [p_const.TYPE_GRE] + + # Verify the tunnel_types specified are valid + for tun in kwargs['tunnel_types']: + if tun not in constants.TUNNEL_NETWORK_TYPES: + msg = _('Invalid tunnel type specified: %s') % tun + raise ValueError(msg) + if not kwargs['local_ip']: + msg = _('Tunneling cannot be enabled without a valid local_ip.') + raise ValueError(msg) + + return kwargs + + +def main(): + cfg.CONF.register_opts(ip_lib.OPTS) + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + q_utils.log_opt_values(LOG) + + try: + agent_config = create_agent_config_map(cfg.CONF) + except ValueError as e: + LOG.error(_('%s Agent terminated!'), e) + sys.exit(1) + + is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper'] + if is_xen_compute_host: + # Force ip_lib to always use the root helper to ensure that ip + # commands target xen dom0 rather than domU. + cfg.CONF.set_default('ip_lib_force_root', True) + + agent = OVSNeutronAgent(**agent_config) + signal.signal(signal.SIGTERM, agent._handle_sigterm) + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + agent.daemon_loop() + + +if __name__ == "__main__": + main() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/__init__.py new file mode 100644 index 00000000..8ac9340e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/config.py new file mode 100644 index 00000000..3f624e1b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/config.py @@ -0,0 +1,97 @@ +# Copyright 2012 Red Hat, Inc.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.plugins.openvswitch.common import constants + + +DEFAULT_BRIDGE_MAPPINGS = [] +DEFAULT_VLAN_RANGES = [] +DEFAULT_TUNNEL_RANGES = [] +DEFAULT_TUNNEL_TYPES = [] + +ovs_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("Integration bridge to use")), + cfg.BoolOpt('enable_tunneling', default=False, + help=_("Enable tunneling support")), + cfg.StrOpt('tunnel_bridge', default='br-tun', + help=_("Tunnel bridge to use")), + cfg.StrOpt('int_peer_patch_port', default='patch-tun', + help=_("Peer patch port in integration bridge for tunnel " + "bridge")), + cfg.StrOpt('tun_peer_patch_port', default='patch-int', + help=_("Peer patch port in tunnel bridge for integration " + "bridge")), + cfg.StrOpt('local_ip', default='', + help=_("Local IP address of GRE tunnel endpoints.")), + cfg.ListOpt('bridge_mappings', + default=DEFAULT_BRIDGE_MAPPINGS, + help=_("List of <physical_network>:<bridge>")), + cfg.StrOpt('tenant_network_type', default='local', + help=_("Network type for tenant networks " + "(local, vlan, gre, vxlan, or none)")), + cfg.ListOpt('network_vlan_ranges', + default=DEFAULT_VLAN_RANGES, + help=_("List of <physical_network>:<vlan_min>:<vlan_max> " + "or <physical_network>")), + cfg.ListOpt('tunnel_id_ranges', + default=DEFAULT_TUNNEL_RANGES, + help=_("List of <tun_min>:<tun_max>")), + cfg.StrOpt('tunnel_type', default='', + help=_("The type of tunnels to use when utilizing tunnels, " + "either 'gre' or 'vxlan'")), + cfg.BoolOpt('use_veth_interconnection', default=False, + help=_("Use veths instead of patch ports to interconnect the " + "integration bridge to physical bridges")), +] + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), + cfg.BoolOpt('minimize_polling', + default=True, + help=_("Minimize polling by monitoring ovsdb for interface " + "changes.")), + cfg.IntOpt('ovsdb_monitor_respawn_interval', + default=constants.DEFAULT_OVSDBMON_RESPAWN, + help=_("The number of seconds to wait before respawning the " + "ovsdb monitor after losing communication with it")), + cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES, + help=_("Network types supported by the agent " + "(gre and/or vxlan)")), + cfg.IntOpt('vxlan_udp_port', default=constants.VXLAN_UDP_PORT, + help=_("The UDP port to use for VXLAN tunnels.")), + cfg.IntOpt('veth_mtu', + help=_("MTU size of veth interfaces")), + cfg.BoolOpt('l2_population', default=False, + help=_("Use ml2 l2population mechanism driver to learn " + "remote mac and IPs and improve tunnel scalability")), + cfg.BoolOpt('arp_responder', default=False, + help=_("Enable local ARP responder if it is supported")), + cfg.BoolOpt('dont_fragment', default=True, + help=_("Set or un-set the don't fragment (DF) bit on " + "outgoing IP packets carrying GRE/VXLAN tunnels")), + cfg.BoolOpt('enable_distributed_routing', default=False, + help=_("Make the l2 agent run in dvr mode")), +] + +
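The two option groups above are registered under the "OVS" and "AGENT" sections of the agent configuration by the register_opts calls that follow. A minimal sketch of how agent code reads these options once registration has happened; the values noted in the comments are simply the defaults declared above:

    from oslo.config import cfg

    # Assumes ovs_opts and agent_opts have been registered under the
    # "OVS" and "AGENT" groups, as done at the end of this module.
    integ_br = cfg.CONF.OVS.integration_bridge    # 'br-int' by default
    tunneling = cfg.CONF.OVS.enable_tunneling     # False by default
    interval = cfg.CONF.AGENT.polling_interval    # 2 seconds by default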
+cfg.CONF.register_opts(ovs_opts, "OVS") +cfg.CONF.register_opts(agent_opts, "AGENT") +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/constants.py new file mode 100644 index 00000000..8b65e449 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/common/constants.py @@ -0,0 +1,67 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.plugins.common import constants as p_const + + +# Special vlan_id value in ovs_vlan_allocations table indicating flat network +FLAT_VLAN_ID = -1 + +# Topic for tunnel notifications between the plugin and agent +TUNNEL = 'tunnel' + +# Values for network_type +VXLAN_UDP_PORT = 4789 + +# Name prefixes for veth device or patch port pair linking the integration +# bridge with the physical bridge for a physical network +PEER_INTEGRATION_PREFIX = 'int-' +PEER_PHYSICAL_PREFIX = 'phy-' + +# Nonexistent peer used to create patch ports without associating them, it +# allows to define flows before association +NONEXISTENT_PEER = 'nonexistent-peer' + +# The different types of tunnels +TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN] + +# Various tables for DVR use of integration bridge flows +LOCAL_SWITCHING = 0 +DVR_TO_SRC_MAC = 1 + +# Various tables for tunneling flows +DVR_PROCESS = 1 +PATCH_LV_TO_TUN = 2 +GRE_TUN_TO_LV = 3 +VXLAN_TUN_TO_LV = 4 +DVR_NOT_LEARN = 9 +LEARN_FROM_TUN = 10 +UCAST_TO_TUN = 20 +ARP_RESPONDER = 21 +FLOOD_TO_TUN = 22 +CANARY_TABLE = 23 + +# Map tunnel types to tables number +TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV, + p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV} + +# The default respawn interval for the ovsdb monitor +DEFAULT_OVSDBMON_RESPAWN = 30 + +# Special return value for an invalid OVS ofport +INVALID_OFPORT = '-1' + +# Represent invalid OF Port +OFPORT_INVALID = -1 diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_db_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_db_v2.py new file mode 100644 index 00000000..c9820ce3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_db_v2.py @@ -0,0 +1,396 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
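The file added here, ovs_db_v2.py, holds the monolithic OVS plugin's database helpers for VLAN and tunnel segment allocation. A minimal usage sketch of the VLAN helpers defined below, assuming the configured ranges have already been parsed into the {physical_network: [(vlan_min, vlan_max)]} form that sync_vlan_allocations expects; the physical network name 'physnet1' is purely illustrative:

    import neutron.db.api as db
    from neutron.plugins.openvswitch import ovs_db_v2

    ranges = {'physnet1': [(100, 199)]}
    ovs_db_v2.sync_vlan_allocations(ranges)               # seed ovs_vlan_allocations
    session = db.get_session()
    physnet, vlan_id = ovs_db_v2.reserve_vlan(session)    # grab any free VLAN
    ovs_db_v2.release_vlan(session, physnet, vlan_id, ranges)  # return it to the pool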
+ +from oslo.db import exception as db_exc +from six import moves +from sqlalchemy import func +from sqlalchemy.orm import exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.openvswitch.common import constants +from neutron.plugins.openvswitch import ovs_models_v2 + +LOG = logging.getLogger(__name__) + + +def get_network_binding(session, network_id): + session = session or db.get_session() + try: + binding = (session.query(ovs_models_v2.NetworkBinding). + filter_by(network_id=network_id). + one()) + return binding + except exc.NoResultFound: + return + + +def add_network_binding(session, network_id, network_type, + physical_network, segmentation_id): + with session.begin(subtransactions=True): + binding = ovs_models_v2.NetworkBinding(network_id, network_type, + physical_network, + segmentation_id) + session.add(binding) + return binding + + +def sync_vlan_allocations(network_vlan_ranges): + """Synchronize vlan_allocations table with configured VLAN ranges.""" + + session = db.get_session() + with session.begin(): + # get existing allocations for all physical networks + allocations = dict() + allocs = (session.query(ovs_models_v2.VlanAllocation). + all()) + for alloc in allocs: + if alloc.physical_network not in allocations: + allocations[alloc.physical_network] = set() + allocations[alloc.physical_network].add(alloc) + + # process vlan ranges for each configured physical network + for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): + # determine current configured allocatable vlans for this + # physical network + vlan_ids = set() + for vlan_range in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) + + # remove from table unallocated vlans not currently allocatable + if physical_network in allocations: + for alloc in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(alloc.vlan_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing vlan %(vlan_id)s on " + "physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': physical_network}) + session.delete(alloc) + del allocations[physical_network] + + # add missing allocatable vlans to table + for vlan_id in sorted(vlan_ids): + alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id) + session.add(alloc) + + # remove from table unallocated vlans for any unconfigured physical + # networks + for allocs in allocations.itervalues(): + for alloc in allocs: + if not alloc.allocated: + LOG.debug(_("Removing vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + session.delete(alloc) + + +def get_vlan_allocation(physical_network, vlan_id): + session = db.get_session() + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + one()) + return alloc + except exc.NoResultFound: + return + + +def reserve_vlan(session): + with session.begin(subtransactions=True): + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(allocated=False). + with_lockmode('update'). 
+ first()) + if alloc: + LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + alloc.allocated = True + return (alloc.physical_network, alloc.vlan_id) + raise n_exc.NoNetworkAvailable() + + +def reserve_specific_vlan(session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). + one()) + if alloc.allocated: + if vlan_id == constants.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse(vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id) + alloc.allocated = True + session.add(alloc) + + +def release_vlan(session, physical_network, vlan_id, network_vlan_ranges): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). + one()) + alloc.allocated = False + inside = False + for vlan_range in network_vlan_ranges.get(physical_network, []): + if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]: + inside = True + break + if not inside: + session.delete(alloc) + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + else: + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s to pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan_id)s on physical network " + "%(physical_network)s not found"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + + +def sync_tunnel_allocations(tunnel_id_ranges): + """Synchronize tunnel_allocations table with configured tunnel ranges.""" + + # determine current configured allocatable tunnels + tunnel_ids = set() + for tunnel_id_range in tunnel_id_ranges: + tun_min, tun_max = tunnel_id_range + if tun_max + 1 - tun_min > 1000000: + LOG.error(_("Skipping unreasonable tunnel ID range " + "%(tun_min)s:%(tun_max)s"), + {'tun_min': tun_min, 'tun_max': tun_max}) + else: + tunnel_ids |= set(moves.xrange(tun_min, tun_max + 1)) + + session = db.get_session() + with session.begin(): + # remove from table unallocated tunnels not currently allocatable + allocs = (session.query(ovs_models_v2.TunnelAllocation). 
+ all()) + for alloc in allocs: + try: + # see if tunnel is allocatable + tunnel_ids.remove(alloc.tunnel_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing tunnel %s from pool"), + alloc.tunnel_id) + session.delete(alloc) + + # add missing allocatable tunnels to table + for tunnel_id in sorted(tunnel_ids): + alloc = ovs_models_v2.TunnelAllocation(tunnel_id) + session.add(alloc) + + +def get_tunnel_allocation(tunnel_id): + session = db.get_session() + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). + one()) + return alloc + except exc.NoResultFound: + return + + +def reserve_tunnel(session): + with session.begin(subtransactions=True): + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if alloc: + LOG.debug(_("Reserving tunnel %s from pool"), alloc.tunnel_id) + alloc.allocated = True + return alloc.tunnel_id + raise n_exc.NoNetworkAvailable() + + +def reserve_specific_tunnel(session, tunnel_id): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). + one()) + if alloc.allocated: + raise n_exc.TunnelIdInUse(tunnel_id=tunnel_id) + LOG.debug(_("Reserving specific tunnel %s from pool"), tunnel_id) + alloc.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific tunnel %s outside pool"), + tunnel_id) + alloc = ovs_models_v2.TunnelAllocation(tunnel_id) + alloc.allocated = True + session.add(alloc) + + +def release_tunnel(session, tunnel_id, tunnel_id_ranges): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). 
+ one()) + alloc.allocated = False + inside = False + for tunnel_id_range in tunnel_id_ranges: + if (tunnel_id >= tunnel_id_range[0] + and tunnel_id <= tunnel_id_range[1]): + inside = True + break + if not inside: + session.delete(alloc) + LOG.debug(_("Releasing tunnel %s outside pool"), tunnel_id) + else: + LOG.debug(_("Releasing tunnel %s to pool"), tunnel_id) + except exc.NoResultFound: + LOG.warning(_("tunnel_id %s not found"), tunnel_id) + + +def get_port(port_id): + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + except exc.NoResultFound: + port = None + return port + + +def get_port_from_device(port_id): + """Get port from database.""" + LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id == port_id) + port_and_sgs = query.all() + if not port_and_sgs: + return None + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict[ext_sg.SECURITYGROUPS] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +def set_port_status(port_id, status): + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) + + +def get_tunnel_endpoints(): + session = db.get_session() + + tunnels = session.query(ovs_models_v2.TunnelEndpoint) + return [{'id': tunnel.id, + 'ip_address': tunnel.ip_address} for tunnel in tunnels] + + +def _generate_tunnel_id(session): + max_tunnel_id = session.query( + func.max(ovs_models_v2.TunnelEndpoint.id)).scalar() or 0 + return max_tunnel_id + 1 + + +def add_tunnel_endpoint(ip, max_retries=10): + """Return the endpoint of the given IP address or generate a new one.""" + + # NOTE(rpodolyaka): generation of a new tunnel endpoint must be put into a + # repeatedly executed transactional block to ensure it + # doesn't conflict with any other concurrently executed + # DB transactions in spite of the specified transactions + # isolation level value + for i in moves.xrange(max_retries): + LOG.debug(_('Adding a tunnel endpoint for %s'), ip) + try: + session = db.get_session() + with session.begin(subtransactions=True): + tunnel = (session.query(ovs_models_v2.TunnelEndpoint). + filter_by(ip_address=ip).with_lockmode('update'). 
+ first()) + + if tunnel is None: + tunnel_id = _generate_tunnel_id(session) + tunnel = ovs_models_v2.TunnelEndpoint(ip, tunnel_id) + session.add(tunnel) + + return tunnel + except db_exc.DBDuplicateEntry: + # a concurrent transaction has been committed, try again + LOG.debug(_('Adding a tunnel endpoint failed because a concurrent ' + 'transaction had been committed (%s attempts left)'), + max_retries - (i + 1)) + + raise n_exc.NeutronException( + message=_('Unable to generate a new tunnel id')) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_models_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_models_v2.py new file mode 100644 index 00000000..59b2c14a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_models_v2.py @@ -0,0 +1,107 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from sqlalchemy import Boolean, Column, ForeignKey, Integer, String +from sqlalchemy.schema import UniqueConstraint + +from neutron.db import model_base +from neutron.db import models_v2 +from sqlalchemy import orm + + +class VlanAllocation(model_base.BASEV2): + """Represents allocation state of vlan_id on physical network.""" + __tablename__ = 'ovs_vlan_allocations' + + physical_network = Column(String(64), nullable=False, primary_key=True) + vlan_id = Column(Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = Column(Boolean, nullable=False) + + def __init__(self, physical_network, vlan_id): + self.physical_network = physical_network + self.vlan_id = vlan_id + self.allocated = False + + def __repr__(self): + return "<VlanAllocation(%s,%d,%s)>" % (self.physical_network, + self.vlan_id, self.allocated) + + +class TunnelAllocation(model_base.BASEV2): + """Represents allocation state of tunnel_id.""" + __tablename__ = 'ovs_tunnel_allocations' + + tunnel_id = Column(Integer, nullable=False, primary_key=True, + autoincrement=False) + allocated = Column(Boolean, nullable=False) + + def __init__(self, tunnel_id): + self.tunnel_id = tunnel_id + self.allocated = False + + def __repr__(self): + return "<TunnelAllocation(%d,%s)>" % (self.tunnel_id, self.allocated) + + +class NetworkBinding(model_base.BASEV2): + """Represents binding of virtual network to physical realization.""" + __tablename__ = 'ovs_network_bindings' + + network_id = Column(String(36), + ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + # 'gre', 'vlan', 'flat', 'local' + network_type = Column(String(32), nullable=False) + physical_network = Column(String(64)) + segmentation_id = Column(Integer) # tunnel_id or vlan_id + + network = orm.relationship( + models_v2.Network, + backref=orm.backref("binding", lazy='joined', + uselist=False, cascade='delete')) + + def __init__(self, network_id, network_type, physical_network, + segmentation_id): + self.network_id = network_id + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + + def __repr__(self): 
return "" % (self.network_id, + self.network_type, + self.physical_network, + self.segmentation_id) + + +class TunnelEndpoint(model_base.BASEV2): + """Represents tunnel endpoint in RPC mode.""" + __tablename__ = 'ovs_tunnel_endpoints' + __table_args__ = ( + UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'), + model_base.BASEV2.__table_args__, + ) + + ip_address = Column(String(64), primary_key=True) + id = Column(Integer, nullable=False) + + def __init__(self, ip_address, id): + self.ip_address = ip_address + self.id = id + + def __repr__(self): + return "" % (self.ip_address, self.id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_neutron_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_neutron_plugin.py new file mode 100644 index 00000000..d5f89902 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/openvswitch/ovs_neutron_plugin.py @@ -0,0 +1,634 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extradhcpopt_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.openvswitch.common import config # noqa +from neutron.plugins.openvswitch.common import constants +from neutron.plugins.openvswitch import ovs_db_v2 + + +LOG = logging.getLogger(__name__) + + +class OVSRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + # history + # 1.0 Initial version + # 1.1 Support Security 
Group RPC + # 1.2 Support get_devices_details_list + + RPC_API_VERSION = '1.2' + + def __init__(self, notifier, tunnel_type): + super(OVSRpcCallbacks, self).__init__() + self.notifier = notifier + self.tunnel_type = tunnel_type + + @classmethod + def get_port_from_device(cls, device): + port = ovs_db_v2.get_port_from_device(device) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = ovs_db_v2.get_port(device) + if port: + binding = ovs_db_v2.get_network_binding(None, port['network_id']) + entry = {'device': device, + 'network_id': port['network_id'], + 'port_id': port['id'], + 'admin_state_up': port['admin_state_up'], + 'network_type': binding.network_type, + 'segmentation_id': binding.segmentation_id, + 'physical_network': binding.physical_network} + new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] + else q_const.PORT_STATUS_DOWN) + if port['status'] != new_status: + ovs_db_v2.set_port_status(port['id'], new_status) + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def get_devices_details_list(self, rpc_context, **kwargs): + return [ + self.get_device_details( + rpc_context, + device=device, + **kwargs + ) + for device in kwargs.pop('devices', []) + ] + + def update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = ovs_db_v2.get_port(device) + LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + if port: + entry = {'device': device, + 'exists': True} + plugin = manager.NeutronManager.get_plugin() + if (host and + not plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + elif port['status'] != q_const.PORT_STATUS_DOWN: + # Set port status to DOWN + ovs_db_v2.set_port_status(port['id'], + q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = ovs_db_v2.get_port(device) + LOG.debug(_("Device %(device)s up on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + if port: + if (host and + not plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return + elif port['status'] != q_const.PORT_STATUS_ACTIVE: + ovs_db_v2.set_port_status(port['id'], + q_const.PORT_STATUS_ACTIVE) + else: + LOG.debug(_("%s can not be found in database"), device) + + def tunnel_sync(self, rpc_context, **kwargs): + """Update new tunnel. + + Updates the datbase with the tunnel IP. All listening agents will also + be notified about the new tunnel IP. 
+ """ + tunnel_ip = kwargs.get('tunnel_ip') + # Update the database with the IP + tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip) + tunnels = ovs_db_v2.get_tunnel_endpoints() + entry = dict() + entry['tunnels'] = tunnels + # Notify all other listening agents + self.notifier.tunnel_update(rpc_context, tunnel.ip_address, + tunnel.id, self.tunnel_type) + # Return the list of tunnels IP's to the agent + return entry + + +class AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + '''Agent side of the openvswitch rpc API. + + API version history: + 1.0 - Initial version. + + ''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + self.topic_tunnel_update = topics.get_topic_name(topic, + constants.TUNNEL, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, network_type, segmentation_id, + physical_network): + self.fanout_cast(context, + self.make_msg('port_update', + port=port, + network_type=network_type, + segmentation_id=segmentation_id, + physical_network=physical_network), + topic=self.topic_port_update) + + def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type): + self.fanout_cast(context, + self.make_msg('tunnel_update', + tunnel_ip=tunnel_ip, + tunnel_id=tunnel_id, + tunnel_type=tunnel_type), + topic=self.topic_tunnel_update) + + +class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + portbindings_db.PortBindingMixin, + extradhcpopt_db.ExtraDhcpOptMixin, + addr_pair_db.AllowedAddressPairsMixin): + + """Implement the Neutron abstractions using Open vSwitch. + + Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or + a new VLAN is created for each network. An agent is relied upon to + perform the actual OVS configuration on each host. + + The provider extension is also supported. As discussed in + https://bugs.launchpad.net/neutron/+bug/1023156, this class could + be simplified, and filtering on extended attributes could be + handled, by adding support for extended attributes to the + NeutronDbPluginV2 base class. When that occurs, this class should + be updated to take advantage of it. + + The port binding extension enables an external application relay + information to and from the plugin. + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. 
Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + _supported_extension_aliases = ["provider", "external-net", "router", + "ext-gw-mode", "binding", "quotas", + "security-group", "agent", "extraroute", + "l3_agent_scheduler", + "dhcp_agent_scheduler", + "extra_dhcp_opt", + "allowed-address-pairs"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.NETWORKS, ['_extend_network_dict_provider_ovs']) + + def __init__(self, configfile=None): + super(OVSNeutronPluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True}} + self._parse_network_vlan_ranges() + ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges) + self.tenant_network_type = cfg.CONF.OVS.tenant_network_type + if self.tenant_network_type not in [svc_constants.TYPE_LOCAL, + svc_constants.TYPE_VLAN, + svc_constants.TYPE_GRE, + svc_constants.TYPE_VXLAN, + svc_constants.TYPE_NONE]: + LOG.error(_("Invalid tenant_network_type: %s. " + "Server terminated!"), + self.tenant_network_type) + sys.exit(1) + self.enable_tunneling = cfg.CONF.OVS.enable_tunneling + self.tunnel_type = None + if self.enable_tunneling: + self.tunnel_type = (cfg.CONF.OVS.tunnel_type or + svc_constants.TYPE_GRE) + elif cfg.CONF.OVS.tunnel_type: + self.tunnel_type = cfg.CONF.OVS.tunnel_type + self.enable_tunneling = True + self.tunnel_id_ranges = [] + if self.enable_tunneling: + self._parse_tunnel_id_ranges() + ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges) + elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES: + LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. " + "Server terminated!"), self.tenant_network_type) + sys.exit(1) + self.setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + + def setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.notifier = AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + self.endpoints = [OVSRpcCallbacks(self.notifier, self.tunnel_type), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _parse_network_vlan_ranges(self): + try: + self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.OVS.network_vlan_ranges) + except Exception as ex: + LOG.error(_("%s. 
Server terminated!"), ex) + sys.exit(1) + LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) + + def _parse_tunnel_id_ranges(self): + for entry in cfg.CONF.OVS.tunnel_id_ranges: + entry = entry.strip() + try: + tun_min, tun_max = entry.split(':') + self.tunnel_id_ranges.append((int(tun_min), int(tun_max))) + except ValueError as ex: + LOG.error(_("Invalid tunnel ID range: " + "'%(range)s' - %(e)s. Server terminated!"), + {'range': entry, 'e': ex}) + sys.exit(1) + LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges) + + def _extend_network_dict_provider_ovs(self, network, net_db, + net_binding=None): + # this method used in two cases: when binding is provided explicitly + # and when it is a part of db model object + binding = net_db.binding if net_db else net_binding + network[provider.NETWORK_TYPE] = binding.network_type + if binding.network_type in constants.TUNNEL_NETWORK_TYPES: + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = binding.segmentation_id + elif binding.network_type == svc_constants.TYPE_FLAT: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = None + elif binding.network_type == svc_constants.TYPE_VLAN: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.segmentation_id + elif binding.network_type == svc_constants.TYPE_LOCAL: + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_FLAT: + if segmentation_id_set: + msg = _("provider:segmentation_id specified for flat network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = constants.FLAT_VLAN_ID + elif network_type == svc_constants.TYPE_VLAN: + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if not utils.is_valid_vlan_tag(segmentation_id): + msg = (_("provider:segmentation_id out of range " + "(%(min_id)s through %(max_id)s)") % + {'min_id': q_const.MIN_VLAN_TAG, + 'max_id': q_const.MAX_VLAN_TAG}) + raise n_exc.InvalidInput(error_message=msg) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + if not self.enable_tunneling: + msg = _("%s networks are not enabled") % network_type + raise n_exc.InvalidInput(error_message=msg) + if physical_network_set: + msg = _("provider:physical_network specified for %s " + "network") % network_type + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_LOCAL: + if physical_network_set: + msg = _("provider:physical_network specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if 
segmentation_id_set: + msg = _("provider:segmentation_id specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = None + else: + msg = _("provider:network_type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + + if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: + if physical_network_set: + if physical_network not in self.network_vlan_ranges: + msg = _("Unknown provider:physical_network " + "%s") % physical_network + raise n_exc.InvalidInput(error_message=msg) + elif 'default' in self.network_vlan_ranges: + physical_network = 'default' + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + + return (network_type, physical_network, segmentation_id) + + def create_network(self, context, network): + (network_type, physical_network, + segmentation_id) = self._process_provider_create(context, + network['network']) + + session = context.session + #set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + with session.begin(subtransactions=True): + if not network_type: + # tenant network + network_type = self.tenant_network_type + if network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + elif network_type == svc_constants.TYPE_VLAN: + (physical_network, + segmentation_id) = ovs_db_v2.reserve_vlan(session) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + segmentation_id = ovs_db_v2.reserve_tunnel(session) + # no reservation needed for TYPE_LOCAL + else: + # provider network + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + ovs_db_v2.reserve_specific_vlan(session, physical_network, + segmentation_id) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + ovs_db_v2.reserve_specific_tunnel(session, segmentation_id) + # no reservation needed for TYPE_LOCAL + net = super(OVSNeutronPluginV2, self).create_network(context, + network) + binding = ovs_db_v2.add_network_binding(session, net['id'], + network_type, + physical_network, + segmentation_id) + + self._process_l3_create(context, net, network['network']) + # passing None as db model to use binding object + self._extend_network_dict_provider_ovs(net, None, binding) + # note - exception will rollback entire transaction + LOG.debug(_("Created network: %s"), net['id']) + return net + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + net = super(OVSNeutronPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def delete_network(self, context, id): + session = context.session + with session.begin(subtransactions=True): + binding = ovs_db_v2.get_network_binding(session, id) + self._process_l3_delete(context, id) + super(OVSNeutronPluginV2, self).delete_network(context, id) + if binding.network_type in constants.TUNNEL_NETWORK_TYPES: + ovs_db_v2.release_tunnel(session, binding.segmentation_id, + self.tunnel_id_ranges) + elif binding.network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + ovs_db_v2.release_vlan(session, binding.physical_network, + binding.segmentation_id, + self.network_vlan_ranges) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal is 
not necessary + self.notifier.network_delete(context, id) + + def get_network(self, context, id, fields=None): + session = context.session + with session.begin(subtransactions=True): + net = super(OVSNeutronPluginV2, self).get_network(context, + id, None) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, + limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(OVSNeutronPluginV2, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + # Set port status as 'DOWN'. This will be updated by agent + port['port']['status'] = q_const.PORT_STATUS_DOWN + port_data = port['port'] + session = context.session + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + port = super(OVSNeutronPluginV2, self).create_port(context, port) + self._process_portbindings_create_and_update(context, + port_data, port) + self._process_port_create_security_group(context, port, sgids) + self._process_port_create_extra_dhcp_opts(context, port, + dhcp_opts) + port[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, port, + port_data.get(addr_pair.ADDRESS_PAIRS))) + self.notify_security_groups_member_updated(context, port) + return port + + def update_port(self, context, id, port): + session = context.session + need_port_update_notify = False + with session.begin(subtransactions=True): + original_port = super(OVSNeutronPluginV2, self).get_port( + context, id) + updated_port = super(OVSNeutronPluginV2, self).update_port( + context, id, port) + if addr_pair.ADDRESS_PAIRS in port['port']: + need_port_update_notify |= ( + self.update_address_pairs_on_port(context, id, port, + original_port, + updated_port)) + need_port_update_notify |= self.update_security_group_on_port( + context, id, port, original_port, updated_port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify |= self._update_extra_dhcp_opts_on_port( + context, id, port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + binding = ovs_db_v2.get_network_binding(None, + updated_port['network_id']) + self.notifier.port_update(context, updated_port, + binding.network_type, + binding.segmentation_id, + binding.physical_network) + return updated_port + + def delete_port(self, context, id, l3_port_check=True): + + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. 
+ if l3_port_check: + self.prevent_l3_port_deletion(context, id) + + session = context.session + with session.begin(subtransactions=True): + self.disassociate_floatingips(context, id) + port = self.get_port(context, id) + self._delete_port_security_group_bindings(context, id) + super(OVSNeutronPluginV2, self).delete_port(context, id) + + self.notify_security_groups_member_updated(context, port) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/__init__.py new file mode 100644 index 00000000..c166c352 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/common/__init__.py new file mode 100644 index 00000000..c166c352 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/common/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/common/exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/common/exceptions.py new file mode 100644 index 00000000..58fdcad3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/common/exceptions.py @@ -0,0 +1,28 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. 
+ + +"""Neutron PLUMgrid Plugin exceptions""" + +from neutron.common import exceptions as base_exec + + +class PLUMgridException(base_exec.NeutronException): + message = _("PLUMgrid Plugin Error: %(err_msg)s") + + +class PLUMgridConnectionFailed(PLUMgridException): + message = _("Connection failed with PLUMgrid Director: %(err_msg)s") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/__init__.py new file mode 100644 index 00000000..c166c352 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/fake_plumlib.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/fake_plumlib.py new file mode 100644 index 00000000..16ce799f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/fake_plumlib.py @@ -0,0 +1,98 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. + +from neutron.extensions import providernet as provider +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class Plumlib(): + """ + Class PLUMgrid Fake Library. This library is a by-pass implementation + for the PLUMgrid Library. This class is being used by the unit test + integration in Neutron. 
+ """ + + def __init__(self): + LOG.info(_('Python PLUMgrid Fake Library Started ')) + pass + + def director_conn(self, director_plumgrid, director_port, timeout, + director_admin, director_password): + LOG.info(_('Fake Director: %s'), + director_plumgrid + ':' + director_port) + pass + + def create_network(self, tenant_id, net_db, network): + net_db["network"] = {} + for key in (provider.NETWORK_TYPE, + provider.PHYSICAL_NETWORK, + provider.SEGMENTATION_ID): + net_db["network"][key] = network["network"][key] + return net_db + + def update_network(self, tenant_id, net_id): + pass + + def delete_network(self, net_db, net_id): + pass + + def create_subnet(self, sub_db, net_db, ipnet): + pass + + def update_subnet(self, orig_sub_db, new_sub_db, ipnet): + pass + + def delete_subnet(self, tenant_id, net_db, net_id): + pass + + def create_port(self, port_db, router_db): + pass + + def update_port(self, port_db, router_db): + pass + + def delete_port(self, port_db, router_db): + pass + + def create_router(self, tenant_id, router_db): + pass + + def update_router(self, router_db, router_id): + pass + + def delete_router(self, tenant_id, router_id): + pass + + def add_router_interface(self, tenant_id, router_id, port_db, ipnet): + pass + + def remove_router_interface(self, tenant_id, net_id, router_id): + pass + + def create_floatingip(self, floating_ip): + pass + + def update_floatingip(self, floating_ip_orig, floating_ip, id): + pass + + def delete_floatingip(self, floating_ip_orig, id): + pass + + def disassociate_floatingips(self, fip, port_id): + return dict((key, fip[key]) for key in ("id", "floating_network_id", + "floating_ip_address")) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/plumlib.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/plumlib.py new file mode 100644 index 00000000..e0c59dd5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/drivers/plumlib.py @@ -0,0 +1,99 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. + +""" +Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI) +This plugin will forward authenticated REST API calls +to the PLUMgrid Network Management System called Director +""" + +from plumgridlib import plumlib + +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class Plumlib(object): + """ + Class PLUMgrid Python Library. This library is a third-party tool + needed by PLUMgrid plugin to implement all core API in Neutron. 
+ """ + + def __init__(self): + LOG.info(_('Python PLUMgrid Library Started ')) + + def director_conn(self, director_plumgrid, director_port, timeout, + director_admin, director_password): + self.plumlib = plumlib.Plumlib(director_plumgrid, + director_port, + timeout, + director_admin, + director_password) + + def create_network(self, tenant_id, net_db, network): + self.plumlib.create_network(tenant_id, net_db, network) + + def update_network(self, tenant_id, net_id): + self.plumlib.update_network(tenant_id, net_id) + + def delete_network(self, net_db, net_id): + self.plumlib.delete_network(net_db, net_id) + + def create_subnet(self, sub_db, net_db, ipnet): + self.plumlib.create_subnet(sub_db, net_db, ipnet) + + def update_subnet(self, orig_sub_db, new_sub_db, ipnet): + self.plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet) + + def delete_subnet(self, tenant_id, net_db, net_id): + self.plumlib.delete_subnet(tenant_id, net_db, net_id) + + def create_port(self, port_db, router_db): + self.plumlib.create_port(port_db, router_db) + + def update_port(self, port_db, router_db): + self.plumlib.update_port(port_db, router_db) + + def delete_port(self, port_db, router_db): + self.plumlib.delete_port(port_db, router_db) + + def create_router(self, tenant_id, router_db): + self.plumlib.create_router(tenant_id, router_db) + + def update_router(self, router_db, router_id): + self.plumlib.update_router(router_db, router_id) + + def delete_router(self, tenant_id, router_id): + self.plumlib.delete_router(tenant_id, router_id) + + def add_router_interface(self, tenant_id, router_id, port_db, ipnet): + self.plumlib.add_router_interface(tenant_id, router_id, port_db, ipnet) + + def remove_router_interface(self, tenant_id, net_id, router_id): + self.plumlib.remove_router_interface(tenant_id, net_id, router_id) + + def create_floatingip(self, floating_ip): + self.plumlib.create_floatingip(floating_ip) + + def update_floatingip(self, floating_ip_orig, floating_ip, id): + self.plumlib.update_floatingip(floating_ip_orig, floating_ip, id) + + def delete_floatingip(self, floating_ip_orig, id): + self.plumlib.delete_floatingip(floating_ip_orig, id) + + def disassociate_floatingips(self, floating_ip, port_id): + self.plumlib.disassociate_floatingips(floating_ip, port_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py new file mode 100644 index 00000000..c166c352 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. 
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py new file mode 100644 index 00000000..33a2396f --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py @@ -0,0 +1,17 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. + +VERSION = "0.2" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py new file mode 100644 index 00000000..4fb61bf7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py @@ -0,0 +1,608 @@ +# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. 
+ +""" +Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI) +This plugin will forward authenticated REST API calls +to the PLUMgrid Network Management System called Director +""" + +import netaddr +from oslo.config import cfg +from sqlalchemy.orm import exc as sa_exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import l3_db +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.extensions import portbindings +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.plumgrid.common import exceptions as plum_excep +from neutron.plugins.plumgrid.plumgrid_plugin import plugin_ver + +LOG = logging.getLogger(__name__) + +director_server_opts = [ + cfg.StrOpt('director_server', default='localhost', + help=_("PLUMgrid Director server to connect to")), + cfg.StrOpt('director_server_port', default='8080', + help=_("PLUMgrid Director server port to connect to")), + cfg.StrOpt('username', default='username', + help=_("PLUMgrid Director admin username")), + cfg.StrOpt('password', default='password', secret=True, + help=_("PLUMgrid Director admin password")), + cfg.IntOpt('servertimeout', default=5, + help=_("PLUMgrid Director server timeout")), + cfg.StrOpt('driver', + default="neutron.plugins.plumgrid.drivers.plumlib.Plumlib", + help=_("PLUMgrid Driver")), ] + +cfg.CONF.register_opts(director_server_opts, "plumgriddirector") + + +class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2, + portbindings_db.PortBindingMixin, + external_net_db.External_net_db_mixin, + l3_db.L3_NAT_db_mixin): + + supported_extension_aliases = ["external-net", "router", "binding", + "quotas", "provider"] + + binding_view = "extension:port_binding:view" + binding_set = "extension:port_binding:set" + + def __init__(self): + LOG.info(_('Neutron PLUMgrid Director: Starting Plugin')) + + super(NeutronPluginPLUMgridV2, self).__init__() + self.plumgrid_init() + + LOG.debug(_('Neutron PLUMgrid Director: Neutron server with ' + 'PLUMgrid Plugin has started')) + + def plumgrid_init(self): + """PLUMgrid initialization.""" + director_plumgrid = cfg.CONF.plumgriddirector.director_server + director_port = cfg.CONF.plumgriddirector.director_server_port + director_admin = cfg.CONF.plumgriddirector.username + director_password = cfg.CONF.plumgriddirector.password + timeout = cfg.CONF.plumgriddirector.servertimeout + plum_driver = cfg.CONF.plumgriddirector.driver + + # PLUMgrid Director info validation + LOG.info(_('Neutron PLUMgrid Director: %s'), director_plumgrid) + self._plumlib = importutils.import_object(plum_driver) + self._plumlib.director_conn(director_plumgrid, director_port, timeout, + director_admin, director_password) + + def create_network(self, context, network): + """Create Neutron network. + + Creates a PLUMgrid-based bridge. 
+ """ + + LOG.debug(_('Neutron PLUMgrid Director: create_network() called')) + + # Plugin DB - Network Create and validation + tenant_id = self._get_tenant_id_for_create(context, + network["network"]) + self._network_admin_state(network) + + with context.session.begin(subtransactions=True): + net_db = super(NeutronPluginPLUMgridV2, + self).create_network(context, network) + # Propagate all L3 data into DB + self._process_l3_create(context, net_db, network['network']) + + try: + LOG.debug(_('PLUMgrid Library: create_network() called')) + self._plumlib.create_network(tenant_id, net_db, network) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Return created network + return net_db + + def update_network(self, context, net_id, network): + """Update Neutron network. + + Updates a PLUMgrid-based bridge. + """ + + LOG.debug(_("Neutron PLUMgrid Director: update_network() called")) + self._network_admin_state(network) + tenant_id = self._get_tenant_id_for_create(context, network["network"]) + + with context.session.begin(subtransactions=True): + # Plugin DB - Network Update + net_db = super( + NeutronPluginPLUMgridV2, self).update_network(context, + net_id, network) + self._process_l3_update(context, net_db, network['network']) + + try: + LOG.debug(_("PLUMgrid Library: update_network() called")) + self._plumlib.update_network(tenant_id, net_id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Return updated network + return net_db + + def delete_network(self, context, net_id): + """Delete Neutron network. + + Deletes a PLUMgrid-based bridge. + """ + + LOG.debug(_("Neutron PLUMgrid Director: delete_network() called")) + net_db = super(NeutronPluginPLUMgridV2, + self).get_network(context, net_id) + + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, net_id) + # Plugin DB - Network Delete + super(NeutronPluginPLUMgridV2, self).delete_network(context, + net_id) + + try: + LOG.debug(_("PLUMgrid Library: update_network() called")) + self._plumlib.delete_network(net_db, net_id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def create_port(self, context, port): + """Create Neutron port. + + Creates a PLUMgrid-based port on the specific Virtual Network + Function (VNF). + """ + LOG.debug(_("Neutron PLUMgrid Director: create_port() called")) + + # Port operations on PLUMgrid Director is an automatic operation + # from the VIF driver operations in Nova. + # It requires admin_state_up to be True + + port["port"]["admin_state_up"] = True + + with context.session.begin(subtransactions=True): + # Plugin DB - Port Create and Return port + port_db = super(NeutronPluginPLUMgridV2, self).create_port(context, + port) + device_id = port_db["device_id"] + if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: + router_db = self._get_router(context, device_id) + else: + router_db = None + + try: + LOG.debug(_("PLUMgrid Library: create_port() called")) + self._plumlib.create_port(port_db, router_db) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Plugin DB - Port Create and Return port + return self._port_viftype_binding(context, port_db) + + def update_port(self, context, port_id, port): + """Update Neutron port. + + Updates a PLUMgrid-based port on the specific Virtual Network + Function (VNF). 
+ """ + LOG.debug(_("Neutron PLUMgrid Director: update_port() called")) + + with context.session.begin(subtransactions=True): + # Plugin DB - Port Create and Return port + port_db = super(NeutronPluginPLUMgridV2, self).update_port( + context, port_id, port) + device_id = port_db["device_id"] + if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: + router_db = self._get_router(context, device_id) + else: + router_db = None + try: + LOG.debug(_("PLUMgrid Library: create_port() called")) + self._plumlib.update_port(port_db, router_db) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Plugin DB - Port Update + return self._port_viftype_binding(context, port_db) + + def delete_port(self, context, port_id, l3_port_check=True): + """Delete Neutron port. + + Deletes a PLUMgrid-based port on the specific Virtual Network + Function (VNF). + """ + + LOG.debug(_("Neutron PLUMgrid Director: delete_port() called")) + + with context.session.begin(subtransactions=True): + # Plugin DB - Port Create and Return port + port_db = super(NeutronPluginPLUMgridV2, + self).get_port(context, port_id) + self.disassociate_floatingips(context, port_id) + super(NeutronPluginPLUMgridV2, self).delete_port(context, port_id) + + if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: + device_id = port_db["device_id"] + router_db = self._get_router(context, device_id) + else: + router_db = None + try: + LOG.debug(_("PLUMgrid Library: delete_port() called")) + self._plumlib.delete_port(port_db, router_db) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def get_port(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + port_db = super(NeutronPluginPLUMgridV2, + self).get_port(context, id, fields) + + self._port_viftype_binding(context, port_db) + return self._fields(port_db, fields) + + def get_ports(self, context, filters=None, fields=None): + with context.session.begin(subtransactions=True): + ports_db = super(NeutronPluginPLUMgridV2, + self).get_ports(context, filters, fields) + for port_db in ports_db: + self._port_viftype_binding(context, port_db) + return [self._fields(port, fields) for port in ports_db] + + def create_subnet(self, context, subnet): + """Create Neutron subnet. + + Creates a PLUMgrid-based DHCP and NAT Virtual Network + Functions (VNFs). 
+ """ + + LOG.debug(_("Neutron PLUMgrid Director: create_subnet() called")) + + with context.session.begin(subtransactions=True): + # Plugin DB - Subnet Create + net_db = super(NeutronPluginPLUMgridV2, self).get_network( + context, subnet['subnet']['network_id'], fields=None) + s = subnet['subnet'] + ipnet = netaddr.IPNetwork(s['cidr']) + + # PLUMgrid Director reserves the last IP address for GW + # when is not defined + if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED: + gw_ip = str(netaddr.IPAddress(ipnet.last - 1)) + subnet['subnet']['gateway_ip'] = gw_ip + + # PLUMgrid reserves the first IP + if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED: + allocation_pool = self._allocate_pools_for_subnet(context, s) + subnet['subnet']['allocation_pools'] = allocation_pool + + sub_db = super(NeutronPluginPLUMgridV2, self).create_subnet( + context, subnet) + + try: + LOG.debug(_("PLUMgrid Library: create_subnet() called")) + self._plumlib.create_subnet(sub_db, net_db, ipnet) + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return sub_db + + def delete_subnet(self, context, subnet_id): + """Delete subnet core Neutron API.""" + + LOG.debug(_("Neutron PLUMgrid Director: delete_subnet() called")) + # Collecting subnet info + sub_db = self._get_subnet(context, subnet_id) + tenant_id = self._get_tenant_id_for_create(context, subnet_id) + net_id = sub_db["network_id"] + net_db = self.get_network(context, net_id) + + with context.session.begin(subtransactions=True): + # Plugin DB - Subnet Delete + super(NeutronPluginPLUMgridV2, self).delete_subnet( + context, subnet_id) + try: + LOG.debug(_("PLUMgrid Library: delete_subnet() called")) + self._plumlib.delete_subnet(tenant_id, net_db, net_id) + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def update_subnet(self, context, subnet_id, subnet): + """Update subnet core Neutron API.""" + + LOG.debug(_("update_subnet() called")) + # Collecting subnet info + orig_sub_db = self._get_subnet(context, subnet_id) + + with context.session.begin(subtransactions=True): + # Plugin DB - Subnet Update + new_sub_db = super(NeutronPluginPLUMgridV2, + self).update_subnet(context, subnet_id, subnet) + ipnet = netaddr.IPNetwork(new_sub_db['cidr']) + + try: + # PLUMgrid Server does not support updating resources yet + LOG.debug(_("PLUMgrid Library: update_network() called")) + self._plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return new_sub_db + + def create_router(self, context, router): + """ + Create router extension Neutron API + """ + LOG.debug(_("Neutron PLUMgrid Director: create_router() called")) + + tenant_id = self._get_tenant_id_for_create(context, router["router"]) + + with context.session.begin(subtransactions=True): + + # Create router in DB + router_db = super(NeutronPluginPLUMgridV2, + self).create_router(context, router) + # Create router on the network controller + try: + # Add Router to VND + LOG.debug(_("PLUMgrid Library: create_router() called")) + self._plumlib.create_router(tenant_id, router_db) + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Return created router + return router_db + + def update_router(self, context, router_id, router): + + LOG.debug(_("Neutron PLUMgrid Director: update_router() called")) + + with context.session.begin(subtransactions=True): + router_db = 
super(NeutronPluginPLUMgridV2, + self).update_router(context, router_id, router) + try: + LOG.debug(_("PLUMgrid Library: update_router() called")) + self._plumlib.update_router(router_db, router_id) + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + # Return updated router + return router_db + + def delete_router(self, context, router_id): + LOG.debug(_("Neutron PLUMgrid Director: delete_router() called")) + + with context.session.begin(subtransactions=True): + orig_router = self._get_router(context, router_id) + tenant_id = orig_router["tenant_id"] + + super(NeutronPluginPLUMgridV2, self).delete_router(context, + router_id) + + try: + LOG.debug(_("PLUMgrid Library: delete_router() called")) + self._plumlib.delete_router(tenant_id, router_id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def add_router_interface(self, context, router_id, interface_info): + + LOG.debug(_("Neutron PLUMgrid Director: " + "add_router_interface() called")) + with context.session.begin(subtransactions=True): + # Validate args + router_db = self._get_router(context, router_id) + tenant_id = router_db['tenant_id'] + + # Create interface in DB + int_router = super(NeutronPluginPLUMgridV2, + self).add_router_interface(context, + router_id, + interface_info) + port_db = self._get_port(context, int_router['port_id']) + subnet_id = port_db["fixed_ips"][0]["subnet_id"] + subnet_db = super(NeutronPluginPLUMgridV2, + self)._get_subnet(context, subnet_id) + ipnet = netaddr.IPNetwork(subnet_db['cidr']) + + # Create interface on the network controller + try: + LOG.debug(_("PLUMgrid Library: add_router_interface() called")) + self._plumlib.add_router_interface(tenant_id, router_id, + port_db, ipnet) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return int_router + + def remove_router_interface(self, context, router_id, int_info): + + LOG.debug(_("Neutron PLUMgrid Director: " + "remove_router_interface() called")) + with context.session.begin(subtransactions=True): + # Validate args + router_db = self._get_router(context, router_id) + tenant_id = router_db['tenant_id'] + if 'port_id' in int_info: + port = self._get_port(context, int_info['port_id']) + net_id = port['network_id'] + + elif 'subnet_id' in int_info: + subnet_id = int_info['subnet_id'] + subnet = self._get_subnet(context, subnet_id) + net_id = subnet['network_id'] + + # Remove router in DB + del_int_router = super(NeutronPluginPLUMgridV2, + self).remove_router_interface(context, + router_id, + int_info) + + try: + LOG.debug(_("PLUMgrid Library: " + "remove_router_interface() called")) + self._plumlib.remove_router_interface(tenant_id, + net_id, router_id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return del_int_router + + def create_floatingip(self, context, floatingip): + LOG.debug(_("Neutron PLUMgrid Director: create_floatingip() called")) + + with context.session.begin(subtransactions=True): + + floating_ip = super(NeutronPluginPLUMgridV2, + self).create_floatingip(context, floatingip) + try: + LOG.debug(_("PLUMgrid Library: create_floatingip() called")) + self._plumlib.create_floatingip(floating_ip) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return floating_ip + + def update_floatingip(self, context, id, floatingip): + LOG.debug(_("Neutron PLUMgrid Director: update_floatingip() called")) + + with 
context.session.begin(subtransactions=True): + floating_ip_orig = super(NeutronPluginPLUMgridV2, + self).get_floatingip(context, id) + floating_ip = super(NeutronPluginPLUMgridV2, + self).update_floatingip(context, id, + floatingip) + try: + LOG.debug(_("PLUMgrid Library: update_floatingip() called")) + self._plumlib.update_floatingip(floating_ip_orig, floating_ip, + id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + return floating_ip + + def delete_floatingip(self, context, id): + LOG.debug(_("Neutron PLUMgrid Director: delete_floatingip() called")) + + with context.session.begin(subtransactions=True): + + floating_ip_orig = super(NeutronPluginPLUMgridV2, + self).get_floatingip(context, id) + + super(NeutronPluginPLUMgridV2, self).delete_floatingip(context, id) + + try: + LOG.debug(_("PLUMgrid Library: delete_floatingip() called")) + self._plumlib.delete_floatingip(floating_ip_orig, id) + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + def disassociate_floatingips(self, context, port_id): + LOG.debug(_("Neutron PLUMgrid Director: disassociate_floatingips() " + "called")) + + try: + fip_qry = context.session.query(l3_db.FloatingIP) + floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one() + + LOG.debug(_("PLUMgrid Library: disassociate_floatingips()" + " called")) + self._plumlib.disassociate_floatingips(floating_ip, port_id) + + except sa_exc.NoResultFound: + pass + + except Exception as err_message: + raise plum_excep.PLUMgridException(err_msg=err_message) + + super(NeutronPluginPLUMgridV2, + self).disassociate_floatingips(context, port_id) + + """ + Internal PLUMgrid Fuctions + """ + + def _get_plugin_version(self): + return plugin_ver.VERSION + + def _port_viftype_binding(self, context, port): + port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_IOVISOR + port[portbindings.VIF_DETAILS] = { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases} + return port + + def _network_admin_state(self, network): + if network["network"].get("admin_state_up") is False: + LOG.warning(_("Networks with admin_state_up=False are not " + "supported by PLUMgrid plugin yet.")) + return network + + def _allocate_pools_for_subnet(self, context, subnet): + """Create IP allocation pools for a given subnet + + Pools are defined by the 'allocation_pools' attribute, + a list of dict objects with 'start' and 'end' keys for + defining the pool range. 
+ Modified from Neutron DB based class + + """ + + pools = [] + # Auto allocate the pool around gateway_ip + net = netaddr.IPNetwork(subnet['cidr']) + boundary = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last)) + potential_dhcp_ip = int(net.first + 1) + if boundary == potential_dhcp_ip: + first_ip = net.first + 3 + boundary = net.first + 2 + else: + first_ip = net.first + 2 + last_ip = net.last - 1 + # Use the gw_ip to find a point for splitting allocation pools + # for this subnet + split_ip = min(max(boundary, net.first), net.last) + if split_ip > first_ip: + pools.append({'start': str(netaddr.IPAddress(first_ip)), + 'end': str(netaddr.IPAddress(split_ip - 1))}) + if split_ip < last_ip: + pools.append({'start': str(netaddr.IPAddress(split_ip + 1)), + 'end': str(netaddr.IPAddress(last_ip))}) + # return auto-generated pools + # no need to check for their validity + return pools diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/agent/ryu_neutron_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/agent/ryu_neutron_agent.py new file mode 100644 index 00000000..18db0f91 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/agent/ryu_neutron_agent.py @@ -0,0 +1,314 @@ +#!/usr/bin/env python +# Copyright 2012 Isaku Yamahata +# Based on openvswitch agent. +# +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Isaku Yamahata + +import httplib +import socket +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg +from ryu.app import client +from ryu.app import conf_switch_key +from ryu.app import rest_nw_id + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import config as common_config +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron import context as q_context +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import log +from neutron.plugins.ryu.common import config # noqa + + +LOG = log.getLogger(__name__) + + +# This is copied of nova.flags._get_my_ip() +# Agent shouldn't depend on nova module +def _get_my_ip(): + """Return the actual ip of the local machine. + + This code figures out what source address would be used if some traffic + were to be sent out to some well known address on the Internet. In this + case, a Google DNS server is used, but the specific address does not + matter much. No traffic is actually sent. 
+ """ + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, _port) = csock.getsockname() + csock.close() + return addr + + +def _get_ip_from_nic(nic): + ip_wrapper = ip_lib.IPWrapper() + dev = ip_wrapper.device(nic) + addrs = dev.addr.list(scope='global') + for addr in addrs: + if addr['ip_version'] == 4: + return addr['cidr'].split('/')[0] + + +def _get_ip(cfg_ip_str, cfg_interface_str): + ip = None + try: + ip = getattr(cfg.CONF.OVS, cfg_ip_str) + except (cfg.NoSuchOptError, cfg.NoSuchGroupError): + pass + if ip: + return ip + + iface = None + try: + iface = getattr(cfg.CONF.OVS, cfg_interface_str) + except (cfg.NoSuchOptError, cfg.NoSuchGroupError): + pass + if iface: + ip = _get_ip_from_nic(iface) + if ip: + return ip + LOG.warning(_('Could not get IPv4 address from %(nic)s: %(cfg)s'), + {'nic': iface, 'cfg': cfg_interface_str}) + + return _get_my_ip() + + +def _get_tunnel_ip(): + return _get_ip('tunnel_ip', 'tunnel_interface') + + +def _get_ovsdb_ip(): + return _get_ip('ovsdb_ip', 'ovsdb_interface') + + +class OVSBridge(ovs_lib.OVSBridge): + def __init__(self, br_name, root_helper): + ovs_lib.OVSBridge.__init__(self, br_name, root_helper) + self.datapath_id = None + + def find_datapath_id(self): + self.datapath_id = self.get_datapath_id() + + def set_manager(self, target): + self.run_vsctl(["set-manager", target]) + + def get_ofport(self, name): + return self.db_get_val("Interface", name, "ofport") + + def _get_ports(self, get_port): + ports = [] + port_names = self.get_port_name_list() + for name in port_names: + if self.get_ofport(name) < 0: + continue + port = get_port(name) + if port: + ports.append(port) + + return ports + + def _get_external_port(self, name): + # exclude vif ports + external_ids = self.db_get_map("Interface", name, "external_ids") + if external_ids: + return + + # exclude tunnel ports + options = self.db_get_map("Interface", name, "options") + if "remote_ip" in options: + return + + ofport = self.get_ofport(name) + return ovs_lib.VifPort(name, ofport, None, None, self) + + def get_external_ports(self): + return self._get_ports(self._get_external_port) + + +class VifPortSet(object): + def __init__(self, int_br, ryu_rest_client): + super(VifPortSet, self).__init__() + self.int_br = int_br + self.api = ryu_rest_client + + def setup(self): + for port in self.int_br.get_external_ports(): + LOG.debug(_('External port %s'), port) + self.api.update_port(rest_nw_id.NW_ID_EXTERNAL, + port.switch.datapath_id, port.ofport) + + +class RyuPluginApi(agent_rpc.PluginApi, + sg_rpc.SecurityGroupServerRpcApiMixin): + def get_ofp_rest_api_addr(self, context): + LOG.debug(_("Get Ryu rest API address")) + return self.call(context, + self.make_msg('get_ofp_rest_api'), + topic=self.topic) + + +class RyuSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall() + + +class OVSNeutronOFPRyuAgent(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + def __init__(self, integ_br, tunnel_ip, ovsdb_ip, ovsdb_port, + polling_interval, root_helper): + super(OVSNeutronOFPRyuAgent, self).__init__() + self.polling_interval = polling_interval + self._setup_rpc() + self.sg_agent = RyuSecurityGroupAgent(self.context, + self.plugin_rpc, + root_helper) + self._setup_integration_br(root_helper, integ_br, tunnel_ip, + ovsdb_port, ovsdb_ip) + + def 
_setup_rpc(self): + self.topic = topics.AGENT + self.plugin_rpc = RyuPluginApi(topics.PLUGIN) + self.context = q_context.get_admin_context_without_session() + self.endpoints = [self] + consumers = [[topics.PORT, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE]] + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + + def _setup_integration_br(self, root_helper, integ_br, + tunnel_ip, ovsdb_port, ovsdb_ip): + self.int_br = OVSBridge(integ_br, root_helper) + self.int_br.find_datapath_id() + + rest_api_addr = self.plugin_rpc.get_ofp_rest_api_addr(self.context) + if not rest_api_addr: + raise n_exc.Invalid(_("Ryu rest API port isn't specified")) + LOG.debug(_("Going to ofp controller mode %s"), rest_api_addr) + + ryu_rest_client = client.OFPClient(rest_api_addr) + + self.vif_ports = VifPortSet(self.int_br, ryu_rest_client) + self.vif_ports.setup() + + sc_client = client.SwitchConfClient(rest_api_addr) + sc_client.set_key(self.int_br.datapath_id, + conf_switch_key.OVS_TUNNEL_ADDR, tunnel_ip) + + # Currently Ryu supports only tcp methods. (ssl isn't supported yet) + self.int_br.set_manager('ptcp:%d' % ovsdb_port) + sc_client.set_key(self.int_br.datapath_id, conf_switch_key.OVSDB_ADDR, + 'tcp:%s:%d' % (ovsdb_ip, ovsdb_port)) + + def port_update(self, context, **kwargs): + LOG.debug(_("Port update received")) + port = kwargs.get('port') + vif_port = self.int_br.get_vif_port_by_id(port['id']) + if not vif_port: + return + + if ext_sg.SECURITYGROUPS in port: + self.sg_agent.refresh_firewall() + + def _update_ports(self, registered_ports): + ports = self.int_br.get_vif_port_set() + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def _process_devices_filter(self, port_info): + if 'added' in port_info: + self.sg_agent.prepare_devices_filter(port_info['added']) + if 'removed' in port_info: + self.sg_agent.remove_devices_filter(port_info['removed']) + + def daemon_loop(self): + ports = set() + + while True: + start = time.time() + try: + port_info = self._update_ports(ports) + if port_info: + LOG.debug(_("Agent loop has new device")) + self._process_devices_filter(port_info) + ports = port_info['current'] + except Exception: + LOG.exception(_("Error in agent event loop")) + + elapsed = max(time.time() - start, 0) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + + +def main(): + common_config.init(sys.argv[1:]) + + common_config.setup_logging(cfg.CONF) + + integ_br = cfg.CONF.OVS.integration_bridge + polling_interval = cfg.CONF.AGENT.polling_interval + root_helper = cfg.CONF.AGENT.root_helper + + tunnel_ip = _get_tunnel_ip() + LOG.debug(_('tunnel_ip %s'), tunnel_ip) + ovsdb_port = cfg.CONF.OVS.ovsdb_port + LOG.debug(_('ovsdb_port %s'), ovsdb_port) + ovsdb_ip = _get_ovsdb_ip() + LOG.debug(_('ovsdb_ip %s'), ovsdb_ip) + try: + agent = OVSNeutronOFPRyuAgent(integ_br, tunnel_ip, ovsdb_ip, + ovsdb_port, polling_interval, + root_helper) + except httplib.HTTPException as e: + LOG.error(_("Initialization failed: %s"), e) + sys.exit(1) + + LOG.info(_("Ryu initialization on the node is done. 
" + "Agent initialized successfully, now running...")) + agent.daemon_loop() + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/common/__init__.py new file mode 100644 index 00000000..8ac9340e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/common/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/common/config.py new file mode 100644 index 00000000..57c3e784 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/common/config.py @@ -0,0 +1,50 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import ovs_lib # noqa + +ovs_opts = [ + cfg.StrOpt('integration_bridge', default='br-int', + help=_("Integration bridge to use")), + cfg.StrOpt('openflow_rest_api', default='127.0.0.1:8080', + help=_("OpenFlow REST API location")), + cfg.IntOpt('tunnel_key_min', default=1, + help=_("Minimum tunnel ID to use")), + cfg.IntOpt('tunnel_key_max', default=0xffffff, + help=_("Maximum tunnel ID to use")), + cfg.StrOpt('tunnel_ip', + help=_("Tunnel IP to use")), + cfg.StrOpt('tunnel_interface', + help=_("Tunnel interface to use")), + cfg.IntOpt('ovsdb_port', default=6634, + help=_("OVSDB port to connect to")), + cfg.StrOpt('ovsdb_ip', + help=_("OVSDB IP to connect to")), + cfg.StrOpt('ovsdb_interface', + help=_("OVSDB interface to connect to")), +] + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), +] + + +cfg.CONF.register_opts(ovs_opts, "OVS") +cfg.CONF.register_opts(agent_opts, "AGENT") +config.register_root_helper(cfg.CONF) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/db/api_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/db/api_v2.py new file mode 100644 index 00000000..af60775a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/db/api_v2.py @@ -0,0 +1,214 @@ +# Copyright 2012 Isaku Yamahata +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import exc as sa_exc +from sqlalchemy import func +from sqlalchemy.orm import exc as orm_exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.ryu.db import models_v2 as ryu_models_v2 + + +LOG = logging.getLogger(__name__) + + +def network_all_tenant_list(): + session = db.get_session() + return session.query(models_v2.Network).all() + + +def get_port_from_device(port_id): + LOG.debug(_("get_port_from_device() called:port_id=%s"), port_id) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id == port_id) + port_and_sgs = query.all() + if not port_and_sgs: + return None + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict[ext_sg.SECURITYGROUPS] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']] + return port_dict + + +class TunnelKey(object): + # VLAN: 12 bits + # GRE, VXLAN: 24bits + # TODO(yamahata): STT: 64bits + _KEY_MIN_HARD = 1 + _KEY_MAX_HARD = 0xffffffff + + def __init__(self, key_min=_KEY_MIN_HARD, key_max=_KEY_MAX_HARD): + self.key_min = key_min + self.key_max = key_max + + if (key_min < self._KEY_MIN_HARD or key_max > self._KEY_MAX_HARD or + key_min > key_max): + raise ValueError(_('Invalid tunnel key options ' + 'tunnel_key_min: %(key_min)d ' + 'tunnel_key_max: %(key_max)d. ' + 'Using default value') % {'key_min': key_min, + 'key_max': key_max}) + + def _last_key(self, session): + try: + return session.query(ryu_models_v2.TunnelKeyLast).one() + except orm_exc.MultipleResultsFound: + max_key = session.query( + func.max(ryu_models_v2.TunnelKeyLast.last_key)) + if max_key > self.key_max: + max_key = self.key_min + + session.query(ryu_models_v2.TunnelKeyLast).delete() + last_key = ryu_models_v2.TunnelKeyLast(last_key=max_key) + except orm_exc.NoResultFound: + last_key = ryu_models_v2.TunnelKeyLast(last_key=self.key_min) + + session.add(last_key) + session.flush() + return session.query(ryu_models_v2.TunnelKeyLast).one() + + def _find_key(self, session, last_key): + """Try to find unused tunnel key. + + Trying to find unused tunnel key in TunnelKey table starting + from last_key + 1. + When all keys are used, raise sqlalchemy.orm.exc.NoResultFound + """ + # key 0 is used for special meanings. So don't allocate 0. 
+ + # sqlite doesn't support + # '(select order by limit) union all (select order by limit) ' + # 'order by limit' + # So do it manually + # new_key = session.query("new_key").from_statement( + # # If last_key + 1 isn't used, it's the result + # 'SELECT new_key ' + # 'FROM (SELECT :last_key + 1 AS new_key) q1 ' + # 'WHERE NOT EXISTS ' + # '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) ' + # + # 'UNION ALL ' + # + # # if last_key + 1 used, + # # find the least unused key from last_key + 1 + # '(SELECT t.tunnel_key + 1 AS new_key ' + # 'FROM tunnelkeys t ' + # 'WHERE NOT EXISTS ' + # '(SELECT 1 FROM tunnelkeys ti ' + # ' WHERE ti.tunnel_key = t.tunnel_key + 1) ' + # 'AND t.tunnel_key >= :last_key ' + # 'ORDER BY new_key LIMIT 1) ' + # + # 'ORDER BY new_key LIMIT 1' + # ).params(last_key=last_key).one() + try: + new_key = session.query("new_key").from_statement( + # If last_key + 1 isn't used, it's the result + 'SELECT new_key ' + 'FROM (SELECT :last_key + 1 AS new_key) q1 ' + 'WHERE NOT EXISTS ' + '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) ' + ).params(last_key=last_key).one() + except orm_exc.NoResultFound: + new_key = session.query("new_key").from_statement( + # if last_key + 1 used, + # find the least unused key from last_key + 1 + '(SELECT t.tunnel_key + 1 AS new_key ' + 'FROM tunnelkeys t ' + 'WHERE NOT EXISTS ' + '(SELECT 1 FROM tunnelkeys ti ' + ' WHERE ti.tunnel_key = t.tunnel_key + 1) ' + 'AND t.tunnel_key >= :last_key ' + 'ORDER BY new_key LIMIT 1) ' + ).params(last_key=last_key).one() + + new_key = new_key[0] # the result is tuple. + LOG.debug(_("last_key %(last_key)s new_key %(new_key)s"), + {'last_key': last_key, 'new_key': new_key}) + if new_key > self.key_max: + LOG.debug(_("No key found")) + raise orm_exc.NoResultFound() + return new_key + + def _allocate(self, session, network_id): + last_key = self._last_key(session) + try: + new_key = self._find_key(session, last_key.last_key) + except orm_exc.NoResultFound: + new_key = self._find_key(session, self.key_min) + + tunnel_key = ryu_models_v2.TunnelKey(network_id=network_id, + tunnel_key=new_key) + last_key.last_key = new_key + session.add(tunnel_key) + return new_key + + _TRANSACTION_RETRY_MAX = 16 + + def allocate(self, session, network_id): + count = 0 + while True: + session.begin(subtransactions=True) + try: + new_key = self._allocate(session, network_id) + session.commit() + break + except sa_exc.SQLAlchemyError: + session.rollback() + + count += 1 + if count > self._TRANSACTION_RETRY_MAX: + # if this happens too often, increase _TRANSACTION_RETRY_MAX + LOG.warn(_("Transaction retry exhausted (%d). 
" + "Abandoned tunnel key allocation."), count) + raise n_exc.ResourceExhausted() + + return new_key + + def delete(self, session, network_id): + session.query(ryu_models_v2.TunnelKey).filter_by( + network_id=network_id).delete() + session.flush() + + def all_list(self): + session = db.get_session() + return session.query(ryu_models_v2.TunnelKey).all() + + +def set_port_status(session, port_id, status): + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except orm_exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/db/models_v2.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/db/models_v2.py new file mode 100644 index 00000000..21d39dd5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/db/models_v2.py @@ -0,0 +1,40 @@ +# Copyright 2012 Isaku Yamahata +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa + +from neutron.db import model_base + + +class TunnelKeyLast(model_base.BASEV2): + """Last allocated Tunnel key. + + The next key allocation will be started from this value + 1 + """ + last_key = sa.Column(sa.Integer, primary_key=True) + + def __repr__(self): + return "" % self.last_key + + +class TunnelKey(model_base.BASEV2): + """Netowrk ID <-> tunnel key mapping.""" + network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), + nullable=False) + tunnel_key = sa.Column(sa.Integer, primary_key=True, + nullable=False, autoincrement=False) + + def __repr__(self): + return "" % (self.network_id, self.tunnel_key) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/ryu_neutron_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/ryu_neutron_plugin.py new file mode 100644 index 00000000..34ace9d7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/ryu/ryu_neutron_plugin.py @@ -0,0 +1,268 @@ +# Copyright 2012 Isaku Yamahata +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Isaku Yamahata + +from oslo.config import cfg +from ryu.app import client +from ryu.app import rest_nw_id + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import api as db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import models_v2 +from neutron.db import portbindings_base +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import portbindings +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.ryu.common import config # noqa +from neutron.plugins.ryu.db import api_v2 as db_api_v2 + + +LOG = logging.getLogger(__name__) + + +class RyuRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + def __init__(self, ofp_rest_api_addr): + super(RyuRpcCallbacks, self).__init__() + self.ofp_rest_api_addr = ofp_rest_api_addr + + def get_ofp_rest_api(self, context, **kwargs): + LOG.debug(_("get_ofp_rest_api: %s"), self.ofp_rest_api_addr) + return self.ofp_rest_api_addr + + @classmethod + def get_port_from_device(cls, device): + port = db_api_v2.get_port_from_device(device) + if port: + port['device'] = device + return port + + +class AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + + def port_update(self, context, port): + self.fanout_cast(context, + self.make_msg('port_update', port=port), + topic=self.topic_port_update) + + +class RyuNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + portbindings_base.PortBindingBaseMixin): + + _supported_extension_aliases = ["external-net", "router", "ext-gw-mode", + "extraroute", "security-group", + "binding", "quotas"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self, configfile=None): + super(RyuNeutronPluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True + } + } + portbindings_base.register_port_dict_function() + self.tunnel_key = db_api_v2.TunnelKey( + cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max) + self.ofp_api_host = cfg.CONF.OVS.openflow_rest_api + if not self.ofp_api_host: + raise n_exc.Invalid(_('Invalid configuration. 
check ryu.ini')) + + self.client = client.OFPClient(self.ofp_api_host) + self.tun_client = client.TunnelClient(self.ofp_api_host) + self.iface_client = client.NeutronIfaceClient(self.ofp_api_host) + for nw_id in rest_nw_id.RESERVED_NETWORK_IDS: + if nw_id != rest_nw_id.NW_ID_UNKNOWN: + self.client.update_network(nw_id) + self._setup_rpc() + + # register known all network list on startup + self._create_all_tenant_network() + + def _setup_rpc(self): + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.notifier = AgentNotifierApi(topics.AGENT) + self.endpoints = [RyuRpcCallbacks(self.ofp_api_host)] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + self.conn.consume_in_threads() + + def _create_all_tenant_network(self): + for net in db_api_v2.network_all_tenant_list(): + self.client.update_network(net.id) + for tun in self.tunnel_key.all_list(): + self.tun_client.update_tunnel_key(tun.network_id, tun.tunnel_key) + session = db.get_session() + for port in session.query(models_v2.Port): + self.iface_client.update_network_id(port.id, port.network_id) + + def _client_create_network(self, net_id, tunnel_key): + self.client.create_network(net_id) + self.tun_client.create_tunnel_key(net_id, tunnel_key) + + def _client_delete_network(self, net_id): + RyuNeutronPluginV2._safe_client_delete_network(self.safe_reference, + net_id) + + @staticmethod + def _safe_client_delete_network(safe_reference, net_id): + # Avoid handing naked plugin references to the client. When + # the client is mocked for testing, such references can + # prevent the plugin from being deallocated. + client.ignore_http_not_found( + lambda: safe_reference.client.delete_network(net_id)) + client.ignore_http_not_found( + lambda: safe_reference.tun_client.delete_tunnel_key(net_id)) + + def create_network(self, context, network): + session = context.session + with session.begin(subtransactions=True): + #set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + net = super(RyuNeutronPluginV2, self).create_network(context, + network) + self._process_l3_create(context, net, network['network']) + + tunnel_key = self.tunnel_key.allocate(session, net['id']) + try: + self._client_create_network(net['id'], tunnel_key) + except Exception: + with excutils.save_and_reraise_exception(): + self._client_delete_network(net['id']) + + return net + + def update_network(self, context, id, network): + session = context.session + with session.begin(subtransactions=True): + net = super(RyuNeutronPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def delete_network(self, context, id): + self._client_delete_network(id) + session = context.session + with session.begin(subtransactions=True): + self.tunnel_key.delete(session, id) + self._process_l3_delete(context, id) + super(RyuNeutronPluginV2, self).delete_network(context, id) + + def create_port(self, context, port): + session = context.session + port_data = port['port'] + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + port = super(RyuNeutronPluginV2, self).create_port(context, port) + 
self._process_portbindings_create_and_update(context, + port_data, + port) + self._process_port_create_security_group( + context, port, sgids) + self.notify_security_groups_member_updated(context, port) + self.iface_client.create_network_id(port['id'], port['network_id']) + return port + + def delete_port(self, context, id, l3_port_check=True): + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + + with context.session.begin(subtransactions=True): + self.disassociate_floatingips(context, id) + port = self.get_port(context, id) + self._delete_port_security_group_bindings(context, id) + super(RyuNeutronPluginV2, self).delete_port(context, id) + + self.notify_security_groups_member_updated(context, port) + + def update_port(self, context, id, port): + deleted = port['port'].get('deleted', False) + session = context.session + + need_port_update_notify = False + with session.begin(subtransactions=True): + original_port = super(RyuNeutronPluginV2, self).get_port( + context, id) + updated_port = super(RyuNeutronPluginV2, self).update_port( + context, id, port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify = self.update_security_group_on_port( + context, id, port, original_port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + need_port_update_notify |= (original_port['admin_state_up'] != + updated_port['admin_state_up']) + + if need_port_update_notify: + self.notifier.port_update(context, updated_port) + + if deleted: + db_api_v2.set_port_status(session, id, q_const.PORT_STATUS_DOWN) + return updated_port diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/base.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/base.py new file mode 100644 index 00000000..e8998b5c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/base.py @@ -0,0 +1,249 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
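create_network() above allocates the tunnel key inside the database transaction, pushes the network to the Ryu controller, and tears the controller-side objects down again if that push fails, re-raising the original error via excutils.save_and_reraise_exception(). A reduced sketch of that compensation shape; 'backend' is a hypothetical stand-in for the OFPClient/TunnelClient pair, not an API defined in this patch:

from neutron.openstack.common import excutils

def create_backend_network_or_rollback(backend, net_id, tunnel_key):
    # 'backend' is illustrative; the plugin itself uses self.client and self.tun_client
    try:
        backend.create_network(net_id)
        backend.create_tunnel_key(net_id, tunnel_key)
    except Exception:
        with excutils.save_and_reraise_exception():
            # best-effort cleanup; the plugin wraps deletes in
            # client.ignore_http_not_found() so a missing object is not fatal
            backend.delete_network(net_id)
            backend.delete_tunnel_key(net_id)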
+ +import abc +import httplib +import six +import time + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware import api_client + +LOG = logging.getLogger(__name__) + +GENERATION_ID_TIMEOUT = -1 +DEFAULT_CONCURRENT_CONNECTIONS = 3 +DEFAULT_CONNECT_TIMEOUT = 5 + + +@six.add_metaclass(abc.ABCMeta) +class ApiClientBase(object): + """An abstract baseclass for all API client implementations.""" + + CONN_IDLE_TIMEOUT = 60 * 15 + + def _create_connection(self, host, port, is_ssl): + if is_ssl: + return httplib.HTTPSConnection(host, port, + timeout=self._connect_timeout) + return httplib.HTTPConnection(host, port, + timeout=self._connect_timeout) + + @staticmethod + def _conn_params(http_conn): + is_ssl = isinstance(http_conn, httplib.HTTPSConnection) + return (http_conn.host, http_conn.port, is_ssl) + + @property + def user(self): + return self._user + + @property + def password(self): + return self._password + + @property + def config_gen(self): + # If NSX_gen_timeout is not -1 then: + # Maintain a timestamp along with the generation ID. Hold onto the + # ID long enough to be useful and block on sequential requests but + # not long enough to persist when Onix db is cleared, which resets + # the generation ID, causing the DAL to block indefinitely with some + # number that's higher than the cluster's value. + if self._gen_timeout != -1: + ts = self._config_gen_ts + if ts is not None: + if (time.time() - ts) > self._gen_timeout: + return None + return self._config_gen + + @config_gen.setter + def config_gen(self, value): + if self._config_gen != value: + if self._gen_timeout != -1: + self._config_gen_ts = time.time() + self._config_gen = value + + def auth_cookie(self, conn): + cookie = None + data = self._get_provider_data(conn) + if data: + cookie = data[1] + return cookie + + def set_auth_cookie(self, conn, cookie): + data = self._get_provider_data(conn) + if data: + self._set_provider_data(conn, (data[0], cookie)) + + def acquire_connection(self, auto_login=True, headers=None, rid=-1): + '''Check out an available HTTPConnection instance. + + Blocks until a connection is available. + :auto_login: automatically logins before returning conn + :headers: header to pass on to login attempt + :param rid: request id passed in from request eventlet. + :returns: An available HTTPConnection instance or None if no + api_providers are configured. + ''' + if not self._api_providers: + LOG.warn(_("[%d] no API providers currently available."), rid) + return None + if self._conn_pool.empty(): + LOG.debug(_("[%d] Waiting to acquire API client connection."), rid) + priority, conn = self._conn_pool.get() + now = time.time() + if getattr(conn, 'last_used', now) < now - self.CONN_IDLE_TIMEOUT: + LOG.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f " + "seconds; reconnecting."), + {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), + 'sec': now - conn.last_used}) + conn = self._create_connection(*self._conn_params(conn)) + + conn.last_used = now + conn.priority = priority # stash current priority for release + qsize = self._conn_pool.qsize() + LOG.debug(_("[%(rid)d] Acquired connection %(conn)s. %(qsize)d " + "connection(s) available."), + {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), + 'qsize': qsize}) + if auto_login and self.auth_cookie(conn) is None: + self._wait_for_login(conn, headers) + return conn + + def release_connection(self, http_conn, bad_state=False, + service_unavail=False, rid=-1): + '''Mark HTTPConnection instance as available for check-out. 
+ + :param http_conn: An HTTPConnection instance obtained from this + instance. + :param bad_state: True if http_conn is known to be in a bad state + (e.g. connection fault.) + :service_unavail: True if http_conn returned 503 response. + :param rid: request id passed in from request eventlet. + ''' + conn_params = self._conn_params(http_conn) + if self._conn_params(http_conn) not in self._api_providers: + LOG.debug(_("[%(rid)d] Released connection %(conn)s is not an " + "API provider for the cluster"), + {'rid': rid, + 'conn': api_client.ctrl_conn_to_str(http_conn)}) + return + elif hasattr(http_conn, "no_release"): + return + + if bad_state: + # Reconnect to provider. + LOG.warn(_("[%(rid)d] Connection returned in bad state, " + "reconnecting to %(conn)s"), + {'rid': rid, + 'conn': api_client.ctrl_conn_to_str(http_conn)}) + http_conn = self._create_connection(*self._conn_params(http_conn)) + priority = self._next_conn_priority + self._next_conn_priority += 1 + elif service_unavail: + # http_conn returned a service unaviable response, put other + # connections to the same controller at end of priority queue, + conns = [] + while not self._conn_pool.empty(): + priority, conn = self._conn_pool.get() + if self._conn_params(conn) == conn_params: + priority = self._next_conn_priority + self._next_conn_priority += 1 + conns.append((priority, conn)) + for priority, conn in conns: + self._conn_pool.put((priority, conn)) + # put http_conn at end of queue also + priority = self._next_conn_priority + self._next_conn_priority += 1 + else: + priority = http_conn.priority + + self._conn_pool.put((priority, http_conn)) + LOG.debug(_("[%(rid)d] Released connection %(conn)s. %(qsize)d " + "connection(s) available."), + {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn), + 'qsize': self._conn_pool.qsize()}) + + def _wait_for_login(self, conn, headers=None): + '''Block until a login has occurred for the current API provider.''' + + data = self._get_provider_data(conn) + if data is None: + LOG.error(_("Login request for an invalid connection: '%s'"), + api_client.ctrl_conn_to_str(conn)) + return + provider_sem = data[0] + if provider_sem.acquire(blocking=False): + try: + cookie = self._login(conn, headers) + self.set_auth_cookie(conn, cookie) + finally: + provider_sem.release() + else: + LOG.debug(_("Waiting for auth to complete")) + # Wait until we can acquire then release + provider_sem.acquire(blocking=True) + provider_sem.release() + + def _get_provider_data(self, conn_or_conn_params, default=None): + """Get data for specified API provider. + + Args: + conn_or_conn_params: either a HTTP(S)Connection object or the + resolved conn_params tuple returned by self._conn_params(). + default: conn_params if ones passed aren't known + Returns: Data associated with specified provider + """ + conn_params = self._normalize_conn_params(conn_or_conn_params) + return self._api_provider_data.get(conn_params, default) + + def _set_provider_data(self, conn_or_conn_params, data): + """Set data for specified API provider. + + Args: + conn_or_conn_params: either a HTTP(S)Connection object or the + resolved conn_params tuple returned by self._conn_params(). + data: data to associate with API provider + """ + conn_params = self._normalize_conn_params(conn_or_conn_params) + if data is None: + del self._api_provider_data[conn_params] + else: + self._api_provider_data[conn_params] = data + + def _normalize_conn_params(self, conn_or_conn_params): + """Normalize conn_param tuple. 
+ + Args: + conn_or_conn_params: either a HTTP(S)Connection object or the + resolved conn_params tuple returned by self._conn_params(). + + Returns: Normalized conn_param tuple + """ + if (not isinstance(conn_or_conn_params, tuple) and + not isinstance(conn_or_conn_params, httplib.HTTPConnection)): + LOG.debug(_("Invalid conn_params value: '%s'"), + str(conn_or_conn_params)) + return conn_or_conn_params + if isinstance(conn_or_conn_params, httplib.HTTPConnection): + conn_params = self._conn_params(conn_or_conn_params) + else: + conn_params = conn_or_conn_params + host, port, is_ssl = conn_params + if port is None: + port = 443 if is_ssl else 80 + return (host, port, is_ssl) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/eventlet_client.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/eventlet_client.py new file mode 100644 index 00000000..fa0cd1f3 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/eventlet_client.py @@ -0,0 +1,155 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import time + +import eventlet +eventlet.monkey_patch() + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import base +from neutron.plugins.vmware.api_client import eventlet_request + +LOG = logging.getLogger(__name__) + + +class EventletApiClient(base.ApiClientBase): + """Eventlet-based implementation of NSX ApiClient ABC.""" + + def __init__(self, api_providers, user, password, + concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, + gen_timeout=base.GENERATION_ID_TIMEOUT, + use_https=True, + connect_timeout=base.DEFAULT_CONNECT_TIMEOUT): + '''Constructor + + :param api_providers: a list of tuples of the form: (host, port, + is_ssl). + :param user: login username. + :param password: login password. + :param concurrent_connections: total number of concurrent connections. + :param use_https: whether or not to use https for requests. + :param connect_timeout: connection timeout in seconds. + :param gen_timeout controls how long the generation id is kept + if set to -1 the generation id is never timed out + ''' + if not api_providers: + api_providers = [] + self._api_providers = set([tuple(p) for p in api_providers]) + self._api_provider_data = {} # tuple(semaphore, session_cookie) + for p in self._api_providers: + self._set_provider_data(p, (eventlet.semaphore.Semaphore(1), None)) + self._user = user + self._password = password + self._concurrent_connections = concurrent_connections + self._use_https = use_https + self._connect_timeout = connect_timeout + self._config_gen = None + self._config_gen_ts = None + self._gen_timeout = gen_timeout + + # Connection pool is a list of queues. 
+ self._conn_pool = eventlet.queue.PriorityQueue() + self._next_conn_priority = 1 + for host, port, is_ssl in api_providers: + for _ in range(concurrent_connections): + conn = self._create_connection(host, port, is_ssl) + self._conn_pool.put((self._next_conn_priority, conn)) + self._next_conn_priority += 1 + + def acquire_redirect_connection(self, conn_params, auto_login=True, + headers=None): + """Check out or create connection to redirected NSX API server. + + Args: + conn_params: tuple specifying target of redirect, see + self._conn_params() + auto_login: returned connection should have valid session cookie + headers: headers to pass on if auto_login + + Returns: An available HTTPConnection instance corresponding to the + specified conn_params. If a connection did not previously + exist, new connections are created with the highest prioity + in the connection pool and one of these new connections + returned. + """ + result_conn = None + data = self._get_provider_data(conn_params) + if data: + # redirect target already exists in provider data and connections + # to the provider have been added to the connection pool. Try to + # obtain a connection from the pool, note that it's possible that + # all connection to the provider are currently in use. + conns = [] + while not self._conn_pool.empty(): + priority, conn = self._conn_pool.get_nowait() + if not result_conn and self._conn_params(conn) == conn_params: + conn.priority = priority + result_conn = conn + else: + conns.append((priority, conn)) + for priority, conn in conns: + self._conn_pool.put((priority, conn)) + # hack: if no free connections available, create new connection + # and stash "no_release" attribute (so that we only exceed + # self._concurrent_connections temporarily) + if not result_conn: + conn = self._create_connection(*conn_params) + conn.priority = 0 # redirect connections have highest priority + conn.no_release = True + result_conn = conn + else: + #redirect target not already known, setup provider lists + self._api_providers.update([conn_params]) + self._set_provider_data(conn_params, + (eventlet.semaphore.Semaphore(1), None)) + # redirects occur during cluster upgrades, i.e. results to old + # redirects to new, so give redirect targets highest priority + priority = 0 + for i in range(self._concurrent_connections): + conn = self._create_connection(*conn_params) + conn.priority = priority + if i == self._concurrent_connections - 1: + break + self._conn_pool.put((priority, conn)) + result_conn = conn + if result_conn: + result_conn.last_used = time.time() + if auto_login and self.auth_cookie(conn) is None: + self._wait_for_login(result_conn, headers) + return result_conn + + def _login(self, conn=None, headers=None): + '''Issue login request and update authentication cookie.''' + cookie = None + g = eventlet_request.LoginRequestEventlet( + self, self._user, self._password, conn, headers) + g.start() + ret = g.join() + if ret: + if isinstance(ret, Exception): + LOG.error(_('Login error "%s"'), ret) + raise ret + + cookie = ret.getheader("Set-Cookie") + if cookie: + LOG.debug(_("Saving new authentication cookie '%s'"), cookie) + + return cookie + +# Register as subclass. 
+base.ApiClientBase.register(EventletApiClient) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/eventlet_request.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/eventlet_request.py new file mode 100644 index 00000000..26c378e0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/eventlet_request.py @@ -0,0 +1,240 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +import httplib +import urllib + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import request + +LOG = logging.getLogger(__name__) +USER_AGENT = "Neutron eventlet client/2.0" + + +class EventletApiRequest(request.ApiRequest): + '''Eventlet-based ApiRequest class. + + This class will form the basis for eventlet-based ApiRequest classes + ''' + + # Maximum number of green threads present in the system at one time. + API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE + + # Pool of green threads. One green thread is allocated per incoming + # request. Incoming requests will block when the pool is empty. + API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE) + + # A unique id is assigned to each incoming request. When the current + # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0. + MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID + + # The request id for the next incoming request. + CURRENT_REQUEST_ID = 0 + + def __init__(self, client_obj, url, method="GET", body=None, + headers=None, + request_timeout=request.DEFAULT_REQUEST_TIMEOUT, + retries=request.DEFAULT_RETRIES, + auto_login=True, + redirects=request.DEFAULT_REDIRECTS, + http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None): + '''Constructor.''' + self._api_client = client_obj + self._url = url + self._method = method + self._body = body + self._headers = headers or {} + self._request_timeout = request_timeout + self._retries = retries + self._auto_login = auto_login + self._redirects = redirects + self._http_timeout = http_timeout + self._client_conn = client_conn + self._abort = False + + self._request_error = None + + if "User-Agent" not in self._headers: + self._headers["User-Agent"] = USER_AGENT + + self._green_thread = None + # Retrieve and store this instance's unique request id. + self._request_id = EventletApiRequest.CURRENT_REQUEST_ID + # Update the class variable that tracks request id. 
+ # Request IDs wrap around at MAXIMUM_REQUEST_ID + next_request_id = self._request_id + 1 + next_request_id %= self.MAXIMUM_REQUEST_ID + EventletApiRequest.CURRENT_REQUEST_ID = next_request_id + + @classmethod + def _spawn(cls, func, *args, **kwargs): + '''Allocate a green thread from the class pool.''' + return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs) + + def spawn(self, func, *args, **kwargs): + '''Spawn a new green thread with the supplied function and args.''' + return self.__class__._spawn(func, *args, **kwargs) + + @classmethod + def joinall(cls): + '''Wait for all outstanding requests to complete.''' + return cls.API_REQUEST_POOL.waitall() + + def join(self): + '''Wait for instance green thread to complete.''' + if self._green_thread is not None: + return self._green_thread.wait() + return Exception(_('Joining an invalid green thread')) + + def start(self): + '''Start request processing.''' + self._green_thread = self.spawn(self._run) + + def copy(self): + '''Return a copy of this request instance.''' + return EventletApiRequest( + self._api_client, self._url, self._method, self._body, + self._headers, self._request_timeout, self._retries, + self._auto_login, self._redirects, self._http_timeout) + + def _run(self): + '''Method executed within green thread.''' + if self._request_timeout: + # No timeout exception escapes the with block. + with eventlet.timeout.Timeout(self._request_timeout, False): + return self._handle_request() + + LOG.info(_('[%d] Request timeout.'), self._rid()) + self._request_error = Exception(_('Request timeout')) + return None + else: + return self._handle_request() + + def _handle_request(self): + '''First level request handling.''' + attempt = 0 + timeout = 0 + response = None + while response is None and attempt <= self._retries: + eventlet.greenthread.sleep(timeout) + attempt += 1 + + req = self._issue_request() + # automatically raises any exceptions returned. 
+ if isinstance(req, httplib.HTTPResponse): + timeout = 0 + if attempt <= self._retries and not self._abort: + if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN): + continue + elif req.status == httplib.SERVICE_UNAVAILABLE: + timeout = 0.5 + continue + # else fall through to return the error code + + LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'" + ": %(status)s"), + {'rid': self._rid(), 'method': self._method, + 'url': self._url, 'status': req.status}) + self._request_error = None + response = req + else: + LOG.info(_('[%(rid)d] Error while handling request: %(req)s'), + {'rid': self._rid(), 'req': req}) + self._request_error = req + response = None + return response + + +class LoginRequestEventlet(EventletApiRequest): + '''Process a login request.''' + + def __init__(self, client_obj, user, password, client_conn=None, + headers=None): + if headers is None: + headers = {} + headers.update({"Content-Type": "application/x-www-form-urlencoded"}) + body = urllib.urlencode({"username": user, "password": password}) + super(LoginRequestEventlet, self).__init__( + client_obj, "/ws.v1/login", "POST", body, headers, + auto_login=False, client_conn=client_conn) + + def session_cookie(self): + if self.successful(): + return self.value.getheader("Set-Cookie") + return None + + +class GetApiProvidersRequestEventlet(EventletApiRequest): + '''Get a list of API providers.''' + + def __init__(self, client_obj): + url = "/ws.v1/control-cluster/node?fields=roles" + super(GetApiProvidersRequestEventlet, self).__init__( + client_obj, url, "GET", auto_login=True) + + def api_providers(self): + """Parse api_providers from response. + + Returns: api_providers in [(host, port, is_ssl), ...] format + """ + def _provider_from_listen_addr(addr): + # (pssl|ptcp):: => (host, port, is_ssl) + parts = addr.split(':') + return (parts[1], int(parts[2]), parts[0] == 'pssl') + + try: + if self.successful(): + ret = [] + body = json.loads(self.value.body) + for node in body.get('results', []): + for role in node.get('roles', []): + if role.get('role') == 'api_provider': + addr = role.get('listen_addr') + if addr: + ret.append(_provider_from_listen_addr(addr)) + return ret + except Exception as e: + LOG.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"), + {'rid': self._rid(), 'e': e}) + # intentionally fall through + return None + + +class GenericRequestEventlet(EventletApiRequest): + '''Handle a generic request.''' + + def __init__(self, client_obj, method, url, body, content_type, + auto_login=False, + request_timeout=request.DEFAULT_REQUEST_TIMEOUT, + http_timeout=request.DEFAULT_HTTP_TIMEOUT, + retries=request.DEFAULT_RETRIES, + redirects=request.DEFAULT_REDIRECTS): + headers = {"Content-Type": content_type} + super(GenericRequestEventlet, self).__init__( + client_obj, url, method, body, headers, + request_timeout=request_timeout, retries=retries, + auto_login=auto_login, redirects=redirects, + http_timeout=http_timeout) + + def session_cookie(self): + if self.successful(): + return self.value.getheader("Set-Cookie") + return None + + +request.ApiRequest.register(EventletApiRequest) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/request.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/request.py new file mode 100644 index 00000000..70e7dcef --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/api_client/request.py @@ -0,0 +1,287 @@ +# Copyright 2012 VMware, Inc. 
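EventletApiRequest above caps concurrency with a class-level GreenPool (API_REQUEST_POOL) and wraps its request ids at MAXIMUM_REQUEST_ID. A standalone sketch of the bounded-pool behaviour it relies on, independent of NSX:

import eventlet

pool = eventlet.GreenPool(2)  # plays the role of API_REQUEST_POOL, shrunk to size 2

def work(i):
    eventlet.sleep(0.01)  # pretend to do I/O
    return i * i

threads = [pool.spawn(work, i) for i in range(5)]  # spawn() blocks once both slots are busy
print([t.wait() for t in threads])                 # [0, 1, 4, 9, 16]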
+# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import abc +import copy +import eventlet +import httplib +import time + +import six +import six.moves.urllib.parse as urlparse + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware import api_client + +LOG = logging.getLogger(__name__) + +DEFAULT_REQUEST_TIMEOUT = 30 +DEFAULT_HTTP_TIMEOUT = 30 +DEFAULT_RETRIES = 2 +DEFAULT_REDIRECTS = 2 +DEFAULT_API_REQUEST_POOL_SIZE = 1000 +DEFAULT_MAXIMUM_REQUEST_ID = 4294967295 +DOWNLOAD_TIMEOUT = 180 + + +@six.add_metaclass(abc.ABCMeta) +class ApiRequest(object): + '''An abstract baseclass for all ApiRequest implementations. + + This defines the interface and property structure for both eventlet and + gevent-based ApiRequest classes. + ''' + + # List of allowed status codes. + ALLOWED_STATUS_CODES = [ + httplib.OK, + httplib.CREATED, + httplib.NO_CONTENT, + httplib.MOVED_PERMANENTLY, + httplib.TEMPORARY_REDIRECT, + httplib.BAD_REQUEST, + httplib.UNAUTHORIZED, + httplib.FORBIDDEN, + httplib.NOT_FOUND, + httplib.CONFLICT, + httplib.INTERNAL_SERVER_ERROR, + httplib.SERVICE_UNAVAILABLE + ] + + @abc.abstractmethod + def start(self): + pass + + @abc.abstractmethod + def join(self): + pass + + @abc.abstractmethod + def copy(self): + pass + + def _issue_request(self): + '''Issue a request to a provider.''' + conn = (self._client_conn or + self._api_client.acquire_connection(True, + copy.copy(self._headers), + rid=self._rid())) + if conn is None: + error = Exception(_("No API connections available")) + self._request_error = error + return error + + url = self._url + LOG.debug(_("[%(rid)d] Issuing - request %(conn)s"), + {'rid': self._rid(), 'conn': self._request_str(conn, url)}) + issued_time = time.time() + is_conn_error = False + is_conn_service_unavail = False + response = None + try: + redirects = 0 + while (redirects <= self._redirects): + # Update connection with user specified request timeout, + # the connect timeout is usually smaller so we only set + # the request timeout after a connection is established + if conn.sock is None: + conn.connect() + conn.sock.settimeout(self._http_timeout) + elif conn.sock.gettimeout() != self._http_timeout: + conn.sock.settimeout(self._http_timeout) + + headers = copy.copy(self._headers) + cookie = self._api_client.auth_cookie(conn) + if cookie: + headers["Cookie"] = cookie + + gen = self._api_client.config_gen + if gen: + headers["X-Nvp-Wait-For-Config-Generation"] = gen + LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation " + "request header: '%s'"), gen) + try: + conn.request(self._method, url, self._body, headers) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.warn(_("[%(rid)d] Exception issuing request: " + "%(e)s"), + {'rid': self._rid(), 'e': e}) + + response = conn.getresponse() + response.body = response.read() + response.headers = response.getheaders() + elapsed_time = time.time() - issued_time + LOG.debug(_("[%(rid)d] 
Completed request '%(conn)s': " + "%(status)s (%(elapsed)s seconds)"), + {'rid': self._rid(), + 'conn': self._request_str(conn, url), + 'status': response.status, + 'elapsed': elapsed_time}) + + new_gen = response.getheader('X-Nvp-Config-Generation', None) + if new_gen: + LOG.debug(_("Reading X-Nvp-config-Generation response " + "header: '%s'"), new_gen) + if (self._api_client.config_gen is None or + self._api_client.config_gen < int(new_gen)): + self._api_client.config_gen = int(new_gen) + + if response.status == httplib.UNAUTHORIZED: + + if cookie is None and self._url != "/ws.v1/login": + # The connection still has no valid cookie despite + # attemps to authenticate and the request has failed + # with unauthorized status code. If this isn't a + # a request to authenticate, we should abort the + # request since there is no point in retrying. + self._abort = True + else: + # If request is unauthorized, clear the session cookie + # for the current provider so that subsequent requests + # to the same provider triggers re-authentication. + self._api_client.set_auth_cookie(conn, None) + + self._api_client.set_auth_cookie(conn, None) + elif response.status == httplib.SERVICE_UNAVAILABLE: + is_conn_service_unavail = True + + if response.status not in [httplib.MOVED_PERMANENTLY, + httplib.TEMPORARY_REDIRECT]: + break + elif redirects >= self._redirects: + LOG.info(_("[%d] Maximum redirects exceeded, aborting " + "request"), self._rid()) + break + redirects += 1 + + conn, url = self._redirect_params(conn, response.headers, + self._client_conn is None) + if url is None: + response.status = httplib.INTERNAL_SERVER_ERROR + break + LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"), + {'rid': self._rid(), + 'conn': self._request_str(conn, url)}) + # yield here, just in case we are not out of the loop yet + eventlet.greenthread.sleep(0) + # If we receive any of these responses, then + # our server did not process our request and may be in an + # errored state. Raise an exception, which will cause the + # the conn to be released with is_conn_error == True + # which puts the conn on the back of the client's priority + # queue. + if (response.status == httplib.INTERNAL_SERVER_ERROR and + response.status > httplib.NOT_IMPLEMENTED): + LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' " + "received: %(status)s"), + {'rid': self._rid(), 'method': self._method, + 'url': self._url, 'status': response.status}) + raise Exception(_('Server error return: %s'), response.status) + return response + except Exception as e: + if isinstance(e, httplib.BadStatusLine): + msg = (_("Invalid server response")) + else: + msg = unicode(e) + if response is None: + elapsed_time = time.time() - issued_time + LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' " + "(%(elapsed)s seconds)"), + {'rid': self._rid(), 'conn': self._request_str(conn, url), + 'msg': msg, 'elapsed': elapsed_time}) + self._request_error = e + is_conn_error = True + return e + finally: + # Make sure we release the original connection provided by the + # acquire_connection() call above. + if self._client_conn is None: + self._api_client.release_connection(conn, is_conn_error, + is_conn_service_unavail, + rid=self._rid()) + + def _redirect_params(self, conn, headers, allow_release_conn=False): + """Process redirect response, create new connection if necessary. 
+ + Args: + conn: connection that returned the redirect response + headers: response headers of the redirect response + allow_release_conn: if redirecting to a different server, + release existing connection back to connection pool. + + Returns: Return tuple(conn, url) where conn is a connection object + to the redirect target and url is the path of the API request + """ + + url = None + for name, value in headers: + if name.lower() == "location": + url = value + break + if not url: + LOG.warn(_("[%d] Received redirect status without location header" + " field"), self._rid()) + return (conn, None) + # Accept location with the following format: + # 1. /path, redirect to same node + # 2. scheme://hostname:[port]/path where scheme is https or http + # Reject others + # 3. e.g. relative paths, unsupported scheme, unspecified host + result = urlparse.urlparse(url) + if not result.scheme and not result.hostname and result.path: + if result.path[0] == "/": + if result.query: + url = "%s?%s" % (result.path, result.query) + else: + url = result.path + return (conn, url) # case 1 + else: + LOG.warn(_("[%(rid)d] Received invalid redirect location: " + "'%(url)s'"), {'rid': self._rid(), 'url': url}) + return (conn, None) # case 3 + elif result.scheme not in ["http", "https"] or not result.hostname: + LOG.warn(_("[%(rid)d] Received malformed redirect " + "location: %(url)s"), {'rid': self._rid(), 'url': url}) + return (conn, None) # case 3 + # case 2, redirect location includes a scheme + # so setup a new connection and authenticate + if allow_release_conn: + self._api_client.release_connection(conn) + conn_params = (result.hostname, result.port, result.scheme == "https") + conn = self._api_client.acquire_redirect_connection(conn_params, True, + self._headers) + if result.query: + url = "%s?%s" % (result.path, result.query) + else: + url = result.path + return (conn, url) + + def _rid(self): + '''Return current request id.''' + return self._request_id + + @property + def request_error(self): + '''Return any errors associated with this instance.''' + return self._request_error + + def _request_str(self, conn, url): + '''Return string representation of connection.''' + return "%s %s/%s" % (self._method, api_client.ctrl_conn_to_str(conn), + url) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/check_nsx_config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/check_nsx_config.py new file mode 100644 index 00000000..8607f833 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/check_nsx_config.py @@ -0,0 +1,161 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
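_redirect_params() above accepts either a bare path (case 1: reuse the current connection) or an absolute http/https URL (case 2: acquire a connection to the redirect target) and rejects everything else (case 3). A hedged restatement of just the classification step, using the same urlparse checks; it is illustrative and not part of the patch:

import six.moves.urllib.parse as urlparse

def classify_redirect(location):
    result = urlparse.urlparse(location)
    if not result.scheme and not result.hostname and result.path:
        if result.path[0] == "/":
            return "case 1: same node, new path %s" % result.path
        return "case 3: rejected (relative path)"
    if result.scheme not in ("http", "https") or not result.hostname:
        return "case 3: rejected (bad scheme or missing host)"
    return "case 2: new connection to %s:%s" % (result.hostname, result.port)

print(classify_redirect("/ws.v1/lswitch"))                      # case 1
print(classify_redirect("https://10.0.0.2:443/ws.v1/lswitch"))  # case 2
print(classify_redirect("ftp://10.0.0.2/x"))                    # case 3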
+ +from __future__ import print_function + +import sys + +from oslo.config import cfg + +from neutron.common import config +from neutron.plugins.vmware.common import config as nsx_config # noqa +from neutron.plugins.vmware.common import nsx_utils +from neutron.plugins.vmware import nsxlib + +config.setup_logging(cfg.CONF) + + +def help(name): + print("Usage: %s path/to/neutron/plugin/ini/config/file" % name) + sys.exit(1) + + +def get_nsx_controllers(cluster): + return cluster.nsx_controllers + + +def config_helper(config_entity, cluster): + try: + return nsxlib.do_request('GET', + "/ws.v1/%s?fields=uuid" % config_entity, + cluster=cluster).get('results', []) + except Exception as e: + msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.") + % {'err': str(e), + 'ctl': ', '.join(get_nsx_controllers(cluster))}) + raise Exception(msg) + + +def get_control_cluster_nodes(cluster): + return config_helper("control-cluster/node", cluster) + + +def get_gateway_services(cluster): + ret_gw_services = {"L2GatewayServiceConfig": [], + "L3GatewayServiceConfig": []} + gw_services = config_helper("gateway-service", cluster) + for gw_service in gw_services: + ret_gw_services[gw_service['type']].append(gw_service['uuid']) + return ret_gw_services + + +def get_transport_zones(cluster): + transport_zones = config_helper("transport-zone", cluster) + return [transport_zone['uuid'] for transport_zone in transport_zones] + + +def get_transport_nodes(cluster): + transport_nodes = config_helper("transport-node", cluster) + return [transport_node['uuid'] for transport_node in transport_nodes] + + +def is_transport_node_connected(cluster, node_uuid): + try: + return nsxlib.do_request('GET', + "/ws.v1/transport-node/%s/status" % node_uuid, + cluster=cluster)['connection']['connected'] + except Exception as e: + msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.") + % {'err': str(e), + 'ctl': ', '.join(get_nsx_controllers(cluster))}) + raise Exception(msg) + + +def main(): + if len(sys.argv) != 2: + help(sys.argv[0]) + args = ['--config-file'] + args.append(sys.argv[1]) + config.init(args) + print("----------------------- Database Options -----------------------") + print("\tconnection: %s" % cfg.CONF.database.connection) + print("\tretry_interval: %d" % cfg.CONF.database.retry_interval) + print("\tmax_retries: %d" % cfg.CONF.database.max_retries) + print("----------------------- NSX Options -----------------------") + print("\tNSX Generation Timeout %d" % cfg.CONF.NSX.nsx_gen_timeout) + print("\tNumber of concurrent connections to each controller %d" % + cfg.CONF.NSX.concurrent_connections) + print("\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NSX.max_lp_per_bridged_ls) + print("\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NSX.max_lp_per_overlay_ls) + print("----------------------- Cluster Options -----------------------") + print("\trequested_timeout: %s" % cfg.CONF.req_timeout) + print("\tretries: %s" % cfg.CONF.retries) + print("\tredirects: %s" % cfg.CONF.redirects) + print("\thttp_timeout: %s" % cfg.CONF.http_timeout) + cluster = nsx_utils.create_nsx_cluster( + cfg.CONF, + cfg.CONF.NSX.concurrent_connections, + cfg.CONF.NSX.nsx_gen_timeout) + nsx_controllers = get_nsx_controllers(cluster) + num_controllers = len(nsx_controllers) + print("Number of controllers found: %s" % num_controllers) + if num_controllers == 0: + print("You must specify at least one controller!") + sys.exit(1) + + get_control_cluster_nodes(cluster) + for controller in nsx_controllers: + print("\tController endpoint: 
%s" % controller) + gateway_services = get_gateway_services(cluster) + default_gateways = { + "L2GatewayServiceConfig": cfg.CONF.default_l2_gw_service_uuid, + "L3GatewayServiceConfig": cfg.CONF.default_l3_gw_service_uuid} + errors = 0 + for svc_type in default_gateways.keys(): + for uuid in gateway_services[svc_type]: + print("\t\tGateway(%s) uuid: %s" % (svc_type, uuid)) + if (default_gateways[svc_type] and + default_gateways[svc_type] not in gateway_services[svc_type]): + print("\t\t\tError: specified default %s gateway (%s) is " + "missing from NSX Gateway Services!" % ( + svc_type, + default_gateways[svc_type])) + errors += 1 + transport_zones = get_transport_zones(cluster) + print("\tTransport zones: %s" % transport_zones) + if cfg.CONF.default_tz_uuid not in transport_zones: + print("\t\tError: specified default transport zone " + "(%s) is missing from NSX transport zones!" + % cfg.CONF.default_tz_uuid) + errors += 1 + transport_nodes = get_transport_nodes(cluster) + print("\tTransport nodes: %s" % transport_nodes) + node_errors = [] + for node in transport_nodes: + if not is_transport_node_connected(cluster, node): + node_errors.append(node) + + # Use different exit codes, so that we can distinguish + # between config and runtime errors + if len(node_errors): + print("\nThere are one or mode transport nodes that are " + "not connected: %s. Please, revise!" % node_errors) + sys.exit(10) + elif errors: + print("\nThere are %d errors with your configuration. " + "Please, revise!" % errors) + sys.exit(12) + else: + print("Done.") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/config.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/config.py new file mode 100644 index 00000000..fe93cedf --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/config.py @@ -0,0 +1,196 @@ +# Copyright 2012 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from neutron.plugins.vmware.common import exceptions as nsx_exc + + +class AgentModes: + AGENT = 'agent' + AGENTLESS = 'agentless' + COMBINED = 'combined' + + +class MetadataModes: + DIRECT = 'access_network' + INDIRECT = 'dhcp_host_route' + + +class ReplicationModes: + SERVICE = 'service' + SOURCE = 'source' + + +base_opts = [ + cfg.IntOpt('max_lp_per_bridged_ls', default=5000, + deprecated_group='NVP', + help=_("Maximum number of ports of a logical switch on a " + "bridged transport zone (default 5000)")), + cfg.IntOpt('max_lp_per_overlay_ls', default=256, + deprecated_group='NVP', + help=_("Maximum number of ports of a logical switch on an " + "overlay transport zone (default 256)")), + cfg.IntOpt('concurrent_connections', default=10, + deprecated_group='NVP', + help=_("Maximum concurrent connections to each NSX " + "controller.")), + cfg.IntOpt('nsx_gen_timeout', default=-1, + deprecated_name='nvp_gen_timeout', + deprecated_group='NVP', + help=_("Number of seconds a generation id should be valid for " + "(default -1 meaning do not time out)")), + cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT, + deprecated_group='NVP', + help=_("If set to access_network this enables a dedicated " + "connection to the metadata proxy for metadata server " + "access via Neutron router. If set to dhcp_host_route " + "this enables host route injection via the dhcp agent. " + "This option is only useful if running on a host that " + "does not support namespaces otherwise access_network " + "should be used.")), + cfg.StrOpt('default_transport_type', default='stt', + deprecated_group='NVP', + help=_("The default network tranport type to use (stt, gre, " + "bridge, ipsec_gre, or ipsec_stt)")), + cfg.StrOpt('agent_mode', default=AgentModes.AGENT, + deprecated_group='NVP', + help=_("The mode used to implement DHCP/metadata services.")), + cfg.StrOpt('replication_mode', default=ReplicationModes.SERVICE, + help=_("The default option leverages service nodes to perform" + " packet replication though one could set to this to " + "'source' to perform replication locally. This is useful" + " if one does not want to deploy a service node(s).")) +] + +sync_opts = [ + cfg.IntOpt('state_sync_interval', default=10, + deprecated_group='NVP_SYNC', + help=_("Interval in seconds between runs of the state " + "synchronization task. Set it to 0 to disable it")), + cfg.IntOpt('max_random_sync_delay', default=0, + deprecated_group='NVP_SYNC', + help=_("Maximum value for the additional random " + "delay in seconds between runs of the state " + "synchronization task")), + cfg.IntOpt('min_sync_req_delay', default=1, + deprecated_group='NVP_SYNC', + help=_('Minimum delay, in seconds, between two state ' + 'synchronization queries to NSX. It must not ' + 'exceed state_sync_interval')), + cfg.IntOpt('min_chunk_size', default=500, + deprecated_group='NVP_SYNC', + help=_('Minimum number of resources to be retrieved from NSX ' + 'during state synchronization')), + cfg.BoolOpt('always_read_status', default=False, + deprecated_group='NVP_SYNC', + help=_('Always read operational status from backend on show ' + 'operations. 
Enabling this option might slow down ' + 'the system.')) +] + +connection_opts = [ + cfg.StrOpt('nsx_user', + default='admin', + deprecated_name='nvp_user', + help=_('User name for NSX controllers in this cluster')), + cfg.StrOpt('nsx_password', + default='admin', + deprecated_name='nvp_password', + secret=True, + help=_('Password for NSX controllers in this cluster')), + cfg.IntOpt('req_timeout', + default=30, + help=_('Total time limit for a cluster request')), + cfg.IntOpt('http_timeout', + default=30, + help=_('Time before aborting a request')), + cfg.IntOpt('retries', + default=2, + help=_('Number of time a request should be retried')), + cfg.IntOpt('redirects', + default=2, + help=_('Number of times a redirect should be followed')), + cfg.ListOpt('nsx_controllers', + deprecated_name='nvp_controllers', + help=_("Lists the NSX controllers in this cluster")), +] + +cluster_opts = [ + cfg.StrOpt('default_tz_uuid', + help=_("This is uuid of the default NSX Transport zone that " + "will be used for creating tunneled isolated " + "\"Neutron\" networks. It needs to be created in NSX " + "before starting Neutron with the nsx plugin.")), + cfg.StrOpt('default_l3_gw_service_uuid', + help=_("Unique identifier of the NSX L3 Gateway service " + "which will be used for implementing routers and " + "floating IPs")), + cfg.StrOpt('default_l2_gw_service_uuid', + help=_("Unique identifier of the NSX L2 Gateway service " + "which will be used by default for network gateways")), + cfg.StrOpt('default_service_cluster_uuid', + help=_("Unique identifier of the Service Cluster which will " + "be used by logical services like dhcp and metadata")), + cfg.StrOpt('default_interface_name', default='breth0', + help=_("Name of the interface on a L2 Gateway transport node" + "which should be used by default when setting up a " + "network connection")), +] + +DEFAULT_STATUS_CHECK_INTERVAL = 2000 + +vcns_opts = [ + cfg.StrOpt('user', + default='admin', + help=_('User name for vsm')), + cfg.StrOpt('password', + default='default', + secret=True, + help=_('Password for vsm')), + cfg.StrOpt('manager_uri', + help=_('uri for vsm')), + cfg.StrOpt('datacenter_moid', + help=_('Optional parameter identifying the ID of datacenter ' + 'to deploy NSX Edges')), + cfg.StrOpt('deployment_container_id', + help=_('Optional parameter identifying the ID of datastore to ' + 'deploy NSX Edges')), + cfg.StrOpt('resource_pool_id', + help=_('Optional parameter identifying the ID of resource to ' + 'deploy NSX Edges')), + cfg.StrOpt('datastore_id', + help=_('Optional parameter identifying the ID of datastore to ' + 'deploy NSX Edges')), + cfg.StrOpt('external_network', + help=_('Network ID for physical network connectivity')), + cfg.IntOpt('task_status_check_interval', + default=DEFAULT_STATUS_CHECK_INTERVAL, + help=_("Task status check interval")) +] + +# Register the configuration options +cfg.CONF.register_opts(connection_opts) +cfg.CONF.register_opts(cluster_opts) +cfg.CONF.register_opts(vcns_opts, group="vcns") +cfg.CONF.register_opts(base_opts, group="NSX") +cfg.CONF.register_opts(sync_opts, group="NSX_SYNC") + + +def validate_config_options(): + if cfg.CONF.NSX.replication_mode not in (ReplicationModes.SERVICE, + ReplicationModes.SOURCE): + error = (_("Invalid replication_mode: %s") % + cfg.CONF.NSX.replication_mode) + raise nsx_exc.NsxPluginException(err_msg=error) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/exceptions.py 
new file mode 100644 index 00000000..3f435bd5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/exceptions.py @@ -0,0 +1,121 @@ +# Copyright 2012 VMware, Inc +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import exceptions as n_exc + + +class NsxPluginException(n_exc.NeutronException): + message = _("An unexpected error occurred in the NSX Plugin: %(err_msg)s") + + +class InvalidVersion(NsxPluginException): + message = _("Unable to fulfill request with version %(version)s.") + + +class InvalidConnection(NsxPluginException): + message = _("Invalid NSX connection parameters: %(conn_params)s") + + +class InvalidClusterConfiguration(NsxPluginException): + message = _("Invalid cluster values: %(invalid_attrs)s. Please ensure " + "that these values are specified in the [DEFAULT] " + "section of the NSX plugin ini file.") + + +class InvalidNovaZone(NsxPluginException): + message = _("Unable to find cluster config entry " + "for nova zone: %(nova_zone)s") + + +class NoMorePortsException(NsxPluginException): + message = _("Unable to create port on network %(network)s. " + "Maximum number of ports reached") + + +class NatRuleMismatch(NsxPluginException): + message = _("While retrieving NAT rules, %(actual_rules)s were found " + "whereas rules in the (%(min_rules)s,%(max_rules)s) interval " + "were expected") + + +class InvalidAttachmentType(NsxPluginException): + message = _("Invalid NSX attachment type '%(attachment_type)s'") + + +class MaintenanceInProgress(NsxPluginException): + message = _("The networking backend is currently in maintenance mode and " + "therefore unable to accept requests which modify its state. " + "Please try later.") + + +class L2GatewayAlreadyInUse(n_exc.Conflict): + message = _("Gateway Service %(gateway)s is already in use") + + +class InvalidSecurityCertificate(NsxPluginException): + message = _("An invalid security certificate was specified for the " + "gateway device. Certificates must be enclosed between " + "'-----BEGIN CERTIFICATE-----' and " + "'-----END CERTIFICATE-----'") + + +class ServiceOverQuota(n_exc.Conflict): + message = _("Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s") + + +class RouterInUseByLBService(n_exc.InUse): + message = _("Router %(router_id)s is in use by Loadbalancer Service " + "%(vip_id)s") + + +class RouterInUseByFWService(n_exc.InUse): + message = _("Router %(router_id)s is in use by firewall Service " + "%(firewall_id)s") + + +class VcnsDriverException(NsxPluginException): + message = _("Error happened in NSX VCNS Driver: %(err_msg)s") + + +class ServiceClusterUnavailable(NsxPluginException): + message = _("Service cluster: '%(cluster_id)s' is unavailable. 
Please, " + "check NSX setup and/or configuration") + + +class PortConfigurationError(NsxPluginException): + message = _("An error occurred while connecting LSN %(lsn_id)s " + "and network %(net_id)s via port %(port_id)s") + + def __init__(self, **kwargs): + super(PortConfigurationError, self).__init__(**kwargs) + self.port_id = kwargs.get('port_id') + + +class LsnNotFound(n_exc.NotFound): + message = _('Unable to find LSN for %(entity)s %(entity_id)s') + + +class LsnPortNotFound(n_exc.NotFound): + message = (_('Unable to find port for LSN %(lsn_id)s ' + 'and %(entity)s %(entity_id)s')) + + +class LsnMigrationConflict(n_exc.Conflict): + message = _("Unable to migrate network '%(net_id)s' to LSN: %(reason)s") + + +class LsnConfigurationConflict(NsxPluginException): + message = _("Configuration conflict on Logical Service Node %(lsn_id)s") diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/nsx_utils.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/nsx_utils.py new file mode 100644 index 00000000..37e99c7d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/nsx_utils.py @@ -0,0 +1,247 @@ +# Copyright 2013 VMware Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import client +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.dbexts import db as nsx_db +from neutron.plugins.vmware.dbexts import networkgw_db +from neutron.plugins.vmware import nsx_cluster +from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib +from neutron.plugins.vmware.nsxlib import router as routerlib +from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib +from neutron.plugins.vmware.nsxlib import switch as switchlib + +LOG = log.getLogger(__name__) + + +def fetch_nsx_switches(session, cluster, neutron_net_id): + """Retrieve logical switches for a neutron network. + + This function is optimized for fetching all the lswitches always + with a single NSX query. + If there is more than 1 logical switch (chained switches use case) + NSX lswitches are queried by 'quantum_net_id' tag. Otherwise the NSX + lswitch is directly retrieved by id (more efficient). + """ + nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id) + if len(nsx_switch_ids) > 1: + lswitches = switchlib.get_lswitches(cluster, neutron_net_id) + else: + lswitches = [switchlib.get_lswitch_by_id( + cluster, nsx_switch_ids[0])] + return lswitches + + +def get_nsx_switch_ids(session, cluster, neutron_network_id): + """Return the NSX switch id for a given neutron network. + + First lookup for mappings in Neutron database. If no mapping is + found, query the NSX backend and add the mappings. + """ + nsx_switch_ids = nsx_db.get_nsx_switch_ids( + session, neutron_network_id) + if not nsx_switch_ids: + # Find logical switches from backend. 
+ # This is a rather expensive query, but it won't be executed + # more than once for each network in Neutron's lifetime + nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id) + if not nsx_switches: + LOG.warn(_("Unable to find NSX switches for Neutron network %s"), + neutron_network_id) + return + nsx_switch_ids = [] + with session.begin(subtransactions=True): + for nsx_switch in nsx_switches: + nsx_switch_id = nsx_switch['uuid'] + nsx_switch_ids.append(nsx_switch_id) + # Create DB mapping + nsx_db.add_neutron_nsx_network_mapping( + session, + neutron_network_id, + nsx_switch_id) + return nsx_switch_ids + + +def get_nsx_switch_and_port_id(session, cluster, neutron_port_id): + """Return the NSX switch and port uuids for a given neutron port. + + First, look up the Neutron database. If not found, execute + a query on NSX platform as the mapping might be missing because + the port was created before upgrading to grizzly. + + This routine also retrieves the identifier of the logical switch in + the backend where the port is plugged. Prior to Icehouse this + information was not available in the Neutron Database. For dealing + with pre-existing records, this routine will query the backend + for retrieving the correct switch identifier. + + As of Icehouse release it is not indeed anymore possible to assume + the backend logical switch identifier is equal to the neutron + network identifier. + """ + nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( + session, neutron_port_id) + if not nsx_switch_id: + # Find logical switch for port from backend + # This is a rather expensive query, but it won't be executed + # more than once for each port in Neutron's lifetime + nsx_ports = switchlib.query_lswitch_lports( + cluster, '*', relations='LogicalSwitchConfig', + filters={'tag': neutron_port_id, + 'tag_scope': 'q_port_id'}) + # Only one result expected + # NOTE(salv-orlando): Not handling the case where more than one + # port is found with the same neutron port tag + if not nsx_ports: + LOG.warn(_("Unable to find NSX port for Neutron port %s"), + neutron_port_id) + # This method is supposed to return a tuple + return None, None + nsx_port = nsx_ports[0] + nsx_switch_id = (nsx_port['_relations'] + ['LogicalSwitchConfig']['uuid']) + if nsx_port_id: + # Mapping already exists. Delete before recreating + nsx_db.delete_neutron_nsx_port_mapping( + session, neutron_port_id) + else: + nsx_port_id = nsx_port['uuid'] + # (re)Create DB mapping + nsx_db.add_neutron_nsx_port_mapping( + session, neutron_port_id, + nsx_switch_id, nsx_port_id) + return nsx_switch_id, nsx_port_id + + +def get_nsx_security_group_id(session, cluster, neutron_id): + """Return the NSX sec profile uuid for a given neutron sec group. + + First, look up the Neutron database. If not found, execute + a query on NSX platform as the mapping might be missing. + NOTE: Security groups are called 'security profiles' on the NSX backend. + """ + nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id) + if not nsx_id: + # Find security profile on backend. 
+ # This is a rather expensive query, but it won't be executed + # more than once for each security group in Neutron's lifetime + nsx_sec_profiles = secgrouplib.query_security_profiles( + cluster, '*', + filters={'tag': neutron_id, + 'tag_scope': 'q_sec_group_id'}) + # Only one result expected + # NOTE(salv-orlando): Not handling the case where more than one + # security profile is found with the same neutron port tag + if not nsx_sec_profiles: + LOG.warn(_("Unable to find NSX security profile for Neutron " + "security group %s"), neutron_id) + return + elif len(nsx_sec_profiles) > 1: + LOG.warn(_("Multiple NSX security profiles found for Neutron " + "security group %s"), neutron_id) + nsx_sec_profile = nsx_sec_profiles[0] + nsx_id = nsx_sec_profile['uuid'] + with session.begin(subtransactions=True): + # Create DB mapping + nsx_db.add_neutron_nsx_security_group_mapping( + session, neutron_id, nsx_id) + return nsx_id + + +def get_nsx_router_id(session, cluster, neutron_router_id): + """Return the NSX router uuid for a given neutron router. + + First, look up the Neutron database. If not found, execute + a query on NSX platform as the mapping might be missing. + """ + nsx_router_id = nsx_db.get_nsx_router_id( + session, neutron_router_id) + if not nsx_router_id: + # Find logical router from backend. + # This is a rather expensive query, but it won't be executed + # more than once for each router in Neutron's lifetime + nsx_routers = routerlib.query_lrouters( + cluster, '*', + filters={'tag': neutron_router_id, + 'tag_scope': 'q_router_id'}) + # Only one result expected + # NOTE(salv-orlando): Not handling the case where more than one + # port is found with the same neutron port tag + if not nsx_routers: + LOG.warn(_("Unable to find NSX router for Neutron router %s"), + neutron_router_id) + return + nsx_router = nsx_routers[0] + nsx_router_id = nsx_router['uuid'] + with session.begin(subtransactions=True): + # Create DB mapping + nsx_db.add_neutron_nsx_router_mapping( + session, + neutron_router_id, + nsx_router_id) + return nsx_router_id + + +def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout): + cluster = nsx_cluster.NSXCluster(**cluster_opts) + + def _ctrl_split(x, y): + return (x, int(y), True) + + api_providers = [_ctrl_split(*ctrl.split(':')) + for ctrl in cluster.nsx_controllers] + cluster.api_client = client.NsxApiClient( + api_providers, cluster.nsx_user, cluster.nsx_password, + request_timeout=cluster.req_timeout, + http_timeout=cluster.http_timeout, + retries=cluster.retries, + redirects=cluster.redirects, + concurrent_connections=concurrent_connections, + gen_timeout=gen_timeout) + return cluster + + +def get_nsx_device_status(cluster, nsx_uuid): + try: + status_up = l2gwlib.get_gateway_device_status( + cluster, nsx_uuid) + if status_up: + return networkgw_db.STATUS_ACTIVE + else: + return networkgw_db.STATUS_DOWN + except api_exc.NsxApiException: + return networkgw_db.STATUS_UNKNOWN + except n_exc.NotFound: + return networkgw_db.ERROR + + +def get_nsx_device_statuses(cluster, tenant_id): + try: + status_dict = l2gwlib.get_gateway_devices_status( + cluster, tenant_id) + return dict((nsx_device_id, + networkgw_db.STATUS_ACTIVE if connected + else networkgw_db.STATUS_DOWN) for + (nsx_device_id, connected) in status_dict.iteritems()) + except api_exc.NsxApiException: + # Do not make a NSX API exception fatal + if tenant_id: + LOG.warn(_("Unable to retrieve operational status for gateway " + "devices belonging to tenant: %s"), tenant_id) + else: + 
LOG.warn(_("Unable to retrieve operational status for " + "gateway devices")) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/sync.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/sync.py new file mode 100644 index 00000000..a35ae07a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/sync.py @@ -0,0 +1,674 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random + +from neutron.common import constants +from neutron.common import exceptions +from neutron import context +from neutron.db import external_net_db +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.extensions import l3 +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.openstack.common import loopingcall +from neutron.openstack.common import timeutils +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import nsx_utils +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import router as routerlib +from neutron.plugins.vmware.nsxlib import switch as switchlib + +# Maximum page size for a single request +# NOTE(salv-orlando): This might become a version-dependent map should the +# limit be raised in future versions +MAX_PAGE_SIZE = 5000 + +LOG = log.getLogger(__name__) + + +class NsxCache(object): + """A simple Cache for NSX resources. + + Associates resource id with resource hash to rapidly identify + updated resources. 
+ Each entry in the cache also stores the following information: + - changed: the resource in the cache has been altered following + an update or a delete + - hit: the resource has been visited during an update (and possibly + left unchanged) + - data: current resource data + - data_bk: backup of resource data prior to its removal + """ + + def __init__(self): + # Maps a uuid to the dict containing it + self._uuid_dict_mappings = {} + # Dicts for NSX cached resources + self._lswitches = {} + self._lswitchports = {} + self._lrouters = {} + + def __getitem__(self, key): + # uuids are unique across the various types of resources + # TODO(salv-orlando): Avoid lookups over all dictionaries + # when retrieving items + # Fetch lswitches, lports, or lrouters + resources = self._uuid_dict_mappings[key] + return resources[key] + + def _clear_changed_flag_and_remove_from_cache(self, resources): + # Clear the 'changed' attribute for all items + for uuid, item in resources.items(): + if item.pop('changed', None) and not item.get('data'): + # The item is not anymore in NSX, so delete it + del resources[uuid] + del self._uuid_dict_mappings[uuid] + LOG.debug("Removed item %s from NSX object cache", uuid) + + def _update_resources(self, resources, new_resources, clear_changed=True): + if clear_changed: + self._clear_changed_flag_and_remove_from_cache(resources) + + def do_hash(item): + return hash(jsonutils.dumps(item)) + + # Parse new data and identify new, deleted, and updated resources + for item in new_resources: + item_id = item['uuid'] + if resources.get(item_id): + new_hash = do_hash(item) + if new_hash != resources[item_id]['hash']: + resources[item_id]['hash'] = new_hash + resources[item_id]['changed'] = True + resources[item_id]['data_bk'] = ( + resources[item_id]['data']) + resources[item_id]['data'] = item + # Mark the item as hit in any case + resources[item_id]['hit'] = True + LOG.debug("Updating item %s in NSX object cache", item_id) + else: + resources[item_id] = {'hash': do_hash(item)} + resources[item_id]['hit'] = True + resources[item_id]['changed'] = True + resources[item_id]['data'] = item + # add a uuid to dict mapping for easy retrieval + # with __getitem__ + self._uuid_dict_mappings[item_id] = resources + LOG.debug("Added item %s to NSX object cache", item_id) + + def _delete_resources(self, resources): + # Mark for removal all the elements which have not been visited. + # And clear the 'hit' attribute. 
+ for to_delete in [k for (k, v) in resources.iteritems() + if not v.pop('hit', False)]: + resources[to_delete]['changed'] = True + resources[to_delete]['data_bk'] = ( + resources[to_delete].pop('data', None)) + + def _get_resource_ids(self, resources, changed_only): + if changed_only: + return [k for (k, v) in resources.iteritems() + if v.get('changed')] + return resources.keys() + + def get_lswitches(self, changed_only=False): + return self._get_resource_ids(self._lswitches, changed_only) + + def get_lrouters(self, changed_only=False): + return self._get_resource_ids(self._lrouters, changed_only) + + def get_lswitchports(self, changed_only=False): + return self._get_resource_ids(self._lswitchports, changed_only) + + def update_lswitch(self, lswitch): + self._update_resources(self._lswitches, [lswitch], clear_changed=False) + + def update_lrouter(self, lrouter): + self._update_resources(self._lrouters, [lrouter], clear_changed=False) + + def update_lswitchport(self, lswitchport): + self._update_resources(self._lswitchports, [lswitchport], + clear_changed=False) + + def process_updates(self, lswitches=None, + lrouters=None, lswitchports=None): + self._update_resources(self._lswitches, lswitches) + self._update_resources(self._lrouters, lrouters) + self._update_resources(self._lswitchports, lswitchports) + return (self._get_resource_ids(self._lswitches, changed_only=True), + self._get_resource_ids(self._lrouters, changed_only=True), + self._get_resource_ids(self._lswitchports, changed_only=True)) + + def process_deletes(self): + self._delete_resources(self._lswitches) + self._delete_resources(self._lrouters) + self._delete_resources(self._lswitchports) + return (self._get_resource_ids(self._lswitches, changed_only=True), + self._get_resource_ids(self._lrouters, changed_only=True), + self._get_resource_ids(self._lswitchports, changed_only=True)) + + +class SyncParameters(): + """Defines attributes used by the synchronization procedure. + + chunk_size: Actual chunk size + extra_chunk_size: Additional data to fetch because of chunk size + adjustment + current_chunk: Counter of the current data chunk being synchronized + Page cursors: markers for the next resource to fetch. 
+ 'start' means page cursor unset for fetching 1st page + init_sync_performed: True if the initial synchronization concluded + """ + + def __init__(self, min_chunk_size): + self.chunk_size = min_chunk_size + self.extra_chunk_size = 0 + self.current_chunk = 0 + self.ls_cursor = 'start' + self.lr_cursor = 'start' + self.lp_cursor = 'start' + self.init_sync_performed = False + self.total_size = 0 + + +def _start_loopingcall(min_chunk_size, state_sync_interval, func): + """Start a loopingcall for the synchronization task.""" + # Start a looping call to synchronize operational status + # for neutron resources + if not state_sync_interval: + # do not start the looping call if specified + # sync interval is 0 + return + state_synchronizer = loopingcall.DynamicLoopingCall( + func, sp=SyncParameters(min_chunk_size)) + state_synchronizer.start( + periodic_interval_max=state_sync_interval) + return state_synchronizer + + +class NsxSynchronizer(): + + LS_URI = nsxlib._build_uri_path( + switchlib.LSWITCH_RESOURCE, fields='uuid,tags,fabric_status', + relations='LogicalSwitchStatus') + LR_URI = nsxlib._build_uri_path( + routerlib.LROUTER_RESOURCE, fields='uuid,tags,fabric_status', + relations='LogicalRouterStatus') + LP_URI = nsxlib._build_uri_path( + switchlib.LSWITCHPORT_RESOURCE, + parent_resource_id='*', + fields='uuid,tags,fabric_status_up', + relations='LogicalPortStatus') + + def __init__(self, plugin, cluster, state_sync_interval, + req_delay, min_chunk_size, max_rand_delay=0): + random.seed() + self._nsx_cache = NsxCache() + # Store parameters as instance members + # NOTE(salv-orlando): apologies if it looks java-ish + self._plugin = plugin + self._cluster = cluster + self._req_delay = req_delay + self._sync_interval = state_sync_interval + self._max_rand_delay = max_rand_delay + # Validate parameters + if self._sync_interval < self._req_delay: + err_msg = (_("Minimum request delay:%(req_delay)s must not " + "exceed synchronization interval:%(sync_interval)s") % + {'req_delay': self._req_delay, + 'sync_interval': self._sync_interval}) + LOG.error(err_msg) + raise nsx_exc.NsxPluginException(err_msg=err_msg) + # Backoff time in case of failures while fetching sync data + self._sync_backoff = 1 + # Store the looping call in an instance variable to allow unit tests + # for controlling its lifecycle + self._sync_looping_call = _start_loopingcall( + min_chunk_size, state_sync_interval, self._synchronize_state) + + def _get_tag_dict(self, tags): + return dict((tag.get('scope'), tag['tag']) for tag in tags) + + def synchronize_network(self, context, neutron_network_data, + lswitches=None): + """Synchronize a Neutron network with its NSX counterpart. + + This routine synchronizes a set of switches when a Neutron + network is mapped to multiple lswitches. 
+ """ + if not lswitches: + # Try to get logical switches from nsx + try: + lswitches = nsx_utils.fetch_nsx_switches( + context.session, self._cluster, + neutron_network_data['id']) + except exceptions.NetworkNotFound: + # TODO(salv-orlando): We should be catching + # api_exc.ResourceNotFound here + # The logical switch was not found + LOG.warning(_("Logical switch for neutron network %s not " + "found on NSX."), neutron_network_data['id']) + lswitches = [] + else: + for lswitch in lswitches: + self._nsx_cache.update_lswitch(lswitch) + # By default assume things go wrong + status = constants.NET_STATUS_ERROR + # In most cases lswitches will contain a single element + for ls in lswitches: + if not ls: + # Logical switch was deleted + break + ls_status = ls['_relations']['LogicalSwitchStatus'] + if not ls_status['fabric_status']: + status = constants.NET_STATUS_DOWN + break + else: + # No switch was down or missing. Set status to ACTIVE unless + # there were no switches in the first place! + if lswitches: + status = constants.NET_STATUS_ACTIVE + # Update db object + if status == neutron_network_data['status']: + # do nothing + return + + with context.session.begin(subtransactions=True): + try: + network = self._plugin._get_network(context, + neutron_network_data['id']) + except exceptions.NetworkNotFound: + pass + else: + network.status = status + LOG.debug(_("Updating status for neutron resource %(q_id)s to:" + " %(status)s"), + {'q_id': neutron_network_data['id'], + 'status': status}) + + def _synchronize_lswitches(self, ctx, ls_uuids, scan_missing=False): + if not ls_uuids and not scan_missing: + return + neutron_net_ids = set() + neutron_nsx_mappings = {} + # TODO(salvatore-orlando): Deal with the case the tag + # has been tampered with + for ls_uuid in ls_uuids: + # If the lswitch has been deleted, get backup copy of data + lswitch = (self._nsx_cache[ls_uuid].get('data') or + self._nsx_cache[ls_uuid].get('data_bk')) + tags = self._get_tag_dict(lswitch['tags']) + neutron_id = tags.get('quantum_net_id') + neutron_net_ids.add(neutron_id) + neutron_nsx_mappings[neutron_id] = ( + neutron_nsx_mappings.get(neutron_id, []) + + [self._nsx_cache[ls_uuid]]) + # Fetch neutron networks from database + filters = {'router:external': [False]} + if not scan_missing: + filters['id'] = neutron_net_ids + + networks = self._plugin._get_collection( + ctx, models_v2.Network, self._plugin._make_network_dict, + filters=filters) + + for network in networks: + lswitches = neutron_nsx_mappings.get(network['id'], []) + lswitches = [lswitch.get('data') for lswitch in lswitches] + self.synchronize_network(ctx, network, lswitches) + + def synchronize_router(self, context, neutron_router_data, + lrouter=None): + """Synchronize a neutron router with its NSX counterpart.""" + if not lrouter: + # Try to get router from nsx + try: + # This query will return the logical router status too + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self._cluster, neutron_router_data['id']) + if nsx_router_id: + lrouter = routerlib.get_lrouter( + self._cluster, nsx_router_id) + except exceptions.NotFound: + # NOTE(salv-orlando): We should be catching + # api_exc.ResourceNotFound here + # The logical router was not found + LOG.warning(_("Logical router for neutron router %s not " + "found on NSX."), neutron_router_data['id']) + if lrouter: + # Update the cache + self._nsx_cache.update_lrouter(lrouter) + + # Note(salv-orlando): It might worth adding a check to verify neutron + # resource tag in nsx entity matches a Neutron 
id. + # By default assume things go wrong + status = constants.NET_STATUS_ERROR + if lrouter: + lr_status = (lrouter['_relations'] + ['LogicalRouterStatus'] + ['fabric_status']) + status = (lr_status and + constants.NET_STATUS_ACTIVE + or constants.NET_STATUS_DOWN) + # Update db object + if status == neutron_router_data['status']: + # do nothing + return + + with context.session.begin(subtransactions=True): + try: + router = self._plugin._get_router(context, + neutron_router_data['id']) + except l3.RouterNotFound: + pass + else: + router.status = status + LOG.debug(_("Updating status for neutron resource %(q_id)s to:" + " %(status)s"), + {'q_id': neutron_router_data['id'], + 'status': status}) + + def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False): + if not lr_uuids and not scan_missing: + return + # TODO(salvatore-orlando): Deal with the case the tag + # has been tampered with + neutron_router_mappings = {} + for lr_uuid in lr_uuids: + lrouter = (self._nsx_cache[lr_uuid].get('data') or + self._nsx_cache[lr_uuid].get('data_bk')) + tags = self._get_tag_dict(lrouter['tags']) + neutron_router_id = tags.get('q_router_id') + if neutron_router_id: + neutron_router_mappings[neutron_router_id] = ( + self._nsx_cache[lr_uuid]) + else: + LOG.warn(_("Unable to find Neutron router id for " + "NSX logical router: %s"), lr_uuid) + # Fetch neutron routers from database + filters = ({} if scan_missing else + {'id': neutron_router_mappings.keys()}) + routers = self._plugin._get_collection( + ctx, l3_db.Router, self._plugin._make_router_dict, + filters=filters) + for router in routers: + lrouter = neutron_router_mappings.get(router['id']) + self.synchronize_router( + ctx, router, lrouter and lrouter.get('data')) + + def synchronize_port(self, context, neutron_port_data, + lswitchport=None, ext_networks=None): + """Synchronize a Neutron port with its NSX counterpart.""" + # Skip synchronization for ports on external networks + if not ext_networks: + ext_networks = [net['id'] for net in context.session.query( + models_v2.Network).join( + external_net_db.ExternalNetwork, + (models_v2.Network.id == + external_net_db.ExternalNetwork.network_id))] + if neutron_port_data['network_id'] in ext_networks: + with context.session.begin(subtransactions=True): + neutron_port_data['status'] = constants.PORT_STATUS_ACTIVE + return + + if not lswitchport: + # Try to get port from nsx + try: + ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id( + context.session, self._cluster, neutron_port_data['id']) + if lp_uuid: + lswitchport = switchlib.get_port( + self._cluster, ls_uuid, lp_uuid, + relations='LogicalPortStatus') + except (exceptions.PortNotFoundOnNetwork): + # NOTE(salv-orlando): We should be catching + # api_exc.ResourceNotFound here instead + # of PortNotFoundOnNetwork when the id exists but + # the logical switch port was not found + LOG.warning(_("Logical switch port for neutron port %s " + "not found on NSX."), neutron_port_data['id']) + lswitchport = None + else: + # If lswitchport is not None, update the cache. + # It could be none if the port was deleted from the backend + if lswitchport: + self._nsx_cache.update_lswitchport(lswitchport) + # Note(salv-orlando): It might worth adding a check to verify neutron + # resource tag in nsx entity matches Neutron id. 
+ # By default assume things go wrong + status = constants.PORT_STATUS_ERROR + if lswitchport: + lp_status = (lswitchport['_relations'] + ['LogicalPortStatus'] + ['fabric_status_up']) + status = (lp_status and + constants.PORT_STATUS_ACTIVE + or constants.PORT_STATUS_DOWN) + + # Update db object + if status == neutron_port_data['status']: + # do nothing + return + + with context.session.begin(subtransactions=True): + try: + port = self._plugin._get_port(context, + neutron_port_data['id']) + except exceptions.PortNotFound: + pass + else: + port.status = status + LOG.debug(_("Updating status for neutron resource %(q_id)s to:" + " %(status)s"), + {'q_id': neutron_port_data['id'], + 'status': status}) + + def _synchronize_lswitchports(self, ctx, lp_uuids, scan_missing=False): + if not lp_uuids and not scan_missing: + return + # Find Neutron port id by tag - the tag is already + # loaded in memory, no reason for doing a db query + # TODO(salvatore-orlando): Deal with the case the tag + # has been tampered with + neutron_port_mappings = {} + for lp_uuid in lp_uuids: + lport = (self._nsx_cache[lp_uuid].get('data') or + self._nsx_cache[lp_uuid].get('data_bk')) + tags = self._get_tag_dict(lport['tags']) + neutron_port_id = tags.get('q_port_id') + if neutron_port_id: + neutron_port_mappings[neutron_port_id] = ( + self._nsx_cache[lp_uuid]) + # Fetch neutron ports from database + # At the first sync we need to fetch all ports + filters = ({} if scan_missing else + {'id': neutron_port_mappings.keys()}) + # TODO(salv-orlando): Work out a solution for avoiding + # this query + ext_nets = [net['id'] for net in ctx.session.query( + models_v2.Network).join( + external_net_db.ExternalNetwork, + (models_v2.Network.id == + external_net_db.ExternalNetwork.network_id))] + ports = self._plugin._get_collection( + ctx, models_v2.Port, self._plugin._make_port_dict, + filters=filters) + for port in ports: + lswitchport = neutron_port_mappings.get(port['id']) + self.synchronize_port( + ctx, port, lswitchport and lswitchport.get('data'), + ext_networks=ext_nets) + + def _get_chunk_size(self, sp): + # NOTE(salv-orlando): Try to use __future__ for this routine only? + ratio = ((float(sp.total_size) / float(sp.chunk_size)) / + (float(self._sync_interval) / float(self._req_delay))) + new_size = max(1.0, ratio) * float(sp.chunk_size) + return int(new_size) + (new_size - int(new_size) > 0) + + def _fetch_data(self, uri, cursor, page_size): + # If not cursor there is nothing to retrieve + if cursor: + if cursor == 'start': + cursor = None + # Chunk size tuning might, in some conditions, make it larger + # than 5,000, which is the maximum page size allowed by the NSX + # API. In this case the request should be split in multiple + # requests. This is not ideal, and therefore a log warning will + # be emitted. + num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1 + if num_requests > 1: + LOG.warn(_("Requested page size is %(cur_chunk_size)d." + "It might be necessary to do %(num_requests)d " + "round-trips to NSX for fetching data. 
Please " + "tune sync parameters to ensure chunk size " + "is less than %(max_page_size)d"), + {'cur_chunk_size': page_size, + 'num_requests': num_requests, + 'max_page_size': MAX_PAGE_SIZE}) + # Only the first request might return the total size, + # subsequent requests will definetely not + results, cursor, total_size = nsxlib.get_single_query_page( + uri, self._cluster, cursor, + min(page_size, MAX_PAGE_SIZE)) + for _req in range(num_requests - 1): + # If no cursor is returned break the cycle as there is no + # actual need to perform multiple requests (all fetched) + # This happens when the overall size of resources exceeds + # the maximum page size, but the number for each single + # resource type is below this threshold + if not cursor: + break + req_results, cursor = nsxlib.get_single_query_page( + uri, self._cluster, cursor, + min(page_size, MAX_PAGE_SIZE))[:2] + results.extend(req_results) + # reset cursor before returning if we queried just to + # know the number of entities + return results, cursor if page_size else 'start', total_size + return [], cursor, None + + def _fetch_nsx_data_chunk(self, sp): + base_chunk_size = sp.chunk_size + chunk_size = base_chunk_size + sp.extra_chunk_size + LOG.info(_("Fetching up to %s resources " + "from NSX backend"), chunk_size) + fetched = ls_count = lr_count = lp_count = 0 + lswitches = lrouters = lswitchports = [] + if sp.ls_cursor or sp.ls_cursor == 'start': + (lswitches, sp.ls_cursor, ls_count) = self._fetch_data( + self.LS_URI, sp.ls_cursor, chunk_size) + fetched = len(lswitches) + if fetched < chunk_size and sp.lr_cursor or sp.lr_cursor == 'start': + (lrouters, sp.lr_cursor, lr_count) = self._fetch_data( + self.LR_URI, sp.lr_cursor, max(chunk_size - fetched, 0)) + fetched += len(lrouters) + if fetched < chunk_size and sp.lp_cursor or sp.lp_cursor == 'start': + (lswitchports, sp.lp_cursor, lp_count) = self._fetch_data( + self.LP_URI, sp.lp_cursor, max(chunk_size - fetched, 0)) + fetched += len(lswitchports) + if sp.current_chunk == 0: + # No cursors were provided. Then it must be possible to + # calculate the total amount of data to fetch + sp.total_size = ls_count + lr_count + lp_count + LOG.debug(_("Total data size: %d"), sp.total_size) + sp.chunk_size = self._get_chunk_size(sp) + # Calculate chunk size adjustment + sp.extra_chunk_size = sp.chunk_size - base_chunk_size + LOG.debug(_("Fetched %(num_lswitches)d logical switches, " + "%(num_lswitchports)d logical switch ports," + "%(num_lrouters)d logical routers"), + {'num_lswitches': len(lswitches), + 'num_lswitchports': len(lswitchports), + 'num_lrouters': len(lrouters)}) + return (lswitches, lrouters, lswitchports) + + def _synchronize_state(self, sp): + # If the plugin has been destroyed, stop the LoopingCall + if not self._plugin: + raise loopingcall.LoopingCallDone + start = timeutils.utcnow() + # Reset page cursor variables if necessary + if sp.current_chunk == 0: + sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start' + LOG.info(_("Running state synchronization task. Chunk: %s"), + sp.current_chunk) + # Fetch chunk_size data from NSX + try: + (lswitches, lrouters, lswitchports) = ( + self._fetch_nsx_data_chunk(sp)) + except (api_exc.RequestTimeout, api_exc.NsxApiException): + sleep_interval = self._sync_backoff + # Cap max back off to 64 seconds + self._sync_backoff = min(self._sync_backoff * 2, 64) + LOG.exception(_("An error occurred while communicating with " + "NSX backend. 
Will retry synchronization " + "in %d seconds"), sleep_interval) + return sleep_interval + LOG.debug(_("Time elapsed querying NSX: %s"), + timeutils.utcnow() - start) + if sp.total_size: + num_chunks = ((sp.total_size / sp.chunk_size) + + (sp.total_size % sp.chunk_size != 0)) + else: + num_chunks = 1 + LOG.debug(_("Number of chunks: %d"), num_chunks) + # Find objects which have changed on NSX side and need + # to be synchronized + LOG.debug("Processing NSX cache for updated objects") + (ls_uuids, lr_uuids, lp_uuids) = self._nsx_cache.process_updates( + lswitches, lrouters, lswitchports) + # Process removed objects only at the last chunk + scan_missing = (sp.current_chunk == num_chunks - 1 and + not sp.init_sync_performed) + if sp.current_chunk == num_chunks - 1: + LOG.debug("Processing NSX cache for deleted objects") + self._nsx_cache.process_deletes() + ls_uuids = self._nsx_cache.get_lswitches( + changed_only=not scan_missing) + lr_uuids = self._nsx_cache.get_lrouters( + changed_only=not scan_missing) + lp_uuids = self._nsx_cache.get_lswitchports( + changed_only=not scan_missing) + LOG.debug(_("Time elapsed hashing data: %s"), + timeutils.utcnow() - start) + # Get an admin context + ctx = context.get_admin_context() + # Synchronize with database + self._synchronize_lswitches(ctx, ls_uuids, + scan_missing=scan_missing) + self._synchronize_lrouters(ctx, lr_uuids, + scan_missing=scan_missing) + self._synchronize_lswitchports(ctx, lp_uuids, + scan_missing=scan_missing) + # Increase chunk counter + LOG.info(_("Synchronization for chunk %(chunk_num)d of " + "%(total_chunks)d performed"), + {'chunk_num': sp.current_chunk + 1, + 'total_chunks': num_chunks}) + sp.current_chunk = (sp.current_chunk + 1) % num_chunks + added_delay = 0 + if sp.current_chunk == 0: + # Ensure init_sync_performed is True + if not sp.init_sync_performed: + sp.init_sync_performed = True + # Add additional random delay + added_delay = random.randint(0, self._max_rand_delay) + LOG.debug(_("Time elapsed at end of sync: %s"), + timeutils.utcnow() - start) + return self._sync_interval / num_chunks + added_delay diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/utils.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/utils.py new file mode 100644 index 00000000..fb21e55e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/common/utils.py @@ -0,0 +1,67 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import hashlib + +from neutron.api.v2 import attributes +from neutron.openstack.common import log +from neutron import version + + +LOG = log.getLogger(__name__) +MAX_DISPLAY_NAME_LEN = 40 +NEUTRON_VERSION = version.version_info.release_string() + + +# Allowed network types for the NSX Plugin +class NetworkTypes: + """Allowed provider network types for the NSX Plugin.""" + L3_EXT = 'l3_ext' + STT = 'stt' + GRE = 'gre' + FLAT = 'flat' + VLAN = 'vlan' + BRIDGE = 'bridge' + + +def get_tags(**kwargs): + tags = ([dict(tag=value, scope=key) + for key, value in kwargs.iteritems()]) + tags.append({"tag": NEUTRON_VERSION, "scope": "quantum"}) + return tags + + +def device_id_to_vm_id(device_id, obfuscate=False): + # device_id can be longer than 40 characters, for example + # a device_id for a dhcp port is like the following: + # + # dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c + # + # To fit it into an NSX tag we need to hash it, however device_id + # used for ports associated to VM's are small enough so let's skip the + # hashing + if len(device_id) > MAX_DISPLAY_NAME_LEN or obfuscate: + return hashlib.sha1(device_id).hexdigest() + else: + return device_id + + +def check_and_truncate(display_name): + if (attributes.is_attr_set(display_name) and + len(display_name) > MAX_DISPLAY_NAME_LEN): + LOG.debug(_("Specified name:'%s' exceeds maximum length. " + "It will be truncated on NSX"), display_name) + return display_name[:MAX_DISPLAY_NAME_LEN] + return display_name or '' diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/db.py new file mode 100644 index 00000000..63264600 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/db.py @@ -0,0 +1,193 @@ +# Copyright 2012 VMware, Inc. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.db import exception as db_exc +from sqlalchemy.orm import exc + +import neutron.db.api as db +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.dbexts import models +from neutron.plugins.vmware.dbexts import networkgw_db + +LOG = logging.getLogger(__name__) + + +def get_network_bindings(session, network_id): + session = session or db.get_session() + return (session.query(models.TzNetworkBinding). + filter_by(network_id=network_id). + all()) + + +def get_network_bindings_by_vlanid(session, vlan_id): + session = session or db.get_session() + return (session.query(models.TzNetworkBinding). + filter_by(vlan_id=vlan_id). 
+ all()) + + +def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id): + with session.begin(subtransactions=True): + binding = models.TzNetworkBinding(network_id, binding_type, + phy_uuid, vlan_id) + session.add(binding) + return binding + + +def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id): + with session.begin(subtransactions=True): + mapping = models.NeutronNsxNetworkMapping( + neutron_id=neutron_id, nsx_id=nsx_switch_id) + session.add(mapping) + return mapping + + +def add_neutron_nsx_port_mapping(session, neutron_id, + nsx_switch_id, nsx_port_id): + session.begin(subtransactions=True) + try: + mapping = models.NeutronNsxPortMapping( + neutron_id, nsx_switch_id, nsx_port_id) + session.add(mapping) + session.commit() + except db_exc.DBDuplicateEntry: + with excutils.save_and_reraise_exception() as ctxt: + session.rollback() + # do not complain if the same exact mapping is being added, + # otherwise re-raise because even though it is possible for the + # same neutron port to map to different back-end ports over time, + # this should not occur whilst a mapping already exists + current = get_nsx_switch_and_port_id(session, neutron_id) + if current[1] == nsx_port_id: + LOG.debug(_("Port mapping for %s already available"), + neutron_id) + ctxt.reraise = False + except db_exc.DBError: + with excutils.save_and_reraise_exception(): + # rollback for any other db error + session.rollback() + return mapping + + +def add_neutron_nsx_router_mapping(session, neutron_id, nsx_router_id): + with session.begin(subtransactions=True): + mapping = models.NeutronNsxRouterMapping( + neutron_id=neutron_id, nsx_id=nsx_router_id) + session.add(mapping) + return mapping + + +def add_neutron_nsx_security_group_mapping(session, neutron_id, nsx_id): + """Map a Neutron security group to a NSX security profile. + + :param session: a valid database session object + :param neutron_id: a neutron security group identifier + :param nsx_id: a nsx security profile identifier + """ + with session.begin(subtransactions=True): + mapping = models.NeutronNsxSecurityGroupMapping( + neutron_id=neutron_id, nsx_id=nsx_id) + session.add(mapping) + return mapping + + +def get_nsx_switch_ids(session, neutron_id): + # This function returns a list of NSX switch identifiers because of + # the possibility of chained logical switches + return [mapping['nsx_id'] for mapping in + session.query(models.NeutronNsxNetworkMapping).filter_by( + neutron_id=neutron_id)] + + +def get_nsx_switch_and_port_id(session, neutron_id): + try: + mapping = (session.query(models.NeutronNsxPortMapping). + filter_by(neutron_id=neutron_id). + one()) + return mapping['nsx_switch_id'], mapping['nsx_port_id'] + except exc.NoResultFound: + LOG.debug(_("NSX identifiers for neutron port %s not yet " + "stored in Neutron DB"), neutron_id) + return None, None + + +def get_nsx_router_id(session, neutron_id): + try: + mapping = (session.query(models.NeutronNsxRouterMapping). + filter_by(neutron_id=neutron_id).one()) + return mapping['nsx_id'] + except exc.NoResultFound: + LOG.debug(_("NSX identifiers for neutron router %s not yet " + "stored in Neutron DB"), neutron_id) + + +def get_nsx_security_group_id(session, neutron_id): + """Return the id of a security group in the NSX backend. + + Note: security groups are called 'security profiles' in NSX + """ + try: + mapping = (session.query(models.NeutronNsxSecurityGroupMapping). + filter_by(neutron_id=neutron_id). 
+ one()) + return mapping['nsx_id'] + except exc.NoResultFound: + LOG.debug(_("NSX identifiers for neutron security group %s not yet " + "stored in Neutron DB"), neutron_id) + return None + + +def _delete_by_neutron_id(session, model, neutron_id): + return session.query(model).filter_by(neutron_id=neutron_id).delete() + + +def delete_neutron_nsx_port_mapping(session, neutron_id): + return _delete_by_neutron_id( + session, models.NeutronNsxPortMapping, neutron_id) + + +def delete_neutron_nsx_router_mapping(session, neutron_id): + return _delete_by_neutron_id( + session, models.NeutronNsxRouterMapping, neutron_id) + + +def unset_default_network_gateways(session): + with session.begin(subtransactions=True): + session.query(networkgw_db.NetworkGateway).update( + {networkgw_db.NetworkGateway.default: False}) + + +def set_default_network_gateway(session, gw_id): + with session.begin(subtransactions=True): + gw = (session.query(networkgw_db.NetworkGateway). + filter_by(id=gw_id).one()) + gw['default'] = True + + +def set_multiprovider_network(session, network_id): + with session.begin(subtransactions=True): + multiprovider_network = models.MultiProviderNetworks( + network_id) + session.add(multiprovider_network) + return multiprovider_network + + +def is_multiprovider_network(session, network_id): + with session.begin(subtransactions=True): + return bool( + session.query(models.MultiProviderNetworks).filter_by( + network_id=network_id).first()) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/lsn_db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/lsn_db.py new file mode 100644 index 00000000..f24e0488 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/lsn_db.py @@ -0,0 +1,131 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
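[Editor's note] The helpers above keep the Neutron-to-NSX identifier mappings in the Neutron database so the plugin does not have to query the backend on every operation. A short usage sketch, assuming only that a database session can be obtained the same way this module does; the fallback path when (None, None) comes back is the tag-based backend query in common/nsx_utils.py:

    # mapping_sketch.py - illustrative only
    import neutron.db.api as db_api
    from neutron.plugins.vmware.dbexts import db as nsx_db

    def remember_port(neutron_port_id, nsx_switch_id, nsx_port_id):
        # store the backend identifiers right after the port is created on NSX
        session = db_api.get_session()
        return nsx_db.add_neutron_nsx_port_mapping(
            session, neutron_port_id, nsx_switch_id, nsx_port_id)

    def resolve_port(neutron_port_id):
        # returns (None, None) if nothing is stored yet; callers then fall
        # back to querying the NSX API by tag (see nsx_utils)
        session = db_api.get_session()
        return nsx_db.get_nsx_switch_and_port_id(session, neutron_port_id)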
+# + +from oslo.db import exception as d_exc +from sqlalchemy import Column +from sqlalchemy import ForeignKey +from sqlalchemy import orm +from sqlalchemy import String + +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import exceptions as p_exc + + +LOG = logging.getLogger(__name__) + + +class LsnPort(models_v2.model_base.BASEV2): + + __tablename__ = 'lsn_port' + + lsn_port_id = Column(String(36), primary_key=True) + + lsn_id = Column(String(36), ForeignKey('lsn.lsn_id', ondelete="CASCADE"), + nullable=False) + sub_id = Column(String(36), nullable=False, unique=True) + mac_addr = Column(String(32), nullable=False, unique=True) + + def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id): + self.lsn_port_id = lsn_port_id + self.lsn_id = lsn_id + self.sub_id = subnet_id + self.mac_addr = mac_address + + +class Lsn(models_v2.model_base.BASEV2): + __tablename__ = 'lsn' + + lsn_id = Column(String(36), primary_key=True) + net_id = Column(String(36), nullable=False) + + def __init__(self, net_id, lsn_id): + self.net_id = net_id + self.lsn_id = lsn_id + + +def lsn_add(context, network_id, lsn_id): + """Add Logical Service Node information to persistent datastore.""" + with context.session.begin(subtransactions=True): + lsn = Lsn(network_id, lsn_id) + context.session.add(lsn) + + +def lsn_remove(context, lsn_id): + """Remove Logical Service Node information from datastore given its id.""" + with context.session.begin(subtransactions=True): + context.session.query(Lsn).filter_by(lsn_id=lsn_id).delete() + + +def lsn_remove_for_network(context, network_id): + """Remove information about the Logical Service Node given its network.""" + with context.session.begin(subtransactions=True): + context.session.query(Lsn).filter_by(net_id=network_id).delete() + + +def lsn_get_for_network(context, network_id, raise_on_err=True): + """Retrieve LSN information given its network id.""" + query = context.session.query(Lsn) + try: + return query.filter_by(net_id=network_id).one() + except (orm.exc.NoResultFound, d_exc.DBError): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node for ' + 'network %s'), network_id) + if raise_on_err: + raise p_exc.LsnNotFound(entity='network', + entity_id=network_id) + + +def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id): + """Add Logical Service Node Port information to persistent datastore.""" + with context.session.begin(subtransactions=True): + lsn_port = LsnPort(lsn_port_id, subnet_id, mac, lsn_id) + context.session.add(lsn_port) + + +def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True): + """Return Logical Service Node Port information given its subnet id.""" + with context.session.begin(subtransactions=True): + try: + return (context.session.query(LsnPort). + filter_by(sub_id=subnet_id).one()) + except (orm.exc.NoResultFound, d_exc.DBError): + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=None, + entity='subnet', + entity_id=subnet_id) + + +def lsn_port_get_for_mac(context, mac_address, raise_on_err=True): + """Return Logical Service Node Port information given its mac address.""" + with context.session.begin(subtransactions=True): + try: + return (context.session.query(LsnPort). 
+                filter_by(mac_addr=mac_address).one())
+        except (orm.exc.NoResultFound, d_exc.DBError):
+            if raise_on_err:
+                raise p_exc.LsnPortNotFound(lsn_id=None,
+                                            entity='mac',
+                                            entity_id=mac_address)
+
+
+def lsn_port_remove(context, lsn_port_id):
+    """Remove Logical Service Node port from the given Logical Service Node."""
+    with context.session.begin(subtransactions=True):
+        (context.session.query(LsnPort).
+         filter_by(lsn_port_id=lsn_port_id).delete())
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/models.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/models.py
new file mode 100644
index 00000000..35633281
--- /dev/null
+++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/models.py
@@ -0,0 +1,135 @@
+# Copyright 2013 VMware, Inc.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String
+from sqlalchemy import orm
+
+from neutron.db import l3_db
+from neutron.db import model_base
+
+
+class TzNetworkBinding(model_base.BASEV2):
+    """Represents a binding of a virtual network with a transport zone.
+
+    This model class associates a Neutron network with a transport zone;
+    optionally a vlan ID might be used if the binding type is 'bridge'
+    """
+    __tablename__ = 'tz_network_bindings'
+
+    # TODO(arosen) - it might be worthwhile refactoring how this data
+    # is stored later so every column does not need to be a primary key.
+    network_id = Column(String(36),
+                        ForeignKey('networks.id', ondelete="CASCADE"),
+                        primary_key=True)
+    # 'flat', 'vlan', 'stt' or 'gre'
+    binding_type = Column(Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
+                               name='tz_network_bindings_binding_type'),
+                          nullable=False, primary_key=True)
+    phy_uuid = Column(String(36), primary_key=True, nullable=True)
+    vlan_id = Column(Integer, primary_key=True, nullable=True,
+                     autoincrement=False)
+
+    def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
+        self.network_id = network_id
+        self.binding_type = binding_type
+        self.phy_uuid = phy_uuid
+        self.vlan_id = vlan_id
+
+    def __repr__(self):
+        return "<TzNetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
+                                                    self.binding_type,
+                                                    self.phy_uuid,
+                                                    self.vlan_id)
+
+
+class NeutronNsxNetworkMapping(model_base.BASEV2):
+    """Maps neutron network identifiers to NSX identifiers.
+
+    Because of chained logical switches more than one mapping might exist
+    for a single Neutron network.
+    """
+    __tablename__ = 'neutron_nsx_network_mappings'
+    neutron_id = Column(String(36),
+                        ForeignKey('networks.id', ondelete='CASCADE'),
+                        primary_key=True)
+    nsx_id = Column(String(36), primary_key=True)
+
+
+class NeutronNsxSecurityGroupMapping(model_base.BASEV2):
+    """Backend mappings for Neutron Security Group identifiers.
+
+    This class maps a neutron security group identifier to the corresponding
+    NSX security profile identifier.
+ """ + + __tablename__ = 'neutron_nsx_security_group_mappings' + neutron_id = Column(String(36), + ForeignKey('securitygroups.id', ondelete="CASCADE"), + primary_key=True) + nsx_id = Column(String(36), primary_key=True) + + +class NeutronNsxPortMapping(model_base.BASEV2): + """Represents the mapping between neutron and nsx port uuids.""" + + __tablename__ = 'neutron_nsx_port_mappings' + neutron_id = Column(String(36), + ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + nsx_switch_id = Column(String(36)) + nsx_port_id = Column(String(36), nullable=False) + + def __init__(self, neutron_id, nsx_switch_id, nsx_port_id): + self.neutron_id = neutron_id + self.nsx_switch_id = nsx_switch_id + self.nsx_port_id = nsx_port_id + + +class NeutronNsxRouterMapping(model_base.BASEV2): + """Maps neutron router identifiers to NSX identifiers.""" + __tablename__ = 'neutron_nsx_router_mappings' + neutron_id = Column(String(36), + ForeignKey('routers.id', ondelete='CASCADE'), + primary_key=True) + nsx_id = Column(String(36)) + + +class MultiProviderNetworks(model_base.BASEV2): + """Networks provisioned through multiprovider extension.""" + + __tablename__ = 'multi_provider_networks' + network_id = Column(String(36), + ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + + def __init__(self, network_id): + self.network_id = network_id + + +class NSXRouterExtAttributes(model_base.BASEV2): + """Router attributes managed by NSX plugin extensions.""" + router_id = Column(String(36), + ForeignKey('routers.id', ondelete="CASCADE"), + primary_key=True) + distributed = Column(Boolean, default=False, nullable=False) + service_router = Column(Boolean, default=False, nullable=False) + # Add a relationship to the Router model in order to instruct + # SQLAlchemy to eagerly load this association + router = orm.relationship( + l3_db.Router, + backref=orm.backref("nsx_attributes", lazy='joined', + uselist=False, cascade='delete')) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/networkgw_db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/networkgw_db.py new file mode 100644 index 00000000..fb5eb626 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/networkgw_db.py @@ -0,0 +1,499 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sqlalchemy as sa + +from sqlalchemy import orm +from sqlalchemy.orm import exc as sa_orm_exc + +from neutron.api.v2 import attributes +from neutron.common import exceptions +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.extensions import networkgw + + +LOG = logging.getLogger(__name__) +DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface' +NETWORK_ID = 'network_id' +SEGMENTATION_TYPE = 'segmentation_type' +SEGMENTATION_ID = 'segmentation_id' +ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID, + SEGMENTATION_TYPE, + SEGMENTATION_ID)) +# Constants for gateway device operational status +STATUS_UNKNOWN = "UNKNOWN" +STATUS_ERROR = "ERROR" +STATUS_ACTIVE = "ACTIVE" +STATUS_DOWN = "DOWN" + + +class GatewayInUse(exceptions.InUse): + message = _("Network Gateway '%(gateway_id)s' still has active mappings " + "with one or more neutron networks.") + + +class GatewayNotFound(exceptions.NotFound): + message = _("Network Gateway %(gateway_id)s could not be found") + + +class GatewayDeviceInUse(exceptions.InUse): + message = _("Network Gateway Device '%(device_id)s' is still used by " + "one or more network gateways.") + + +class GatewayDeviceNotFound(exceptions.NotFound): + message = _("Network Gateway Device %(device_id)s could not be found.") + + +class NetworkGatewayPortInUse(exceptions.InUse): + message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and " + "therefore cannot be deleted directly via the port API.") + + +class GatewayConnectionInUse(exceptions.InUse): + message = _("The specified mapping '%(mapping)s' is already in use on " + "network gateway '%(gateway_id)s'.") + + +class MultipleGatewayConnections(exceptions.Conflict): + message = _("Multiple network connections found on '%(gateway_id)s' " + "with provided criteria.") + + +class GatewayConnectionNotFound(exceptions.NotFound): + message = _("The connection %(network_mapping_info)s was not found on the " + "network gateway '%(network_gateway_id)s'") + + +class NetworkGatewayUnchangeable(exceptions.InUse): + message = _("The network gateway %(gateway_id)s " + "cannot be updated or deleted") + + +class NetworkConnection(model_base.BASEV2, models_v2.HasTenant): + """Defines a connection between a network gateway and a network.""" + # We use port_id as the primary key as one can connect a gateway + # to a network in multiple ways (and we cannot use the same port form + # more than a single gateway) + network_gateway_id = sa.Column(sa.String(36), + sa.ForeignKey('networkgateways.id', + ondelete='CASCADE')) + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete='CASCADE')) + segmentation_type = sa.Column( + sa.Enum('flat', 'vlan', + name='networkconnections_segmentation_type')) + segmentation_id = sa.Column(sa.Integer) + __table_args__ = (sa.UniqueConstraint(network_gateway_id, + segmentation_type, + segmentation_id),) + # Also, storing port id comes back useful when disconnecting a network + # from a gateway + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete='CASCADE'), + primary_key=True) + + +class NetworkGatewayDeviceReference(model_base.BASEV2): + id = sa.Column(sa.String(36), primary_key=True) + network_gateway_id = sa.Column(sa.String(36), + sa.ForeignKey('networkgateways.id', + ondelete='CASCADE'), + primary_key=True) + interface_name = sa.Column(sa.String(64), primary_key=True) + + +class 
NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId, + models_v2.HasTenant): + nsx_id = sa.Column(sa.String(36)) + # Optional name for the gateway device + name = sa.Column(sa.String(255)) + # Transport connector type. Not using enum as range of + # connector types might vary with backend version + connector_type = sa.Column(sa.String(10)) + # Transport connector IP Address + connector_ip = sa.Column(sa.String(64)) + # operational status + status = sa.Column(sa.String(16)) + + +class NetworkGateway(model_base.BASEV2, models_v2.HasId, + models_v2.HasTenant): + """Defines the data model for a network gateway.""" + name = sa.Column(sa.String(255)) + # Tenant id is nullable for this resource + tenant_id = sa.Column(sa.String(36)) + default = sa.Column(sa.Boolean()) + devices = orm.relationship(NetworkGatewayDeviceReference, + backref='networkgateways', + cascade='all,delete') + network_connections = orm.relationship(NetworkConnection, lazy='joined') + + +class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase): + + gateway_resource = networkgw.GATEWAY_RESOURCE_NAME + device_resource = networkgw.DEVICE_RESOURCE_NAME + + def _get_network_gateway(self, context, gw_id): + try: + gw = self._get_by_id(context, NetworkGateway, gw_id) + except sa_orm_exc.NoResultFound: + raise GatewayNotFound(gateway_id=gw_id) + return gw + + def _make_gw_connection_dict(self, gw_conn): + return {'port_id': gw_conn['port_id'], + 'segmentation_type': gw_conn['segmentation_type'], + 'segmentation_id': gw_conn['segmentation_id']} + + def _make_network_gateway_dict(self, network_gateway, fields=None): + device_list = [] + for d in network_gateway['devices']: + device_list.append({'id': d['id'], + 'interface_name': d['interface_name']}) + res = {'id': network_gateway['id'], + 'name': network_gateway['name'], + 'default': network_gateway['default'], + 'devices': device_list, + 'tenant_id': network_gateway['tenant_id']} + # Query gateway connections only if needed + if (fields and 'ports' in fields) or not fields: + res['ports'] = [self._make_gw_connection_dict(conn) + for conn in network_gateway.network_connections] + return self._fields(res, fields) + + def _set_mapping_info_defaults(self, mapping_info): + if not mapping_info.get('segmentation_type'): + mapping_info['segmentation_type'] = 'flat' + if not mapping_info.get('segmentation_id'): + mapping_info['segmentation_id'] = 0 + + def _validate_network_mapping_info(self, network_mapping_info): + self._set_mapping_info_defaults(network_mapping_info) + network_id = network_mapping_info.get(NETWORK_ID) + if not network_id: + raise exceptions.InvalidInput( + error_message=_("A network identifier must be specified " + "when connecting a network to a network " + "gateway. 
Unable to complete operation")) + connection_attrs = set(network_mapping_info.keys()) + if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES): + raise exceptions.InvalidInput( + error_message=(_("Invalid keys found among the ones provided " + "in request body: %(connection_attrs)s."), + connection_attrs)) + seg_type = network_mapping_info.get(SEGMENTATION_TYPE) + seg_id = network_mapping_info.get(SEGMENTATION_ID) + if not seg_type and seg_id: + msg = _("In order to specify a segmentation id the " + "segmentation type must be specified as well") + raise exceptions.InvalidInput(error_message=msg) + elif seg_type and seg_type.lower() == 'flat' and seg_id: + msg = _("Cannot specify a segmentation id when " + "the segmentation type is flat") + raise exceptions.InvalidInput(error_message=msg) + return network_id + + def _retrieve_gateway_connections(self, context, gateway_id, + mapping_info={}, only_one=False): + filters = {'network_gateway_id': [gateway_id]} + for k, v in mapping_info.iteritems(): + if v and k != NETWORK_ID: + filters[k] = [v] + query = self._get_collection_query(context, + NetworkConnection, + filters) + return only_one and query.one() or query.all() + + def _unset_default_network_gateways(self, context): + with context.session.begin(subtransactions=True): + context.session.query(NetworkGateway).update( + {NetworkGateway.default: False}) + + def _set_default_network_gateway(self, context, gw_id): + with context.session.begin(subtransactions=True): + gw = (context.session.query(NetworkGateway). + filter_by(id=gw_id).one()) + gw['default'] = True + + def prevent_network_gateway_port_deletion(self, context, port): + """Pre-deletion check. + + Ensures a port will not be deleted if is being used by a network + gateway. In that case an exception will be raised. 
+ """ + if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF: + raise NetworkGatewayPortInUse(port_id=port['id'], + device_owner=port['device_owner']) + + def create_network_gateway(self, context, network_gateway): + gw_data = network_gateway[self.gateway_resource] + tenant_id = self._get_tenant_id_for_create(context, gw_data) + with context.session.begin(subtransactions=True): + gw_db = NetworkGateway( + id=gw_data.get('id', uuidutils.generate_uuid()), + tenant_id=tenant_id, + name=gw_data.get('name')) + # Device list is guaranteed to be a valid list + device_query = self._query_gateway_devices( + context, filters={'id': [device['id'] + for device in gw_data['devices']]}) + for device in device_query: + if device['tenant_id'] != tenant_id: + raise GatewayDeviceNotFound(device_id=device['id']) + gw_db.devices.extend([NetworkGatewayDeviceReference(**device) + for device in gw_data['devices']]) + context.session.add(gw_db) + LOG.debug(_("Created network gateway with id:%s"), gw_db['id']) + return self._make_network_gateway_dict(gw_db) + + def update_network_gateway(self, context, id, network_gateway): + gw_data = network_gateway[self.gateway_resource] + with context.session.begin(subtransactions=True): + gw_db = self._get_network_gateway(context, id) + if gw_db.default: + raise NetworkGatewayUnchangeable(gateway_id=id) + # Ensure there is something to update before doing it + if any([gw_db[k] != gw_data[k] for k in gw_data]): + gw_db.update(gw_data) + LOG.debug(_("Updated network gateway with id:%s"), id) + return self._make_network_gateway_dict(gw_db) + + def get_network_gateway(self, context, id, fields=None): + gw_db = self._get_network_gateway(context, id) + return self._make_network_gateway_dict(gw_db, fields) + + def delete_network_gateway(self, context, id): + with context.session.begin(subtransactions=True): + gw_db = self._get_network_gateway(context, id) + if gw_db.network_connections: + raise GatewayInUse(gateway_id=id) + if gw_db.default: + raise NetworkGatewayUnchangeable(gateway_id=id) + context.session.delete(gw_db) + LOG.debug(_("Network gateway '%s' was destroyed."), id) + + def get_network_gateways(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj( + context, 'network_gateway', limit, marker) + return self._get_collection(context, NetworkGateway, + self._make_network_gateway_dict, + filters=filters, fields=fields, + sorts=sorts, limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def connect_network(self, context, network_gateway_id, + network_mapping_info): + network_id = self._validate_network_mapping_info(network_mapping_info) + LOG.debug(_("Connecting network '%(network_id)s' to gateway " + "'%(network_gateway_id)s'"), + {'network_id': network_id, + 'network_gateway_id': network_gateway_id}) + with context.session.begin(subtransactions=True): + gw_db = self._get_network_gateway(context, network_gateway_id) + tenant_id = self._get_tenant_id_for_create(context, gw_db) + # TODO(salvatore-orlando): Leverage unique constraint instead + # of performing another query! + if self._retrieve_gateway_connections(context, + network_gateway_id, + network_mapping_info): + raise GatewayConnectionInUse(mapping=network_mapping_info, + gateway_id=network_gateway_id) + # TODO(salvatore-orlando): Creating a port will give it an IP, + # but we actually do not need any. 
Instead of wasting an IP we + # should have a way to say a port shall not be associated with + # any subnet + try: + # We pass the segmentation type and id too - the plugin + # might find them useful as the network connection object + # does not exist yet. + # NOTE: they're not extended attributes, rather extra data + # passed in the port structure to the plugin + # TODO(salvatore-orlando): Verify optimal solution for + # ownership of the gateway port + port = self.create_port(context, { + 'port': + {'tenant_id': tenant_id, + 'network_id': network_id, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': True, + 'fixed_ips': [], + 'device_id': network_gateway_id, + 'device_owner': DEVICE_OWNER_NET_GW_INTF, + 'name': '', + 'gw:segmentation_type': + network_mapping_info.get('segmentation_type'), + 'gw:segmentation_id': + network_mapping_info.get('segmentation_id')}}) + except exceptions.NetworkNotFound: + err_msg = (_("Requested network '%(network_id)s' not found." + "Unable to create network connection on " + "gateway '%(network_gateway_id)s") % + {'network_id': network_id, + 'network_gateway_id': network_gateway_id}) + LOG.error(err_msg) + raise exceptions.InvalidInput(error_message=err_msg) + port_id = port['id'] + LOG.debug(_("Gateway port for '%(network_gateway_id)s' " + "created on network '%(network_id)s':%(port_id)s"), + {'network_gateway_id': network_gateway_id, + 'network_id': network_id, + 'port_id': port_id}) + # Create NetworkConnection record + network_mapping_info['port_id'] = port_id + network_mapping_info['tenant_id'] = tenant_id + gw_db.network_connections.append( + NetworkConnection(**network_mapping_info)) + port_id = port['id'] + # now deallocate and recycle ip from the port + for fixed_ip in port.get('fixed_ips', []): + self._delete_ip_allocation(context, network_id, + fixed_ip['subnet_id'], + fixed_ip['ip_address']) + LOG.debug(_("Ensured no Ip addresses are configured on port %s"), + port_id) + return {'connection_info': + {'network_gateway_id': network_gateway_id, + 'network_id': network_id, + 'port_id': port_id}} + + def disconnect_network(self, context, network_gateway_id, + network_mapping_info): + network_id = self._validate_network_mapping_info(network_mapping_info) + LOG.debug(_("Disconnecting network '%(network_id)s' from gateway " + "'%(network_gateway_id)s'"), + {'network_id': network_id, + 'network_gateway_id': network_gateway_id}) + with context.session.begin(subtransactions=True): + # Uniquely identify connection, otherwise raise + try: + net_connection = self._retrieve_gateway_connections( + context, network_gateway_id, + network_mapping_info, only_one=True) + except sa_orm_exc.NoResultFound: + raise GatewayConnectionNotFound( + network_mapping_info=network_mapping_info, + network_gateway_id=network_gateway_id) + except sa_orm_exc.MultipleResultsFound: + raise MultipleGatewayConnections( + gateway_id=network_gateway_id) + # Remove gateway port from network + # FIXME(salvatore-orlando): Ensure state of port in NSX is + # consistent with outcome of transaction + self.delete_port(context, net_connection['port_id'], + nw_gw_port_check=False) + # Remove NetworkConnection record + context.session.delete(net_connection) + + def _make_gateway_device_dict(self, gateway_device, fields=None, + include_nsx_id=False): + res = {'id': gateway_device['id'], + 'name': gateway_device['name'], + 'status': gateway_device['status'], + 'connector_type': gateway_device['connector_type'], + 'connector_ip': gateway_device['connector_ip'], + 'tenant_id': 
gateway_device['tenant_id']} + if include_nsx_id: + # Return the NSX mapping as well. This attribute will not be + # returned in the API response anyway. Ensure it will not be + # filtered out in field selection. + if fields: + fields.append('nsx_id') + res['nsx_id'] = gateway_device['nsx_id'] + return self._fields(res, fields) + + def _get_gateway_device(self, context, device_id): + try: + return self._get_by_id(context, NetworkGatewayDevice, device_id) + except sa_orm_exc.NoResultFound: + raise GatewayDeviceNotFound(device_id=device_id) + + def _is_device_in_use(self, context, device_id): + query = self._get_collection_query( + context, NetworkGatewayDeviceReference, {'id': [device_id]}) + return query.first() + + def get_gateway_device(self, context, device_id, fields=None, + include_nsx_id=False): + return self._make_gateway_device_dict( + self._get_gateway_device(context, device_id), + fields, include_nsx_id) + + def _query_gateway_devices(self, context, + filters=None, sorts=None, + limit=None, marker=None, + page_reverse=None): + marker_obj = self._get_marker_obj( + context, 'gateway_device', limit, marker) + return self._get_collection_query(context, + NetworkGatewayDevice, + filters=filters, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def get_gateway_devices(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False, include_nsx_id=False): + query = self._query_gateway_devices(context, filters, sorts, limit, + marker, page_reverse) + return [self._make_gateway_device_dict(row, fields, include_nsx_id) + for row in query] + + def create_gateway_device(self, context, gateway_device, + initial_status=STATUS_UNKNOWN): + device_data = gateway_device[self.device_resource] + tenant_id = self._get_tenant_id_for_create(context, device_data) + with context.session.begin(subtransactions=True): + device_db = NetworkGatewayDevice( + id=device_data.get('id', uuidutils.generate_uuid()), + tenant_id=tenant_id, + name=device_data.get('name'), + connector_type=device_data['connector_type'], + connector_ip=device_data['connector_ip'], + status=initial_status) + context.session.add(device_db) + LOG.debug(_("Created network gateway device: %s"), device_db['id']) + return self._make_gateway_device_dict(device_db) + + def update_gateway_device(self, context, gateway_device_id, + gateway_device, include_nsx_id=False): + device_data = gateway_device[self.device_resource] + with context.session.begin(subtransactions=True): + device_db = self._get_gateway_device(context, gateway_device_id) + # Ensure there is something to update before doing it + if any([device_db[k] != device_data[k] for k in device_data]): + device_db.update(device_data) + LOG.debug(_("Updated network gateway device: %s"), + gateway_device_id) + return self._make_gateway_device_dict( + device_db, include_nsx_id=include_nsx_id) + + def delete_gateway_device(self, context, device_id): + with context.session.begin(subtransactions=True): + # A gateway device should not be deleted + # if it is used in any network gateway service + if self._is_device_in_use(context, device_id): + raise GatewayDeviceInUse(device_id=device_id) + device_db = self._get_gateway_device(context, device_id) + context.session.delete(device_db) + LOG.debug(_("Deleted network gateway device: %s."), device_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/qos_db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/qos_db.py new file mode 
100644 index 00000000..b0105020 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/qos_db.py @@ -0,0 +1,300 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.api.v2 import attributes as attr +from neutron.db import db_base_plugin_v2 +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.openstack.common import log +from neutron.openstack.common import uuidutils +from neutron.plugins.vmware.extensions import qos + + +LOG = log.getLogger(__name__) + + +class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + name = sa.Column(sa.String(255)) + default = sa.Column(sa.Boolean, default=False) + min = sa.Column(sa.Integer, nullable=False) + max = sa.Column(sa.Integer, nullable=True) + qos_marking = sa.Column(sa.Enum('untrusted', 'trusted', + name='qosqueues_qos_marking')) + dscp = sa.Column(sa.Integer) + + +class PortQueueMapping(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey("ports.id", ondelete="CASCADE"), + primary_key=True) + + queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"), + primary_key=True) + + # Add a relationship to the Port model adding a backref which will + # allow SQLAlchemy for eagerly load the queue binding + port = orm.relationship( + models_v2.Port, + backref=orm.backref("qos_queue", uselist=False, + cascade='delete', lazy='joined')) + + +class NetworkQueueMapping(model_base.BASEV2): + network_id = sa.Column(sa.String(36), + sa.ForeignKey("networks.id", ondelete="CASCADE"), + primary_key=True) + + queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id", + ondelete="CASCADE")) + + # Add a relationship to the Network model adding a backref which will + # allow SQLAlcremy for eagerly load the queue binding + network = orm.relationship( + models_v2.Network, + backref=orm.backref("qos_queue", uselist=False, + cascade='delete', lazy='joined')) + + +class QoSDbMixin(qos.QueuePluginBase): + """Mixin class to add queues.""" + + def create_qos_queue(self, context, qos_queue): + q = qos_queue['qos_queue'] + with context.session.begin(subtransactions=True): + qos_queue = QoSQueue(id=q.get('id', uuidutils.generate_uuid()), + name=q.get('name'), + tenant_id=q['tenant_id'], + default=q.get('default'), + min=q.get('min'), + max=q.get('max'), + qos_marking=q.get('qos_marking'), + dscp=q.get('dscp')) + context.session.add(qos_queue) + return self._make_qos_queue_dict(qos_queue) + + def get_qos_queue(self, context, queue_id, fields=None): + return self._make_qos_queue_dict( + self._get_qos_queue(context, queue_id), fields) + + def _get_qos_queue(self, context, queue_id): + try: + return self._get_by_id(context, QoSQueue, queue_id) + except exc.NoResultFound: + raise qos.QueueNotFound(id=queue_id) + + def get_qos_queues(self, context, filters=None, fields=None, sorts=None, + limit=None, marker=None, 
page_reverse=False): + marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker) + return self._get_collection(context, QoSQueue, + self._make_qos_queue_dict, + filters=filters, fields=fields, + sorts=sorts, limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + + def delete_qos_queue(self, context, queue_id): + qos_queue = self._get_qos_queue(context, queue_id) + with context.session.begin(subtransactions=True): + context.session.delete(qos_queue) + + def _process_port_queue_mapping(self, context, port_data, queue_id): + port_data[qos.QUEUE] = queue_id + if not queue_id: + return + with context.session.begin(subtransactions=True): + context.session.add(PortQueueMapping(port_id=port_data['id'], + queue_id=queue_id)) + + def _get_port_queue_bindings(self, context, filters=None, fields=None): + return self._get_collection(context, PortQueueMapping, + self._make_port_queue_binding_dict, + filters=filters, fields=fields) + + def _delete_port_queue_mapping(self, context, port_id): + query = self._model_query(context, PortQueueMapping) + try: + binding = query.filter(PortQueueMapping.port_id == port_id).one() + except exc.NoResultFound: + # return since this can happen if we are updating a port that + # did not already have a queue on it. There is no need to check + # if there is one before deleting if we return here. + return + with context.session.begin(subtransactions=True): + context.session.delete(binding) + + def _process_network_queue_mapping(self, context, net_data, queue_id): + net_data[qos.QUEUE] = queue_id + if not queue_id: + return + with context.session.begin(subtransactions=True): + context.session.add( + NetworkQueueMapping(network_id=net_data['id'], + queue_id=queue_id)) + + def _get_network_queue_bindings(self, context, filters=None, fields=None): + return self._get_collection(context, NetworkQueueMapping, + self._make_network_queue_binding_dict, + filters=filters, fields=fields) + + def _delete_network_queue_mapping(self, context, network_id): + query = self._model_query(context, NetworkQueueMapping) + with context.session.begin(subtransactions=True): + binding = query.filter_by(network_id=network_id).first() + if binding: + context.session.delete(binding) + + def _extend_dict_qos_queue(self, obj_res, obj_db): + queue_mapping = obj_db['qos_queue'] + if queue_mapping: + obj_res[qos.QUEUE] = queue_mapping.get('queue_id') + return obj_res + + def _extend_port_dict_qos_queue(self, port_res, port_db): + self._extend_dict_qos_queue(port_res, port_db) + + def _extend_network_dict_qos_queue(self, network_res, network_db): + self._extend_dict_qos_queue(network_res, network_db) + + # Register dict extend functions for networks and ports + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attr.NETWORKS, ['_extend_network_dict_qos_queue']) + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attr.PORTS, ['_extend_port_dict_qos_queue']) + + def _make_qos_queue_dict(self, queue, fields=None): + res = {'id': queue['id'], + 'name': queue.get('name'), + 'default': queue.get('default'), + 'tenant_id': queue['tenant_id'], + 'min': queue.get('min'), + 'max': queue.get('max'), + 'qos_marking': queue.get('qos_marking'), + 'dscp': queue.get('dscp')} + return self._fields(res, fields) + + def _make_port_queue_binding_dict(self, queue, fields=None): + res = {'port_id': queue['port_id'], + 'queue_id': queue['queue_id']} + return self._fields(res, fields) + + def _make_network_queue_binding_dict(self, queue, fields=None): + res = {'network_id': 
queue['network_id'], + 'queue_id': queue['queue_id']} + return self._fields(res, fields) + + def _check_for_queue_and_create(self, context, port): + """Check for queue and create. + + This function determines if a port should be associated with a + queue. It works by first querying NetworkQueueMapping to determine + if the network is associated with a queue. If so, then it queries + NetworkQueueMapping for all the networks that are associated with + this queue. Next, it queries against all the ports on these networks + with the port device_id. Finally it queries PortQueueMapping. If that + query returns a queue_id that is returned. Otherwise a queue is + created that is the size of the queue associated with the network and + that queue_id is returned. + + If the network is not associated with a queue we then query to see + if there is a default queue in the system. If so, a copy of that is + created and the queue_id is returned. + + Otherwise None is returned. None is also returned if the port does not + have a device_id or if the device_owner is network: + """ + + queue_to_create = None + # If there is no device_id don't create a queue. The queue will be + # created on update port when the device_id is present. Also don't + # apply QoS to network ports. + if (not port.get('device_id') or + port['device_owner'].startswith('network:')): + return + + # Check if there is a queue assocated with the network + filters = {'network_id': [port['network_id']]} + network_queue_id = self._get_network_queue_bindings( + context, filters, ['queue_id']) + if network_queue_id: + # get networks that queue is assocated with + filters = {'queue_id': [network_queue_id[0]['queue_id']]} + networks_with_same_queue = self._get_network_queue_bindings( + context, filters) + + # get the ports on these networks with the same_queue and device_id + filters = {'device_id': [port.get('device_id')], + 'network_id': [network['network_id'] for + network in networks_with_same_queue]} + query = self._model_query(context, models_v2.Port.id) + query = self._apply_filters_to_query(query, models_v2.Port, + filters) + ports_ids = [p[0] for p in query] + if ports_ids: + # shared queue already exists find the queue id + queues = self._get_port_queue_bindings(context, + {'port_id': ports_ids}, + ['queue_id']) + if queues: + return queues[0]['queue_id'] + + # get the size of the queue we want to create + queue_to_create = self._get_qos_queue( + context, network_queue_id[0]['queue_id']) + + else: + # check for default queue + filters = {'default': [True]} + # context is elevated since default queue is owned by admin + queue_to_create = self.get_qos_queues(context.elevated(), filters) + if not queue_to_create: + return + queue_to_create = queue_to_create[0] + + # create the queue + tenant_id = self._get_tenant_id_for_create(context, port) + if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'): + queue_to_create['max'] *= int(port[qos.RXTX_FACTOR]) + queue = {'qos_queue': {'name': queue_to_create.get('name'), + 'min': queue_to_create.get('min'), + 'max': queue_to_create.get('max'), + 'dscp': queue_to_create.get('dscp'), + 'qos_marking': + queue_to_create.get('qos_marking'), + 'tenant_id': tenant_id}} + return self.create_qos_queue(context, queue, False)['id'] + + def _validate_qos_queue(self, context, qos_queue): + if qos_queue.get('default'): + if context.is_admin: + if self.get_qos_queues(context, filters={'default': [True]}): + raise qos.DefaultQueueAlreadyExists() + else: + raise qos.DefaultQueueCreateNotAdmin() + if 
qos_queue.get('qos_marking') == 'trusted': + dscp = qos_queue.pop('dscp') + if dscp: + # must raise because a non-zero dscp was provided + raise qos.QueueInvalidMarking() + LOG.info(_("DSCP value (%s) will be ignored with 'trusted' " + "marking"), dscp) + max = qos_queue.get('max') + min = qos_queue.get('min') + # Max can be None + if max and min > max: + raise qos.QueueMinGreaterMax() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/vcns_db.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/vcns_db.py new file mode 100644 index 00000000..24b3e5b8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dbexts/vcns_db.py @@ -0,0 +1,202 @@ +# Copyright 2013 VMware, Inc. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy.orm import exc + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.dbexts import vcns_models +from neutron.plugins.vmware.vshield.common import ( + exceptions as vcns_exc) + +LOG = logging.getLogger(__name__) + + +def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsRouterBinding( + router_id=router_id, + edge_id=vse_id, + lswitch_id=lswitch_id, + status=status) + session.add(binding) + return binding + + +def get_vcns_router_binding(session, router_id): + with session.begin(subtransactions=True): + return (session.query(vcns_models.VcnsRouterBinding). + filter_by(router_id=router_id).first()) + + +def update_vcns_router_binding(session, router_id, **kwargs): + with session.begin(subtransactions=True): + binding = (session.query(vcns_models.VcnsRouterBinding). + filter_by(router_id=router_id).one()) + for key, value in kwargs.iteritems(): + binding[key] = value + + +def delete_vcns_router_binding(session, router_id): + with session.begin(subtransactions=True): + binding = (session.query(vcns_models.VcnsRouterBinding). + filter_by(router_id=router_id).one()) + session.delete(binding) + + +# +# Edge Firewall binding methods +# +def add_vcns_edge_firewallrule_binding(session, map_info): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsEdgeFirewallRuleBinding( + rule_id=map_info['rule_id'], + rule_vseid=map_info['rule_vseid'], + edge_id=map_info['edge_id']) + session.add(binding) + return binding + + +def delete_vcns_edge_firewallrule_binding(session, id, edge_id): + with session.begin(subtransactions=True): + if not (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). + filter_by(rule_id=id, edge_id=edge_id).delete()): + msg = _("Rule Resource binding with id:%s not found!") % id + raise nsx_exc.NsxPluginException(err_msg=msg) + + +def get_vcns_edge_firewallrule_binding(session, id, edge_id): + with session.begin(subtransactions=True): + return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). 
+ filter_by(rule_id=id, edge_id=edge_id).first()) + + +def get_vcns_edge_firewallrule_binding_by_vseid( + session, edge_id, rule_vseid): + with session.begin(subtransactions=True): + try: + return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). + filter_by(edge_id=edge_id, rule_vseid=rule_vseid).one()) + except exc.NoResultFound: + msg = _("Rule Resource binding not found!") + raise nsx_exc.NsxPluginException(err_msg=msg) + + +def cleanup_vcns_edge_firewallrule_binding(session, edge_id): + with session.begin(subtransactions=True): + session.query( + vcns_models.VcnsEdgeFirewallRuleBinding).filter_by( + edge_id=edge_id).delete() + + +def add_vcns_edge_vip_binding(session, map_info): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsEdgeVipBinding( + vip_id=map_info['vip_id'], + edge_id=map_info['edge_id'], + vip_vseid=map_info['vip_vseid'], + app_profileid=map_info['app_profileid']) + session.add(binding) + + return binding + + +def get_vcns_edge_vip_binding(session, id): + with session.begin(subtransactions=True): + try: + qry = session.query(vcns_models.VcnsEdgeVipBinding) + return qry.filter_by(vip_id=id).one() + except exc.NoResultFound: + msg = _("VIP Resource binding with id:%s not found!") % id + LOG.exception(msg) + raise vcns_exc.VcnsNotFound( + resource='router_service_binding', msg=msg) + + +def delete_vcns_edge_vip_binding(session, id): + with session.begin(subtransactions=True): + qry = session.query(vcns_models.VcnsEdgeVipBinding) + if not qry.filter_by(vip_id=id).delete(): + msg = _("VIP Resource binding with id:%s not found!") % id + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + + +def add_vcns_edge_pool_binding(session, map_info): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsEdgePoolBinding( + pool_id=map_info['pool_id'], + edge_id=map_info['edge_id'], + pool_vseid=map_info['pool_vseid']) + session.add(binding) + + return binding + + +def get_vcns_edge_pool_binding(session, id, edge_id): + with session.begin(subtransactions=True): + return (session.query(vcns_models.VcnsEdgePoolBinding). + filter_by(pool_id=id, edge_id=edge_id).first()) + + +def get_vcns_edge_pool_binding_by_vseid(session, edge_id, pool_vseid): + with session.begin(subtransactions=True): + try: + qry = session.query(vcns_models.VcnsEdgePoolBinding) + binding = qry.filter_by(edge_id=edge_id, + pool_vseid=pool_vseid).one() + except exc.NoResultFound: + msg = (_("Pool Resource binding with edge_id:%(edge_id)s " + "pool_vseid:%(pool_vseid)s not found!") % + {'edge_id': edge_id, 'pool_vseid': pool_vseid}) + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + return binding + + +def delete_vcns_edge_pool_binding(session, id, edge_id): + with session.begin(subtransactions=True): + qry = session.query(vcns_models.VcnsEdgePoolBinding) + if not qry.filter_by(pool_id=id, edge_id=edge_id).delete(): + msg = _("Pool Resource binding with id:%s not found!") % id + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + + +def add_vcns_edge_monitor_binding(session, map_info): + with session.begin(subtransactions=True): + binding = vcns_models.VcnsEdgeMonitorBinding( + monitor_id=map_info['monitor_id'], + edge_id=map_info['edge_id'], + monitor_vseid=map_info['monitor_vseid']) + session.add(binding) + + return binding + + +def get_vcns_edge_monitor_binding(session, id, edge_id): + with session.begin(subtransactions=True): + return (session.query(vcns_models.VcnsEdgeMonitorBinding). 
+ filter_by(monitor_id=id, edge_id=edge_id).first()) + + +def delete_vcns_edge_monitor_binding(session, id, edge_id): + with session.begin(subtransactions=True): + qry = session.query(vcns_models.VcnsEdgeMonitorBinding) + if not qry.filter_by(monitor_id=id, edge_id=edge_id).delete(): + msg = _("Monitor Resource binding with id:%s not found!") % id + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/__init__.py new file mode 100644 index 00000000..75afea8b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/lsnmanager.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/lsnmanager.py new file mode 100644 index 00000000..9bc80e6c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/lsnmanager.py @@ -0,0 +1,462 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo.config import cfg +from oslo.db import exception as db_exc + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as p_exc +from neutron.plugins.vmware.common import nsx_utils +from neutron.plugins.vmware.dbexts import lsn_db +from neutron.plugins.vmware.dhcp_meta import constants as const +from neutron.plugins.vmware.nsxlib import lsn as lsn_api +from neutron.plugins.vmware.nsxlib import switch as switch_api + +LOG = logging.getLogger(__name__) + +META_CONF = 'metadata-proxy' +DHCP_CONF = 'dhcp' + + +lsn_opts = [ + cfg.BoolOpt('sync_on_missing_data', default=False, + help=_('Pull LSN information from NSX in case it is missing ' + 'from the local data store. 
This is useful to rebuild ' + 'the local store in case of server recovery.')) +] + + +def register_lsn_opts(config): + config.CONF.register_opts(lsn_opts, "NSX_LSN") + + +class LsnManager(object): + """Manage LSN entities associated with networks.""" + + def __init__(self, plugin): + self.plugin = plugin + + @property + def cluster(self): + return self.plugin.cluster + + def lsn_exists(self, context, network_id): + """Return True if a Logical Service Node exists for the network.""" + return self.lsn_get( + context, network_id, raise_on_err=False) is not None + + def lsn_get(self, context, network_id, raise_on_err=True): + """Retrieve the LSN id associated to the network.""" + try: + return lsn_api.lsn_for_network_get(self.cluster, network_id) + except (n_exc.NotFound, api_exc.NsxApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node for ' + 'network %s'), network_id) + if raise_on_err: + raise p_exc.LsnNotFound(entity='network', + entity_id=network_id) + + def lsn_create(self, context, network_id): + """Create a LSN associated to the network.""" + try: + return lsn_api.lsn_for_network_create(self.cluster, network_id) + except api_exc.NsxApiException: + err_msg = _('Unable to create LSN for network %s') % network_id + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_delete(self, context, lsn_id): + """Delete a LSN given its id.""" + try: + lsn_api.lsn_delete(self.cluster, lsn_id) + except (n_exc.NotFound, api_exc.NsxApiException): + LOG.warn(_('Unable to delete Logical Service Node %s'), lsn_id) + + def lsn_delete_by_network(self, context, network_id): + """Delete a LSN associated to the network.""" + lsn_id = self.lsn_get(context, network_id, raise_on_err=False) + if lsn_id: + self.lsn_delete(context, lsn_id) + + def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): + """Retrieve LSN and LSN port for the network and the subnet.""" + lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) + if lsn_id: + try: + lsn_port_id = lsn_api.lsn_port_by_subnet_get( + self.cluster, lsn_id, subnet_id) + except (n_exc.NotFound, api_exc.NsxApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node Port for ' + 'LSN %(lsn_id)s and subnet %(subnet_id)s') + % {'lsn_id': lsn_id, 'subnet_id': subnet_id}) + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=lsn_id, + entity='subnet', + entity_id=subnet_id) + return (lsn_id, None) + else: + return (lsn_id, lsn_port_id) + else: + return (None, None) + + def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): + """Retrieve LSN and LSN port given network and mac address.""" + lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) + if lsn_id: + try: + lsn_port_id = lsn_api.lsn_port_by_mac_get( + self.cluster, lsn_id, mac) + except (n_exc.NotFound, api_exc.NsxApiException): + logger = raise_on_err and LOG.error or LOG.warn + logger(_('Unable to find Logical Service Node Port for ' + 'LSN %(lsn_id)s and mac address %(mac)s') + % {'lsn_id': lsn_id, 'mac': mac}) + if raise_on_err: + raise p_exc.LsnPortNotFound(lsn_id=lsn_id, + entity='MAC', + entity_id=mac) + return (lsn_id, None) + else: + return (lsn_id, lsn_port_id) + else: + return (None, None) + + def lsn_port_create(self, context, lsn_id, subnet_info): + """Create and return LSN port for associated subnet.""" + try: + return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info) + except n_exc.NotFound: + raise 
p_exc.LsnNotFound(entity='', entity_id=lsn_id) + except api_exc.NsxApiException: + err_msg = _('Unable to create port for LSN %s') % lsn_id + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_port_delete(self, context, lsn_id, lsn_port_id): + """Delete a LSN port from the Logical Service Node.""" + try: + lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) + except (n_exc.NotFound, api_exc.NsxApiException): + LOG.warn(_('Unable to delete LSN Port %s'), lsn_port_id) + + def lsn_port_dispose(self, context, network_id, mac_address): + """Delete a LSN port given the network and the mac address.""" + lsn_id, lsn_port_id = self.lsn_port_get_by_mac( + context, network_id, mac_address, raise_on_err=False) + if lsn_port_id: + self.lsn_port_delete(context, lsn_id, lsn_port_id) + if mac_address == const.METADATA_MAC: + try: + lswitch_port_id = switch_api.get_port_by_neutron_tag( + self.cluster, network_id, + const.METADATA_PORT_ID)['uuid'] + switch_api.delete_port( + self.cluster, network_id, lswitch_port_id) + except (n_exc.PortNotFoundOnNetwork, + api_exc.NsxApiException): + LOG.warn(_("Metadata port not found while attempting " + "to delete it from network %s"), network_id) + else: + LOG.warn(_("Unable to find Logical Services Node " + "Port with MAC %s"), mac_address) + + def lsn_port_dhcp_setup( + self, context, network_id, port_id, port_data, subnet_config=None): + """Connect network to LSN via specified port and port_data.""" + try: + lsn_id = None + switch_id = nsx_utils.get_nsx_switch_ids( + context.session, self.cluster, network_id)[0] + lswitch_port_id = switch_api.get_port_by_neutron_tag( + self.cluster, switch_id, port_id)['uuid'] + lsn_id = self.lsn_get(context, network_id) + lsn_port_id = self.lsn_port_create(context, lsn_id, port_data) + except (n_exc.NotFound, p_exc.NsxPluginException): + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=port_id) + else: + try: + lsn_api.lsn_port_plug_network( + self.cluster, lsn_id, lsn_port_id, lswitch_port_id) + except p_exc.LsnConfigurationConflict: + self.lsn_port_delete(context, lsn_id, lsn_port_id) + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=port_id) + if subnet_config: + self.lsn_port_dhcp_configure( + context, lsn_id, lsn_port_id, subnet_config) + else: + return (lsn_id, lsn_port_id) + + def lsn_port_metadata_setup(self, context, lsn_id, subnet): + """Connect subnet to specified LSN.""" + data = { + "mac_address": const.METADATA_MAC, + "ip_address": subnet['cidr'], + "subnet_id": subnet['id'] + } + network_id = subnet['network_id'] + tenant_id = subnet['tenant_id'] + lswitch_port_id = None + try: + switch_id = nsx_utils.get_nsx_switch_ids( + context.session, self.cluster, network_id)[0] + lswitch_port_id = switch_api.create_lport( + self.cluster, switch_id, tenant_id, + const.METADATA_PORT_ID, const.METADATA_PORT_NAME, + const.METADATA_DEVICE_ID, True)['uuid'] + lsn_port_id = self.lsn_port_create(context, lsn_id, data) + except (n_exc.NotFound, p_exc.NsxPluginException, + api_exc.NsxApiException): + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id) + else: + try: + lsn_api.lsn_port_plug_network( + self.cluster, lsn_id, lsn_port_id, lswitch_port_id) + except p_exc.LsnConfigurationConflict: + self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) + switch_api.delete_port( + self.cluster, network_id, lswitch_port_id) + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + 
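    # DHCP option assembly sketch for the method below: for a subnet with
    # enable_dhcp=True, gateway_ip '10.0.0.1', dns_nameservers ['8.8.8.8'] and
    # no host_routes (and assuming NSX_DHCP.extra_domain_name_servers is
    # empty), the resulting dhcp_options dict would be:
    #
    #     {'domain_name': cfg.CONF.NSX_DHCP.domain_name,
    #      'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
    #      'routers': '10.0.0.1',
    #      'domain_name_servers': '8.8.8.8'}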
def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet): + """Enable/disable dhcp services with the given config options.""" + is_enabled = subnet["enable_dhcp"] + dhcp_options = { + "domain_name": cfg.CONF.NSX_DHCP.domain_name, + "default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time, + } + dns_servers = cfg.CONF.NSX_DHCP.extra_domain_name_servers or [] + dns_servers.extend(subnet["dns_nameservers"]) + if subnet['gateway_ip']: + dhcp_options["routers"] = subnet["gateway_ip"] + if dns_servers: + dhcp_options["domain_name_servers"] = ",".join(dns_servers) + if subnet["host_routes"]: + dhcp_options["classless_static_routes"] = ( + ",".join(subnet["host_routes"]) + ) + try: + lsn_api.lsn_port_dhcp_configure( + self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options) + except (n_exc.NotFound, api_exc.NsxApiException): + err_msg = (_('Unable to configure dhcp for Logical Service ' + 'Node %(lsn_id)s and port %(lsn_port_id)s') + % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id}) + LOG.error(err_msg) + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_metadata_configure(self, context, subnet_id, is_enabled): + """Configure metadata service for the specified subnet.""" + subnet = self.plugin.get_subnet(context, subnet_id) + network_id = subnet['network_id'] + meta_conf = cfg.CONF.NSX_METADATA + metadata_options = { + 'metadata_server_ip': meta_conf.metadata_server_address, + 'metadata_server_port': meta_conf.metadata_server_port, + 'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret + } + try: + lsn_id = self.lsn_get(context, network_id) + lsn_api.lsn_metadata_configure( + self.cluster, lsn_id, is_enabled, metadata_options) + except (p_exc.LsnNotFound, api_exc.NsxApiException): + err_msg = (_('Unable to configure metadata ' + 'for subnet %s') % subnet_id) + LOG.error(err_msg) + raise p_exc.NsxPluginException(err_msg=err_msg) + if is_enabled: + try: + # test that the lsn port exists + self.lsn_port_get(context, network_id, subnet_id) + except p_exc.LsnPortNotFound: + # this might happen if subnet had dhcp off when created + # so create one, and wire it + self.lsn_port_metadata_setup(context, lsn_id, subnet) + else: + self.lsn_port_dispose(context, network_id, const.METADATA_MAC) + + def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr): + lsn_id, lsn_port_id = self.lsn_port_get( + context, network_id, subnet_id, raise_on_err=False) + try: + if lsn_id and lsn_port_id: + hdlr(self.cluster, lsn_id, lsn_port_id, data) + except (n_exc.NotFound, api_exc.NsxApiException): + LOG.error(_('Error while configuring LSN ' + 'port %s'), lsn_port_id) + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host): + """Add dhcp host entry to LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_dhcp_host_add) + + def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host): + """Remove dhcp host entry from LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_dhcp_host_remove) + + def lsn_port_meta_host_add(self, context, network_id, subnet_id, host): + """Add dhcp host entry to LSN port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_metadata_host_add) + + def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host): + """Remove dhcp host entry from LSN 
port configuration.""" + self._lsn_port_host_conf(context, network_id, subnet_id, host, + lsn_api.lsn_port_metadata_host_remove) + + def lsn_port_update( + self, context, network_id, subnet_id, dhcp=None, meta=None): + """Update the specified configuration for the LSN port.""" + if not dhcp and not meta: + return + try: + lsn_id, lsn_port_id = self.lsn_port_get( + context, network_id, subnet_id, raise_on_err=False) + if dhcp and lsn_id and lsn_port_id: + lsn_api.lsn_port_host_entries_update( + self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp) + if meta and lsn_id and lsn_port_id: + lsn_api.lsn_port_host_entries_update( + self.cluster, lsn_id, lsn_port_id, META_CONF, meta) + except api_exc.NsxApiException: + raise p_exc.PortConfigurationError( + net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) + + +class PersistentLsnManager(LsnManager): + """Add local persistent state to LSN Manager.""" + + def __init__(self, plugin): + super(PersistentLsnManager, self).__init__(plugin) + self.sync_on_missing = cfg.CONF.NSX_LSN.sync_on_missing_data + + def lsn_get(self, context, network_id, raise_on_err=True): + try: + obj = lsn_db.lsn_get_for_network( + context, network_id, raise_on_err=raise_on_err) + return obj.lsn_id if obj else None + except p_exc.LsnNotFound: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + if self.sync_on_missing: + lsn_id = super(PersistentLsnManager, self).lsn_get( + context, network_id, raise_on_err=raise_on_err) + self.lsn_save(context, network_id, lsn_id) + return lsn_id + if raise_on_err: + ctxt.reraise = True + + def lsn_save(self, context, network_id, lsn_id): + """Save LSN-Network mapping to the DB.""" + try: + lsn_db.lsn_add(context, network_id, lsn_id) + except db_exc.DBError: + err_msg = _('Unable to save LSN for network %s') % network_id + LOG.exception(err_msg) + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_create(self, context, network_id): + lsn_id = super(PersistentLsnManager, + self).lsn_create(context, network_id) + try: + self.lsn_save(context, network_id, lsn_id) + except p_exc.NsxPluginException: + with excutils.save_and_reraise_exception(): + super(PersistentLsnManager, self).lsn_delete(context, lsn_id) + return lsn_id + + def lsn_delete(self, context, lsn_id): + lsn_db.lsn_remove(context, lsn_id) + super(PersistentLsnManager, self).lsn_delete(context, lsn_id) + + def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): + try: + obj = lsn_db.lsn_port_get_for_subnet( + context, subnet_id, raise_on_err=raise_on_err) + return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) + except p_exc.LsnPortNotFound: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + if self.sync_on_missing: + lsn_id, lsn_port_id = ( + super(PersistentLsnManager, self).lsn_port_get( + context, network_id, subnet_id, + raise_on_err=raise_on_err)) + mac_addr = lsn_api.lsn_port_info_get( + self.cluster, lsn_id, lsn_port_id)['mac_address'] + self.lsn_port_save( + context, lsn_port_id, subnet_id, mac_addr, lsn_id) + return (lsn_id, lsn_port_id) + if raise_on_err: + ctxt.reraise = True + + def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): + try: + obj = lsn_db.lsn_port_get_for_mac( + context, mac, raise_on_err=raise_on_err) + return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) + except p_exc.LsnPortNotFound: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + if self.sync_on_missing: + lsn_id, lsn_port_id = ( + 
super(PersistentLsnManager, self).lsn_port_get_by_mac( + context, network_id, mac, + raise_on_err=raise_on_err)) + subnet_id = lsn_api.lsn_port_info_get( + self.cluster, lsn_id, lsn_port_id).get('subnet_id') + self.lsn_port_save( + context, lsn_port_id, subnet_id, mac, lsn_id) + return (lsn_id, lsn_port_id) + if raise_on_err: + ctxt.reraise = True + + def lsn_port_save(self, context, lsn_port_id, subnet_id, mac_addr, lsn_id): + """Save LSN Port information to the DB.""" + try: + lsn_db.lsn_port_add_for_lsn( + context, lsn_port_id, subnet_id, mac_addr, lsn_id) + except db_exc.DBError: + err_msg = _('Unable to save LSN port for subnet %s') % subnet_id + LOG.exception(err_msg) + raise p_exc.NsxPluginException(err_msg=err_msg) + + def lsn_port_create(self, context, lsn_id, subnet_info): + lsn_port_id = super(PersistentLsnManager, + self).lsn_port_create(context, lsn_id, subnet_info) + try: + self.lsn_port_save(context, lsn_port_id, subnet_info['subnet_id'], + subnet_info['mac_address'], lsn_id) + except p_exc.NsxPluginException: + with excutils.save_and_reraise_exception(): + super(PersistentLsnManager, self).lsn_port_delete( + context, lsn_id, lsn_port_id) + return lsn_port_id + + def lsn_port_delete(self, context, lsn_id, lsn_port_id): + lsn_db.lsn_port_remove(context, lsn_port_id) + super(PersistentLsnManager, self).lsn_port_delete( + context, lsn_id, lsn_port_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/migration.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/migration.py new file mode 100644 index 00000000..0f1b32b7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/migration.py @@ -0,0 +1,180 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
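The PersistentLsnManager methods above all follow the same "sync on missing data" shape: look the LSN/LSN-port mapping up in the local DB and, on a miss, optionally recover it from the NSX backend and persist it. A minimal standalone sketch of that pattern follows; the class and callable names are illustrative only and are not part of the plugin's API.

class SyncOnMissingLookup(object):
    """Sketch: local-DB lookup with an optional backend fallback."""

    def __init__(self, db_get, backend_get, db_save, sync_on_missing=True):
        self.db_get = db_get                  # read the local mapping table
        self.backend_get = backend_get        # query the NSX backend
        self.db_save = db_save                # persist a recovered mapping
        self.sync_on_missing = sync_on_missing

    def get(self, key, raise_on_err=True):
        value = self.db_get(key)
        if value is not None:
            return value
        if self.sync_on_missing:
            value = self.backend_get(key)
            if value is not None:
                self.db_save(key, value)
                return value
        if raise_on_err:
            raise LookupError(key)
        return None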
+# + +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron.extensions import external_net +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import exceptions as p_exc +from neutron.plugins.vmware.dhcp_meta import nsx +from neutron.plugins.vmware.dhcp_meta import rpc + +LOG = logging.getLogger(__name__) + + +class DhcpMetadataBuilder(object): + + def __init__(self, plugin, agent_notifier): + self.plugin = plugin + self.notifier = agent_notifier + + def dhcp_agent_get_all(self, context, network_id): + """Return the agents managing the network.""" + return self.plugin.list_dhcp_agents_hosting_network( + context, network_id)['agents'] + + def dhcp_port_get_all(self, context, network_id): + """Return the dhcp ports allocated for the network.""" + filters = { + 'network_id': [network_id], + 'device_owner': [const.DEVICE_OWNER_DHCP] + } + return self.plugin.get_ports(context, filters=filters) + + def router_id_get(self, context, subnet=None): + """Return the id of the router whose interface is on the subnet, if any.""" + if not subnet: + return + network_id = subnet['network_id'] + filters = { + 'network_id': [network_id], + 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF] + } + ports = self.plugin.get_ports(context, filters=filters) + for port in ports: + if port['fixed_ips'][0]['subnet_id'] == subnet['id']: + return port['device_id'] + + def metadata_deallocate(self, context, router_id, subnet_id): + """Deallocate metadata services for the subnet.""" + interface = {'subnet_id': subnet_id} + self.plugin.remove_router_interface(context, router_id, interface) + + def metadata_allocate(self, context, router_id, subnet_id): + """Allocate metadata resources for the subnet via the router.""" + interface = {'subnet_id': subnet_id} + self.plugin.add_router_interface(context, router_id, interface) + + def dhcp_deallocate(self, context, network_id, agents, ports): + """Deallocate dhcp resources for the network.""" + for agent in agents: + self.plugin.remove_network_from_dhcp_agent( + context, agent['id'], network_id) + for port in ports: + try: + self.plugin.delete_port(context, port['id']) + except n_exc.PortNotFound: + LOG.error(_('Port %s is already gone'), port['id']) + + def dhcp_allocate(self, context, network_id, subnet): + """Allocate dhcp resources for the subnet.""" + # Create LSN resources + network_data = {'id': network_id} + nsx.handle_network_dhcp_access(self.plugin, context, + network_data, 'create_network') + if subnet: + subnet_data = {'subnet': subnet} + self.notifier.notify(context, subnet_data, 'subnet.create.end') + # Get DHCP host and metadata entries created for the LSN + port = { + 'network_id': network_id, + 'fixed_ips': [{'subnet_id': subnet['id']}] + } + self.notifier.notify(context, {'port': port}, 'port.update.end') + + +class MigrationManager(object): + + def __init__(self, plugin, lsn_manager, agent_notifier): + self.plugin = plugin + self.manager = lsn_manager + self.builder = DhcpMetadataBuilder(plugin, agent_notifier) + + def validate(self, context, network_id): + """Validate and return subnet's dhcp info for migration.""" + network = self.plugin.get_network(context, network_id) + + if self.manager.lsn_exists(context, network_id): + reason = _("LSN already exists") + raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) + + if network[external_net.EXTERNAL]: + reason = _("Cannot migrate an external network") + raise n_exc.BadRequest(resource='network', msg=reason) + + filters =
{'network_id': [network_id]} + subnets = self.plugin.get_subnets(context, filters=filters) + count = len(subnets) + if count == 0: + return None + elif count == 1 and subnets[0]['cidr'] == rpc.METADATA_SUBNET_CIDR: + reason = _("Cannot migrate a 'metadata' network") + raise n_exc.BadRequest(resource='network', msg=reason) + elif count > 1: + reason = _("Unable to support multiple subnets per network") + raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) + else: + return subnets[0] + + def migrate(self, context, network_id, subnet=None): + """Migrate subnet resources to LSN.""" + router_id = self.builder.router_id_get(context, subnet) + if router_id and subnet: + # Deallocate resources taken for the router, if any + self.builder.metadata_deallocate(context, router_id, subnet['id']) + if subnet: + # Deallocate resources taken for the agent, if any + agents = self.builder.dhcp_agent_get_all(context, network_id) + ports = self.builder.dhcp_port_get_all(context, network_id) + self.builder.dhcp_deallocate(context, network_id, agents, ports) + # (re)create the configuration for LSN + self.builder.dhcp_allocate(context, network_id, subnet) + if router_id and subnet: + # Allocate resources taken for the router, if any + self.builder.metadata_allocate(context, router_id, subnet['id']) + + def report(self, context, network_id, subnet_id=None): + """Return a report of the dhcp and metadata resources in use.""" + if subnet_id: + lsn_id, lsn_port_id = self.manager.lsn_port_get( + context, network_id, subnet_id, raise_on_err=False) + else: + filters = {'network_id': [network_id]} + subnets = self.plugin.get_subnets(context, filters=filters) + if subnets: + lsn_id, lsn_port_id = self.manager.lsn_port_get( + context, network_id, subnets[0]['id'], raise_on_err=False) + else: + lsn_id = self.manager.lsn_get(context, network_id, + raise_on_err=False) + lsn_port_id = None + if lsn_id: + ports = [lsn_port_id] if lsn_port_id else [] + report = { + 'type': 'lsn', + 'services': [lsn_id], + 'ports': ports + } + else: + agents = self.builder.dhcp_agent_get_all(context, network_id) + ports = self.builder.dhcp_port_get_all(context, network_id) + report = { + 'type': 'agent', + 'services': [a['id'] for a in agents], + 'ports': [p['id'] for p in ports] + } + return report diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/nsx.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/nsx.py new file mode 100644 index 00000000..5c1f3971 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/nsx.py @@ -0,0 +1,321 @@ +# Copyright 2013 VMware, Inc. + +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
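For readers of MigrationManager.report() above, the two result shapes it can return look roughly as follows; every identifier below is invented for illustration.

# Network whose DHCP/metadata is served by a Logical Service Node
lsn_report = {
    'type': 'lsn',
    'services': ['<lsn-uuid>'],      # the Logical Service Node id
    'ports': ['<lsn-port-uuid>'],    # empty list if no LSN port exists
}
# Network still served by DHCP agents
agent_report = {
    'type': 'agent',
    'services': ['<dhcp-agent-uuid>'],
    'ports': ['<dhcp-port-uuid>'],
}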
+# + +from oslo.config import cfg + +from neutron.api.v2 import attributes as attr +from neutron.common import constants as const +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.extensions import external_net +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import exceptions as p_exc +from neutron.plugins.vmware.dhcp_meta import constants as d_const +from neutron.plugins.vmware.nsxlib import lsn as lsn_api + +LOG = logging.getLogger(__name__) + + +dhcp_opts = [ + cfg.ListOpt('extra_domain_name_servers', + deprecated_group='NVP_DHCP', + default=[], + help=_('Comma separated list of additional ' + 'domain name servers')), + cfg.StrOpt('domain_name', + deprecated_group='NVP_DHCP', + default='openstacklocal', + help=_('Domain to use for building the hostnames')), + cfg.IntOpt('default_lease_time', default=43200, + deprecated_group='NVP_DHCP', + help=_("Default DHCP lease time")), +] + + +metadata_opts = [ + cfg.StrOpt('metadata_server_address', + deprecated_group='NVP_METADATA', + default='127.0.0.1', + help=_("IP address used by Metadata server.")), + cfg.IntOpt('metadata_server_port', + deprecated_group='NVP_METADATA', + default=8775, + help=_("TCP Port used by Metadata server.")), + cfg.StrOpt('metadata_shared_secret', + deprecated_group='NVP_METADATA', + default='', + help=_('Shared secret to sign instance-id request'), + secret=True) +] + + +def register_dhcp_opts(config): + config.CONF.register_opts(dhcp_opts, group="NSX_DHCP") + + +def register_metadata_opts(config): + config.CONF.register_opts(metadata_opts, group="NSX_METADATA") + + +class DhcpAgentNotifyAPI(object): + + def __init__(self, plugin, lsn_manager): + self.plugin = plugin + self.lsn_manager = lsn_manager + self._handle_subnet_dhcp_access = {'create': self._subnet_create, + 'update': self._subnet_update, + 'delete': self._subnet_delete} + + def notify(self, context, data, methodname): + [resource, action, _e] = methodname.split('.') + if resource == 'subnet': + self._handle_subnet_dhcp_access[action](context, data['subnet']) + elif resource == 'port' and action == 'update': + self._port_update(context, data['port']) + + def _port_update(self, context, port): + # With no fixed IP's there's nothing that can be updated + if not port["fixed_ips"]: + return + network_id = port['network_id'] + subnet_id = port["fixed_ips"][0]['subnet_id'] + filters = {'network_id': [network_id]} + # Because NSX does not support updating a single host entry we + # got to build the whole list from scratch and update in bulk + ports = self.plugin.get_ports(context, filters) + if not ports: + return + dhcp_conf = [ + {'mac_address': p['mac_address'], + 'ip_address': p["fixed_ips"][0]['ip_address']} + for p in ports if is_user_port(p) + ] + meta_conf = [ + {'instance_id': p['device_id'], + 'ip_address': p["fixed_ips"][0]['ip_address']} + for p in ports if is_user_port(p, check_dev_id=True) + ] + self.lsn_manager.lsn_port_update( + context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf) + + def _subnet_create(self, context, subnet, clean_on_err=True): + if subnet['enable_dhcp']: + network_id = subnet['network_id'] + # Create port for DHCP service + dhcp_port = { + "name": "", + "admin_state_up": True, + "device_id": "", + "device_owner": const.DEVICE_OWNER_DHCP, + "network_id": network_id, + "tenant_id": subnet["tenant_id"], + "mac_address": attr.ATTR_NOT_SPECIFIED, + 
"fixed_ips": [{"subnet_id": subnet['id']}] + } + try: + # This will end up calling handle_port_dhcp_access + # down below as well as handle_port_metadata_access + self.plugin.create_port(context, {'port': dhcp_port}) + except p_exc.PortConfigurationError as e: + err_msg = (_("Error while creating subnet %(cidr)s for " + "network %(network)s. Please, contact " + "administrator") % + {"cidr": subnet["cidr"], + "network": network_id}) + LOG.error(err_msg) + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + self.plugin, context, e.port_id) + if clean_on_err: + self.plugin.delete_subnet(context, subnet['id']) + raise n_exc.Conflict() + + def _subnet_update(self, context, subnet): + network_id = subnet['network_id'] + try: + lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get( + context, network_id, subnet['id']) + self.lsn_manager.lsn_port_dhcp_configure( + context, lsn_id, lsn_port_id, subnet) + except p_exc.LsnPortNotFound: + # It's possible that the subnet was created with dhcp off; + # check if the subnet was uplinked onto a router, and if so + # remove the patch attachment between the metadata port and + # the lsn port, in favor on the one we'll be creating during + # _subnet_create + self.lsn_manager.lsn_port_dispose( + context, network_id, d_const.METADATA_MAC) + # also, check that a dhcp port exists first and provision it + # accordingly + filters = dict(network_id=[network_id], + device_owner=[const.DEVICE_OWNER_DHCP]) + ports = self.plugin.get_ports(context, filters=filters) + if ports: + handle_port_dhcp_access( + self.plugin, context, ports[0], 'create_port') + else: + self._subnet_create(context, subnet, clean_on_err=False) + + def _subnet_delete(self, context, subnet): + # FIXME(armando-migliaccio): it looks like that a subnet filter + # is ineffective; so filter by network for now. 
+ network_id = subnet['network_id'] + filters = dict(network_id=[network_id], + device_owner=[const.DEVICE_OWNER_DHCP]) + # FIXME(armando-migliaccio): this may be race-y + ports = self.plugin.get_ports(context, filters=filters) + if ports: + # This will end up calling handle_port_dhcp_access + # down below as well as handle_port_metadata_access + self.plugin.delete_port(context, ports[0]['id']) + + +def is_user_port(p, check_dev_id=False): + usable = p['fixed_ips'] and p['device_owner'] not in d_const.SPECIAL_OWNERS + return usable if not check_dev_id else usable and p['device_id'] + + +def check_services_requirements(cluster): + ver = cluster.api_client.get_version() + # DHCP services in NSX are an experimental feature first available + # in version 4.1 + if ver.major >= 4 and ver.minor >= 1: + cluster_id = cfg.CONF.default_service_cluster_uuid + if not lsn_api.service_cluster_exists(cluster, cluster_id): + raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id) + else: + raise p_exc.InvalidVersion(version=ver) + + +def handle_network_dhcp_access(plugin, context, network, action): + LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") + % {"action": action, "resource": network}) + if action == 'create_network': + network_id = network['id'] + if network.get(external_net.EXTERNAL): + LOG.info(_("Network %s is external: no LSN to create"), network_id) + return + plugin.lsn_manager.lsn_create(context, network_id) + elif action == 'delete_network': + # NOTE(armando-migliaccio): on delete_network, network + # is just the network id + network_id = network + plugin.lsn_manager.lsn_delete_by_network(context, network_id) + LOG.info(_("Logical Services Node for network " + "%s configured successfully"), network_id) + + +def handle_port_dhcp_access(plugin, context, port, action): + LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") + % {"action": action, "resource": port}) + if port["device_owner"] == const.DEVICE_OWNER_DHCP: + network_id = port["network_id"] + if action == "create_port": + # at this point the port must have a subnet and a fixed ip + subnet_id = port["fixed_ips"][0]['subnet_id'] + subnet = plugin.get_subnet(context, subnet_id) + subnet_data = { + "mac_address": port["mac_address"], + "ip_address": subnet['cidr'], + "subnet_id": subnet['id'] + } + try: + plugin.lsn_manager.lsn_port_dhcp_setup( + context, network_id, port['id'], subnet_data, subnet) + except p_exc.PortConfigurationError: + err_msg = (_("Error while configuring DHCP for " + "port %s") % port['id']) + LOG.error(err_msg) + raise n_exc.NeutronException() + elif action == "delete_port": + plugin.lsn_manager.lsn_port_dispose(context, network_id, + port['mac_address']) + elif port["device_owner"] != const.DEVICE_OWNER_DHCP: + if port.get("fixed_ips"): + # do something only if there are IPs and dhcp is enabled + subnet_id = port["fixed_ips"][0]['subnet_id'] + if not plugin.get_subnet(context, subnet_id)['enable_dhcp']: + LOG.info(_("DHCP is disabled for subnet %s: nothing " + "to do"), subnet_id) + return + host_data = { + "mac_address": port["mac_address"], + "ip_address": port["fixed_ips"][0]['ip_address'] + } + network_id = port["network_id"] + if action == "create_port": + handler = plugin.lsn_manager.lsn_port_dhcp_host_add + elif action == "delete_port": + handler = plugin.lsn_manager.lsn_port_dhcp_host_remove + try: + handler(context, network_id, subnet_id, host_data) + except p_exc.PortConfigurationError: + with excutils.save_and_reraise_exception(): + if action ==
'create_port': + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + plugin, context, port['id']) + LOG.info(_("DHCP for port %s configured successfully"), port['id']) + + +def handle_port_metadata_access(plugin, context, port, is_delete=False): + if is_user_port(port, check_dev_id=True): + network_id = port["network_id"] + network = plugin.get_network(context, network_id) + if network[external_net.EXTERNAL]: + LOG.info(_("Network %s is external: nothing to do"), network_id) + return + subnet_id = port["fixed_ips"][0]['subnet_id'] + host_data = { + "instance_id": port["device_id"], + "tenant_id": port["tenant_id"], + "ip_address": port["fixed_ips"][0]['ip_address'] + } + LOG.info(_("Configuring metadata entry for port %s"), port) + if not is_delete: + handler = plugin.lsn_manager.lsn_port_meta_host_add + else: + handler = plugin.lsn_manager.lsn_port_meta_host_remove + try: + handler(context, network_id, subnet_id, host_data) + except p_exc.PortConfigurationError: + with excutils.save_and_reraise_exception(): + if not is_delete: + db_base_plugin_v2.NeutronDbPluginV2.delete_port( + plugin, context, port['id']) + LOG.info(_("Metadata for port %s configured successfully"), port['id']) + + +def handle_router_metadata_access(plugin, context, router_id, interface=None): + LOG.info(_("Handle metadata access via router: %(r)s and " + "interface %(i)s") % {'r': router_id, 'i': interface}) + if interface: + try: + plugin.get_port(context, interface['port_id']) + is_enabled = True + except n_exc.NotFound: + is_enabled = False + subnet_id = interface['subnet_id'] + try: + plugin.lsn_manager.lsn_metadata_configure( + context, subnet_id, is_enabled) + except p_exc.NsxPluginException: + with excutils.save_and_reraise_exception(): + if is_enabled: + l3_db.L3_NAT_db_mixin.remove_router_interface( + plugin, context, router_id, interface) + LOG.info(_("Metadata for router %s handled successfully"), router_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/rpc.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/rpc.py new file mode 100644 index 00000000..e4e06134 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcp_meta/rpc.py @@ -0,0 +1,220 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
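The host entries that handle_port_dhcp_access() and handle_port_metadata_access() above hand to the LSN manager have the following shapes; the sample values are invented.

# entry passed to lsn_port_dhcp_host_add / lsn_port_dhcp_host_remove
dhcp_host_entry = {
    'mac_address': 'fa:16:3e:12:34:56',
    'ip_address': '10.0.0.5',
}
# entry passed to lsn_port_meta_host_add / lsn_port_meta_host_remove
metadata_host_entry = {
    'instance_id': '<nova-instance-uuid>',
    'tenant_id': '<tenant-uuid>',
    'ip_address': '10.0.0.5',
}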
+# + +from eventlet import greenthread +import netaddr +from oslo.config import cfg + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as const +from neutron.common import exceptions as ntn_exc +from neutron.common import rpc as n_rpc +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import l3_db +from neutron.db import models_v2 +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import config +from neutron.plugins.vmware.common import exceptions as nsx_exc + +LOG = logging.getLogger(__name__) + +METADATA_DEFAULT_PREFIX = 30 +METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX +METADATA_GATEWAY_IP = '169.254.169.253' +METADATA_DHCP_ROUTE = '169.254.169.254/32' + + +class NSXRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin): + + RPC_API_VERSION = '1.1' + + +def handle_network_dhcp_access(plugin, context, network, action): + pass + + +def handle_port_dhcp_access(plugin, context, port_data, action): + active_port = (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT + and port_data.get('device_owner') == const.DEVICE_OWNER_DHCP + and port_data.get('fixed_ips', [])) + if active_port: + subnet_id = port_data['fixed_ips'][0]['subnet_id'] + subnet = plugin.get_subnet(context, subnet_id) + _notify_rpc_agent(context, {'subnet': subnet}, 'subnet.update.end') + + +def handle_port_metadata_access(plugin, context, port, is_delete=False): + if (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT and + port.get('device_owner') == const.DEVICE_OWNER_DHCP): + if port.get('fixed_ips', []) or is_delete: + fixed_ip = port['fixed_ips'][0] + query = context.session.query(models_v2.Subnet) + subnet = query.filter( + models_v2.Subnet.id == fixed_ip['subnet_id']).one() + # If subnet does not have a gateway do not create metadata + # route. This is done via the enable_isolated_metadata + # option if desired. + if not subnet.get('gateway_ip'): + LOG.info(_('Subnet %s does not have a gateway, the metadata ' + 'route will not be created'), subnet['id']) + return + metadata_routes = [r for r in subnet.routes + if r['destination'] == METADATA_DHCP_ROUTE] + if metadata_routes: + # We should have only a single metadata route at any time + # because the route logic forbids two routes with the same + # destination. 
Update next hop with the provided IP address + if not is_delete: + metadata_routes[0].nexthop = fixed_ip['ip_address'] + else: + context.session.delete(metadata_routes[0]) + else: + # add the metadata route + route = models_v2.SubnetRoute( + subnet_id=subnet.id, + destination=METADATA_DHCP_ROUTE, + nexthop=fixed_ip['ip_address']) + context.session.add(route) + + +def handle_router_metadata_access(plugin, context, router_id, interface=None): + if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT: + LOG.debug(_("Metadata access network is disabled")) + return + if not cfg.CONF.allow_overlapping_ips: + LOG.warn(_("Overlapping IPs must be enabled in order to setup " + "the metadata access network")) + return + ctx_elevated = context.elevated() + device_filter = {'device_id': [router_id], + 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} + # Retrieve ports calling database plugin + ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( + plugin, ctx_elevated, filters=device_filter) + try: + if ports: + if (interface and + not _find_metadata_port(plugin, ctx_elevated, ports)): + _create_metadata_access_network( + plugin, ctx_elevated, router_id) + elif len(ports) == 1: + # The only port left might be the metadata port + _destroy_metadata_access_network( + plugin, ctx_elevated, router_id, ports) + else: + LOG.debug(_("No router interface found for router '%s'. " + "No metadata access network should be " + "created or destroyed"), router_id) + # TODO(salvatore-orlando): A better exception handling in the + # NSX plugin would allow us to improve error handling here + except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, + api_exc.NsxApiException): + # Any exception here should be regarded as non-fatal + LOG.exception(_("An error occurred while operating on the " + "metadata access network for router:'%s'"), + router_id) + + +def _find_metadata_port(plugin, context, ports): + for port in ports: + for fixed_ip in port['fixed_ips']: + cidr = netaddr.IPNetwork( + plugin.get_subnet(context, fixed_ip['subnet_id'])['cidr']) + if cidr in netaddr.IPNetwork(METADATA_SUBNET_CIDR): + return port + + +def _create_metadata_access_network(plugin, context, router_id): + # Add network + # Network name is likely to be truncated on NSX + net_data = {'name': 'meta-%s' % router_id, + 'tenant_id': '', # intentionally not set + 'admin_state_up': True, + 'port_security_enabled': False, + 'shared': False, + 'status': const.NET_STATUS_ACTIVE} + meta_net = plugin.create_network(context, + {'network': net_data}) + greenthread.sleep(0) # yield + plugin.schedule_network(context, meta_net) + greenthread.sleep(0) # yield + # From this point on there will be resources to garbage-collect + # in case of failures + meta_sub = None + try: + # Add subnet + subnet_data = {'network_id': meta_net['id'], + 'tenant_id': '', # intentionally not set + 'name': 'meta-%s' % router_id, + 'ip_version': 4, + 'shared': False, + 'cidr': METADATA_SUBNET_CIDR, + 'enable_dhcp': True, + # Ensure default allocation pool is generated + 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, + 'gateway_ip': METADATA_GATEWAY_IP, + 'dns_nameservers': [], + 'host_routes': []} + meta_sub = plugin.create_subnet(context, + {'subnet': subnet_data}) + greenthread.sleep(0) # yield + plugin.add_router_interface(context, router_id, + {'subnet_id': meta_sub['id']}) + greenthread.sleep(0) # yield + # Tell to start the metadata agent proxy, only if we had success + _notify_rpc_agent(context, {'subnet': meta_sub}, 'subnet.create.end') + except 
(ntn_exc.NeutronException, + nsx_exc.NsxPluginException, + api_exc.NsxApiException): + # It is not necessary to explicitly delete the subnet + # as it will be removed with the network + plugin.delete_network(context, meta_net['id']) + + +def _destroy_metadata_access_network(plugin, context, router_id, ports): + if not ports: + return + meta_port = _find_metadata_port(plugin, context, ports) + if not meta_port: + return + meta_net_id = meta_port['network_id'] + meta_sub_id = meta_port['fixed_ips'][0]['subnet_id'] + plugin.remove_router_interface( + context, router_id, {'port_id': meta_port['id']}) + greenthread.sleep(0) # yield + context.session.expunge_all() + try: + # Remove network (this will remove the subnet too) + plugin.delete_network(context, meta_net_id) + greenthread.sleep(0) # yield + except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, + api_exc.NsxApiException): + # must re-add the router interface + plugin.add_router_interface(context, router_id, + {'subnet_id': meta_sub_id}) + # Tell to stop the metadata agent proxy + _notify_rpc_agent( + context, {'network': {'id': meta_net_id}}, 'network.delete.end') + + +def _notify_rpc_agent(context, payload, event): + if cfg.CONF.dhcp_agent_notification: + dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + dhcp_notifier.notify(context, payload, event) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcpmeta_modes.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcpmeta_modes.py new file mode 100644 index 00000000..6312700b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/dhcpmeta_modes.py @@ -0,0 +1,163 @@ +# Copyright 2013 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
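The metadata constants defined in rpc.py above fit together as a single /30 link-local subnet. A small standalone check, using only the netaddr package that rpc.py already imports:

import netaddr

METADATA_SUBNET_CIDR = '169.254.169.252/30'   # values copied from rpc.py above
METADATA_GATEWAY_IP = '169.254.169.253'
METADATA_DHCP_ROUTE = '169.254.169.254/32'

subnet = netaddr.IPNetwork(METADATA_SUBNET_CIDR)
# both the gateway and the metadata endpoint fall inside the /30
assert netaddr.IPAddress(METADATA_GATEWAY_IP) in subnet
assert netaddr.IPNetwork(METADATA_DHCP_ROUTE).ip in subnet
print(list(subnet))   # 169.254.169.252 .. 169.254.169.255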
+# + +from oslo.config import cfg + +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.common import constants as const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import config +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.dhcp_meta import combined +from neutron.plugins.vmware.dhcp_meta import lsnmanager +from neutron.plugins.vmware.dhcp_meta import migration +from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc +from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc +from neutron.plugins.vmware.extensions import lsn + +LOG = logging.getLogger(__name__) + + +class DhcpMetadataAccess(object): + + def setup_dhcpmeta_access(self): + """Initialize support for DHCP and Metadata services.""" + self._init_extensions() + if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT: + self._setup_rpc_dhcp_metadata() + mod = nsx_rpc + elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: + self._setup_nsx_dhcp_metadata() + mod = nsx_svc + elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: + notifier = self._setup_nsx_dhcp_metadata() + self._setup_rpc_dhcp_metadata(notifier=notifier) + mod = combined + else: + error = _("Invalid agent_mode: %s") % cfg.CONF.NSX.agent_mode + LOG.error(error) + raise nsx_exc.NsxPluginException(err_msg=error) + self.handle_network_dhcp_access_delegate = ( + mod.handle_network_dhcp_access + ) + self.handle_port_dhcp_access_delegate = ( + mod.handle_port_dhcp_access + ) + self.handle_port_metadata_access_delegate = ( + mod.handle_port_metadata_access + ) + self.handle_metadata_access_delegate = ( + mod.handle_router_metadata_access + ) + + def _setup_rpc_dhcp_metadata(self, notifier=None): + self.topic = topics.PLUGIN + self.conn = n_rpc.create_connection(new=True) + self.endpoints = [nsx_rpc.NSXRpcCallbacks(), + agents_db.AgentExtRpcCallback()] + self.conn.create_consumer(self.topic, self.endpoints, fanout=False) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + notifier or dhcp_rpc_agent_api.DhcpAgentNotifyAPI()) + self.conn.consume_in_threads() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.supported_extension_aliases.extend( + ['agent', 'dhcp_agent_scheduler']) + + def _setup_nsx_dhcp_metadata(self): + self._check_services_requirements() + nsx_svc.register_dhcp_opts(cfg) + nsx_svc.register_metadata_opts(cfg) + lsnmanager.register_lsn_opts(cfg) + lsn_manager = lsnmanager.PersistentLsnManager(self.safe_reference) + self.lsn_manager = lsn_manager + if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: + notifier = nsx_svc.DhcpAgentNotifyAPI(self.safe_reference, + lsn_manager) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier + # In agentless mode, ports whose owner is DHCP need to + # be special cased; so add it to the list of special + # owners list + if const.DEVICE_OWNER_DHCP not in self.port_special_owners: + self.port_special_owners.append(const.DEVICE_OWNER_DHCP) + elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: + # This becomes ineffective, as all new networks creations + # are handled by Logical Services Nodes in NSX + cfg.CONF.set_override('network_auto_schedule', False) + LOG.warn(_('network_auto_schedule has been disabled')) + notifier = combined.DhcpAgentNotifyAPI(self.safe_reference, + 
lsn_manager) + self.supported_extension_aliases.append(lsn.EXT_ALIAS) + # Add the capability to migrate dhcp and metadata services over + self.migration_manager = ( + migration.MigrationManager( + self.safe_reference, lsn_manager, notifier)) + return notifier + + def _init_extensions(self): + extensions = (lsn.EXT_ALIAS, 'agent', 'dhcp_agent_scheduler') + for ext in extensions: + if ext in self.supported_extension_aliases: + self.supported_extension_aliases.remove(ext) + + def _check_services_requirements(self): + try: + error = None + nsx_svc.check_services_requirements(self.cluster) + except nsx_exc.InvalidVersion: + error = _("Unable to run Neutron with config option '%s', as NSX " + "does not support it") % cfg.CONF.NSX.agent_mode + except nsx_exc.ServiceClusterUnavailable: + error = _("Unmet dependency for config option " + "'%s'") % cfg.CONF.NSX.agent_mode + if error: + LOG.exception(error) + raise nsx_exc.NsxPluginException(err_msg=error) + + def get_lsn(self, context, network_id, fields=None): + report = self.migration_manager.report(context, network_id) + return {'network': network_id, 'report': report} + + def create_lsn(self, context, lsn): + network_id = lsn['lsn']['network'] + subnet = self.migration_manager.validate(context, network_id) + subnet_id = None if not subnet else subnet['id'] + self.migration_manager.migrate(context, network_id, subnet) + r = self.migration_manager.report(context, network_id, subnet_id) + return {'network': network_id, 'report': r} + + def handle_network_dhcp_access(self, context, network, action): + self.handle_network_dhcp_access_delegate(self.safe_reference, context, + network, action) + + def handle_port_dhcp_access(self, context, port_data, action): + self.handle_port_dhcp_access_delegate(self.safe_reference, context, + port_data, action) + + def handle_port_metadata_access(self, context, port, is_delete=False): + self.handle_port_metadata_access_delegate(self.safe_reference, context, + port, is_delete) + + def handle_router_metadata_access(self, context, + router_id, interface=None): + self.handle_metadata_access_delegate(self.safe_reference, context, + router_id, interface) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/networkgw.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/networkgw.py new file mode 100644 index 00000000..28df9089 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/networkgw.py @@ -0,0 +1,249 @@ +# Copyright 2013 VMware. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
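setup_dhcpmeta_access() above boils down to a three-way dispatch on cfg.CONF.NSX.agent_mode. Summarized as a plain mapping; the literal mode strings are an assumption mirroring config.AgentModes, and the values name the delegate modules imported at the top of dhcpmeta_modes.py.

AGENT_MODE_DELEGATES = {
    'agent': 'nsx_rpc',      # classic DHCP/metadata agents driven over RPC
    'agentless': 'nsx_svc',  # Logical Service Nodes on the NSX backend
    'combined': 'combined',  # LSN backend plus RPC agent notifications
}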
+# + +import abc + +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.api.v2 import resource_helper +from neutron.plugins.vmware.common import utils + +GATEWAY_RESOURCE_NAME = "network_gateway" +DEVICE_RESOURCE_NAME = "gateway_device" +# Use dash for alias and collection name +EXT_ALIAS = GATEWAY_RESOURCE_NAME.replace('_', '-') +NETWORK_GATEWAYS = "%ss" % EXT_ALIAS +GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-') +DEVICE_ID_ATTR = 'id' +IFACE_NAME_ATTR = 'interface_name' + +# Attribute Map for Network Gateway Resource +# TODO(salvatore-orlando): add admin state as other neutron resources +RESOURCE_ATTRIBUTE_MAP = { + NETWORK_GATEWAYS: { + 'id': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'default': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'devices': {'allow_post': True, 'allow_put': False, + 'validate': {'type:device_list': None}, + 'is_visible': True}, + 'ports': {'allow_post': False, 'allow_put': False, + 'default': [], + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True} + }, + GATEWAY_DEVICES: { + 'id': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'client_certificate': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + 'connector_type': {'allow_post': True, 'allow_put': True, + 'validate': {'type:connector_type': None}, + 'is_visible': True}, + 'connector_ip': {'allow_post': True, 'allow_put': True, + 'validate': {'type:ip_address': None}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + } +} + + +def _validate_device_list(data, valid_values=None): + """Validate the list of service definitions.""" + if not data: + # Devices must be provided + msg = _("Cannot create a gateway with an empty device list") + return msg + try: + for device in data: + key_specs = {DEVICE_ID_ATTR: + {'type:regex': attributes.UUID_PATTERN, + 'required': True}, + IFACE_NAME_ATTR: + {'type:string': None, + 'required': False}} + err_msg = attributes._validate_dict( + device, key_specs=key_specs) + if err_msg: + return err_msg + unexpected_keys = [key for key in device if key not in key_specs] + if unexpected_keys: + err_msg = (_("Unexpected keys found in device description:%s") + % ",".join(unexpected_keys)) + return err_msg + except TypeError: + return (_("%s: provided data are not iterable") % + _validate_device_list.__name__) + + +def _validate_connector_type(data, valid_values=None): + if not data: + # A connector type is compulsory + msg = _("A connector type is required to create a gateway device") + return msg + connector_types = (valid_values if valid_values else + [utils.NetworkTypes.GRE, + utils.NetworkTypes.STT, + utils.NetworkTypes.BRIDGE, + 'ipsec%s' % utils.NetworkTypes.GRE, + 'ipsec%s' % utils.NetworkTypes.STT]) + if data not in connector_types: + msg = _("Unknown connector type: %s") % data + return msg + + +nw_gw_quota_opts = [ + cfg.IntOpt('quota_network_gateway', + 
default=5, + help=_('Number of network gateways allowed per tenant, ' + '-1 for unlimited')) +] + +cfg.CONF.register_opts(nw_gw_quota_opts, 'QUOTAS') + +attributes.validators['type:device_list'] = _validate_device_list +attributes.validators['type:connector_type'] = _validate_connector_type + + +class Networkgw(object): + """API extension for Layer-2 Gateway support. + + The Layer-2 gateway feature allows for connecting neutron networks + with external networks at the layer-2 level. No assumption is made on + the location of the external network, which might not even be directly + reachable from the hosts where the VMs are deployed. + + This is achieved by instantiating 'network gateways', and then connecting + Neutron network to them. + """ + + @classmethod + def get_name(cls): + return "Network Gateway" + + @classmethod + def get_alias(cls): + return EXT_ALIAS + + @classmethod + def get_description(cls): + return "Connects Neutron networks with external networks at layer 2." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/network-gateway/api/v1.0" + + @classmethod + def get_updated(cls): + return "2014-01-01T00:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + + member_actions = { + GATEWAY_RESOURCE_NAME.replace('_', '-'): { + 'connect_network': 'PUT', + 'disconnect_network': 'PUT'}} + + plural_mappings = resource_helper.build_plural_mappings( + {}, RESOURCE_ATTRIBUTE_MAP) + + return resource_helper.build_resource_info(plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + None, + action_map=member_actions, + register_quota=True, + translate_name=True) + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +class NetworkGatewayPluginBase(object): + + @abc.abstractmethod + def create_network_gateway(self, context, network_gateway): + pass + + @abc.abstractmethod + def update_network_gateway(self, context, id, network_gateway): + pass + + @abc.abstractmethod + def get_network_gateway(self, context, id, fields=None): + pass + + @abc.abstractmethod + def delete_network_gateway(self, context, id): + pass + + @abc.abstractmethod + def get_network_gateways(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + pass + + @abc.abstractmethod + def connect_network(self, context, network_gateway_id, + network_mapping_info): + pass + + @abc.abstractmethod + def disconnect_network(self, context, network_gateway_id, + network_mapping_info): + pass + + @abc.abstractmethod + def create_gateway_device(self, context, gateway_device): + pass + + @abc.abstractmethod + def update_gateway_device(self, context, id, gateway_device): + pass + + @abc.abstractmethod + def delete_gateway_device(self, context, id): + pass + + @abc.abstractmethod + def get_gateway_device(self, context, id, fields=None): + pass + + @abc.abstractmethod + def get_gateway_devices(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/nvp_qos.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/nvp_qos.py new file mode 100644 index 00000000..470f267b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/nvp_qos.py @@ -0,0 +1,40 @@ +# Copyright 2013 VMware, Inc. 
+# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# TODO(arosen): This is deprecated in Juno, and +# to be removed in Kxxxx. + +from neutron.plugins.vmware.extensions import qos + + +class Nvp_qos(qos.Qos): + """(Deprecated) Port Queue extension.""" + + @classmethod + def get_name(cls): + return "nvp-qos" + + @classmethod + def get_alias(cls): + return "nvp-qos" + + @classmethod + def get_description(cls): + return "NVP QoS extension (deprecated)." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/nvp-qos/api/v2.0" diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/qos.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/qos.py new file mode 100644 index 00000000..1904bcfd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/qos.py @@ -0,0 +1,229 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
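Looking back at the gateway-device validators in networkgw.py above, _validate_connector_type() only accepts a fixed set of connector types. A standalone mirror of that check, assuming utils.NetworkTypes uses the usual lowercase 'gre'/'stt'/'bridge' spellings:

VALID_CONNECTOR_TYPES = ('gre', 'stt', 'bridge', 'ipsecgre', 'ipsecstt')

def looks_like_valid_connector(connector_type):
    # mirrors _validate_connector_type(): empty and unknown values are rejected
    return bool(connector_type) and connector_type in VALID_CONNECTOR_TYPES

assert looks_like_valid_connector('stt')
assert not looks_like_valid_connector('vxlan')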
+# + +import abc + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import exceptions as qexception +from neutron import manager + + +# For policy.json/Auth +qos_queue_create = "create_qos_queue" +qos_queue_delete = "delete_qos_queue" +qos_queue_get = "get_qos_queue" +qos_queue_list = "get_qos_queues" + + +class DefaultQueueCreateNotAdmin(qexception.InUse): + message = _("Need to be admin in order to create queue called default") + + +class DefaultQueueAlreadyExists(qexception.InUse): + message = _("Default queue already exists.") + + +class QueueInvalidDscp(qexception.InvalidInput): + message = _("Invalid value for dscp %(data)s must be integer value" + " between 0 and 63.") + + +class QueueInvalidMarking(qexception.InvalidInput): + message = _("The qos marking cannot be set to 'trusted' " + "when the DSCP field is set") + + +class QueueMinGreaterMax(qexception.InvalidInput): + message = _("Invalid bandwidth rate, min greater than max.") + + +class QueueInvalidBandwidth(qexception.InvalidInput): + message = _("Invalid bandwidth rate, %(data)s must be a non negative" + " integer.") + + +class QueueNotFound(qexception.NotFound): + message = _("Queue %(id)s does not exist") + + +class QueueInUseByPort(qexception.InUse): + message = _("Unable to delete queue attached to port.") + + +class QueuePortBindingNotFound(qexception.NotFound): + message = _("Port is not associated with lqueue") + + +def convert_to_unsigned_int_or_none(val): + if val is None: + return + try: + val = int(val) + if val < 0: + raise ValueError + except (ValueError, TypeError): + msg = _("'%s' must be a non negative integer.") % val + raise qexception.InvalidInput(error_message=msg) + return val + + +def convert_to_unsigned_int_or_none_max_63(val): + val = convert_to_unsigned_int_or_none(val) + if val > 63: + raise QueueInvalidDscp(data=val) + return val + +# As per NSX API, if a queue is trusted, DSCP must be omitted; if a queue is +# untrusted, DSCP must be specified. Whichever default values we choose for +# the tuple (qos_marking, dscp), there will be at least one combination of a +# request with conflicting values: for instance given the default values below, +# requests with qos_marking = 'trusted' and the default dscp value will fail. +# In order to avoid API users to explicitly specify a setting for clearing +# the DSCP field when a trusted queue is created, the code serving this API +# will adopt the following behaviour when qos_marking is set to 'trusted': +# - if the DSCP attribute is set to the default value (0), silently drop +# its value +# - if the DSCP attribute is set to anything than 0 (but still a valid DSCP +# value) return a 400 error as qos_marking and DSCP setting conflict. 
+# TODO(salv-orlando): Evaluate whether it will be possible from a backward +# compatibility perspective to change the default value for DSCP in order to +# avoid this peculiar behaviour + +RESOURCE_ATTRIBUTE_MAP = { + 'qos_queues': { + 'id': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'default': {'allow_post': True, 'allow_put': False, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True, 'default': False}, + 'name': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'min': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': '0', + 'convert_to': convert_to_unsigned_int_or_none}, + 'max': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': None, + 'convert_to': convert_to_unsigned_int_or_none}, + 'qos_marking': {'allow_post': True, 'allow_put': False, + 'validate': {'type:values': ['untrusted', 'trusted']}, + 'default': 'untrusted', 'is_visible': True}, + 'dscp': {'allow_post': True, 'allow_put': False, + 'is_visible': True, 'default': '0', + 'convert_to': convert_to_unsigned_int_or_none_max_63}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:string': None}, + 'is_visible': True}, + }, +} + + +QUEUE = 'queue_id' +RXTX_FACTOR = 'rxtx_factor' +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': { + RXTX_FACTOR: {'allow_post': True, + # FIXME(arosen): the plugin currently does not + # implement updating rxtx factor on port. + 'allow_put': True, + 'is_visible': False, + 'default': 1, + 'enforce_policy': True, + 'convert_to': convert_to_unsigned_int_or_none}, + + QUEUE: {'allow_post': False, + 'allow_put': False, + 'is_visible': True, + 'default': False, + 'enforce_policy': True}}, + 'networks': {QUEUE: {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': False, + 'enforce_policy': True}} + +} + + +class Qos(object): + """Port Queue extension.""" + + @classmethod + def get_name(cls): + return "QoS Queue" + + @classmethod + def get_alias(cls): + return "qos-queue" + + @classmethod + def get_description(cls): + return "NSX QoS extension." 
+ + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/qos-queue/api/v2.0" + + @classmethod + def get_updated(cls): + return "2014-01-01T00:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + exts = [] + plugin = manager.NeutronManager.get_plugin() + resource_name = 'qos_queue' + collection_name = resource_name.replace('_', '-') + "s" + params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) + controller = base.create_resource(collection_name, + resource_name, + plugin, params, allow_bulk=False) + + ex = extensions.ResourceExtension(collection_name, + controller) + exts.append(ex) + + return exts + + def get_extended_resources(self, version): + if version == "2.0": + return dict(EXTENDED_ATTRIBUTES_2_0.items() + + RESOURCE_ATTRIBUTE_MAP.items()) + else: + return {} + + +class QueuePluginBase(object): + @abc.abstractmethod + def create_qos_queue(self, context, queue): + pass + + @abc.abstractmethod + def delete_qos_queue(self, context, id): + pass + + @abc.abstractmethod + def get_qos_queue(self, context, id, fields=None): + pass + + @abc.abstractmethod + def get_qos_queues(self, context, filters=None, fields=None, sorts=None, + limit=None, marker=None, page_reverse=False): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/servicerouter.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/servicerouter.py new file mode 100644 index 00000000..9033039b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/extensions/servicerouter.py @@ -0,0 +1,57 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron.api import extensions +from neutron.api.v2 import attributes + + +SERVICE_ROUTER = 'service_router' +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + SERVICE_ROUTER: {'allow_post': True, 'allow_put': False, + 'convert_to': attributes.convert_to_boolean, + 'default': False, 'is_visible': True}, + } +} + + +class Servicerouter(extensions.ExtensionDescriptor): + """Extension class supporting advanced service router.""" + + @classmethod + def get_name(cls): + return "Service Router" + + @classmethod + def get_alias(cls): + return "service-router" + + @classmethod + def get_description(cls): + return "Provides service router." + + @classmethod + def get_namespace(cls): + return "http://docs.openstack.org/ext/service-router/api/v1.0" + + @classmethod + def get_updated(cls): + return "2013-08-08T00:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/__init__.py new file mode 100644 index 00000000..b09460b5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/__init__.py @@ -0,0 +1,141 @@ +# Copyright 2014 VMware, Inc. 
+# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import exceptions as exception +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron import version + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" +# Prefix to be used for all NSX API calls +URI_PREFIX = "/ws.v1" +NEUTRON_VERSION = version.version_info.release_string() + +LOG = log.getLogger(__name__) + + +def _build_uri_path(resource, + resource_id=None, + parent_resource_id=None, + fields=None, + relations=None, + filters=None, + types=None, + is_attachment=False, + extra_action=None): + resources = resource.split('/') + res_path = resources[0] + (resource_id and "/%s" % resource_id or '') + if len(resources) > 1: + # There is also a parent resource to account for in the uri + res_path = "%s/%s/%s" % (resources[1], + parent_resource_id, + res_path) + if is_attachment: + res_path = "%s/attachment" % res_path + elif extra_action: + res_path = "%s/%s" % (res_path, extra_action) + params = [] + params.append(fields and "fields=%s" % fields) + params.append(relations and "relations=%s" % relations) + params.append(types and "types=%s" % types) + if filters: + params.extend(['%s=%s' % (k, v) for (k, v) in filters.iteritems()]) + uri_path = "%s/%s" % (URI_PREFIX, res_path) + non_empty_params = [x for x in params if x is not None] + if non_empty_params: + query_string = '&'.join(non_empty_params) + if query_string: + uri_path += "?%s" % query_string + return uri_path + + +def format_exception(etype, e, exception_locals): + """Consistent formatting for exceptions. + + :param etype: a string describing the exception type. + :param e: the exception. + :param exception_locals: calling context local variable dict. + :returns: a formatted string. + """ + msg = [_("Error. %(type)s exception: %(exc)s.") % + {'type': etype, 'exc': e}] + l = dict((k, v) for k, v in exception_locals.iteritems() + if k != 'request') + msg.append(_("locals=[%s]") % str(l)) + return ' '.join(msg) + + +def do_request(*args, **kwargs): + """Issue a request to the cluster specified in kwargs. + + :param args: a list of positional arguments. + :param kwargs: a dict of keyword arguments. + :returns: the result of the operation loaded into a python + object or None.
+ """ + cluster = kwargs["cluster"] + try: + res = cluster.api_client.request(*args) + if res: + return json.loads(res) + except api_exc.ResourceNotFound: + raise exception.NotFound() + except api_exc.ReadOnlyMode: + raise nsx_exc.MaintenanceInProgress() + + +def get_single_query_page(path, cluster, page_cursor=None, + page_length=1000, neutron_only=True): + params = [] + if page_cursor: + params.append("_page_cursor=%s" % page_cursor) + params.append("_page_length=%s" % page_length) + # NOTE(salv-orlando): On the NSX backend the 'Quantum' tag is still + # used for marking Neutron entities in order to preserve compatibility + if neutron_only: + params.append("tag_scope=quantum") + query_params = "&".join(params) + path = "%s%s%s" % (path, "&" if (path.find("?") != -1) else "?", + query_params) + body = do_request(HTTP_GET, path, cluster=cluster) + # Result_count won't be returned if _page_cursor is supplied + return body['results'], body.get('page_cursor'), body.get('result_count') + + +def get_all_query_pages(path, cluster): + need_more_results = True + result_list = [] + page_cursor = None + while need_more_results: + results, page_cursor = get_single_query_page( + path, cluster, page_cursor)[:2] + if not page_cursor: + need_more_results = False + result_list.extend(results) + return result_list + + +def mk_body(**kwargs): + """Convenience function creates and dumps dictionary to string. + + :param kwargs: the key/value pirs to be dumped into a json string. + :returns: a json string. + """ + return json.dumps(kwargs, ensure_ascii=False) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/l2gateway.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/l2gateway.py new file mode 100644 index 00000000..bd261f92 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/l2gateway.py @@ -0,0 +1,211 @@ +# Copyright 2014 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import switch + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +GWSERVICE_RESOURCE = "gateway-service" +TRANSPORTNODE_RESOURCE = "transport-node" + +LOG = log.getLogger(__name__) + + +def create_l2_gw_service(cluster, tenant_id, display_name, devices): + """Create a NSX Layer-2 Network Gateway Service. + + :param cluster: The target NSX cluster + :param tenant_id: Identifier of the Openstack tenant for which + the gateway service. 
+ :param display_name: Descriptive name of this gateway service + :param devices: List of transport node uuids (and network + interfaces on them) to use for the network gateway service + :raise NsxApiException: if there is a problem while communicating + with the NSX controller + """ + # NOTE(salvatore-orlando): This is a little confusing, but device_id in + # NSX is actually the identifier a physical interface on the gateway + # device, which in the Neutron API is referred as interface_name + gateways = [{"transport_node_uuid": device['id'], + "device_id": device['interface_name'], + "type": "L2Gateway"} for device in devices] + gwservice_obj = { + "display_name": utils.check_and_truncate(display_name), + "tags": utils.get_tags(os_tid=tenant_id), + "gateways": gateways, + "type": "L2GatewayServiceConfig" + } + return nsxlib.do_request( + HTTP_POST, nsxlib._build_uri_path(GWSERVICE_RESOURCE), + json.dumps(gwservice_obj), cluster=cluster) + + +def plug_l2_gw_service(cluster, lswitch_id, lport_id, + gateway_id, vlan_id=None): + """Plug a Layer-2 Gateway Attachment object in a logical port.""" + att_obj = {'type': 'L2GatewayAttachment', + 'l2_gateway_service_uuid': gateway_id} + if vlan_id: + att_obj['vlan_id'] = vlan_id + return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj) + + +def get_l2_gw_service(cluster, gateway_id): + return nsxlib.do_request( + HTTP_GET, nsxlib._build_uri_path(GWSERVICE_RESOURCE, + resource_id=gateway_id), + cluster=cluster) + + +def get_l2_gw_services(cluster, tenant_id=None, + fields=None, filters=None): + actual_filters = dict(filters or {}) + if tenant_id: + actual_filters['tag'] = tenant_id + actual_filters['tag_scope'] = 'os_tid' + return nsxlib.get_all_query_pages( + nsxlib._build_uri_path(GWSERVICE_RESOURCE, + filters=actual_filters), + cluster) + + +def update_l2_gw_service(cluster, gateway_id, display_name): + # TODO(salvatore-orlando): Allow updates for gateways too + gwservice_obj = get_l2_gw_service(cluster, gateway_id) + if not display_name: + # Nothing to update + return gwservice_obj + gwservice_obj["display_name"] = utils.check_and_truncate(display_name) + return nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(GWSERVICE_RESOURCE, + resource_id=gateway_id), + json.dumps(gwservice_obj), cluster=cluster) + + +def delete_l2_gw_service(cluster, gateway_id): + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(GWSERVICE_RESOURCE, + resource_id=gateway_id), + cluster=cluster) + + +def _build_gateway_device_body(tenant_id, display_name, neutron_id, + connector_type, connector_ip, + client_certificate, tz_uuid): + + connector_type_mappings = { + utils.NetworkTypes.STT: "STTConnector", + utils.NetworkTypes.GRE: "GREConnector", + utils.NetworkTypes.BRIDGE: "BridgeConnector", + 'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT", + 'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE"} + nsx_connector_type = connector_type_mappings.get(connector_type) + body = {"display_name": utils.check_and_truncate(display_name), + "tags": utils.get_tags(os_tid=tenant_id, + q_gw_dev_id=neutron_id), + "admin_status_enabled": True} + + if connector_ip and nsx_connector_type: + body["transport_connectors"] = [ + {"transport_zone_uuid": tz_uuid, + "ip_address": connector_ip, + "type": nsx_connector_type}] + + if client_certificate: + body["credential"] = {"client_certificate": + {"pem_encoded": client_certificate}, + "type": "SecurityCertificateCredential"} + return body + + +def create_gateway_device(cluster, tenant_id, display_name, neutron_id, + tz_uuid, 
connector_type, connector_ip, + client_certificate): + body = _build_gateway_device_body(tenant_id, display_name, neutron_id, + connector_type, connector_ip, + client_certificate, tz_uuid) + try: + return nsxlib.do_request( + HTTP_POST, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE), + json.dumps(body), cluster=cluster) + except api_exc.InvalidSecurityCertificate: + raise nsx_exc.InvalidSecurityCertificate() + + +def update_gateway_device(cluster, gateway_id, tenant_id, + display_name, neutron_id, + tz_uuid, connector_type, connector_ip, + client_certificate): + body = _build_gateway_device_body(tenant_id, display_name, neutron_id, + connector_type, connector_ip, + client_certificate, tz_uuid) + try: + return nsxlib.do_request( + HTTP_PUT, + nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE, + resource_id=gateway_id), + json.dumps(body), cluster=cluster) + except api_exc.InvalidSecurityCertificate: + raise nsx_exc.InvalidSecurityCertificate() + + +def delete_gateway_device(cluster, device_uuid): + return nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE, + device_uuid), + cluster=cluster) + + +def get_gateway_device_status(cluster, device_uuid): + status_res = nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + TRANSPORTNODE_RESOURCE, + device_uuid, + extra_action='status'), + cluster=cluster) + # Returns the connection status + return status_res['connection']['connected'] + + +def get_gateway_devices_status(cluster, tenant_id=None): + if tenant_id: + gw_device_query_path = nsxlib._build_uri_path( + TRANSPORTNODE_RESOURCE, + fields="uuid,tags", + relations="TransportNodeStatus", + filters={'tag': tenant_id, + 'tag_scope': 'os_tid'}) + else: + gw_device_query_path = nsxlib._build_uri_path( + TRANSPORTNODE_RESOURCE, + fields="uuid,tags", + relations="TransportNodeStatus") + + response = nsxlib.get_all_query_pages(gw_device_query_path, cluster) + results = {} + for item in response: + results[item['uuid']] = (item['_relations']['TransportNodeStatus'] + ['connection']['connected']) + return results diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/lsn.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/lsn.py new file mode 100644 index 00000000..686fd3f0 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/lsn.py @@ -0,0 +1,268 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
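+
+# NOTE: the helpers in this module wrap the NSX "logical services node"
+# (LSN) API used for DHCP and metadata services.  Illustrative usage sketch
+# (not exercised by this patch; ``cluster``, ``network_id`` and ``port_data``
+# are assumed to be supplied by the caller):
+#
+#     lsn_id = lsn_for_network_create(cluster, network_id)
+#     lsn_port_id = lsn_port_create(cluster, lsn_id, port_data)
+#     lsn_port_dhcp_configure(cluster, lsn_id, lsn_port_id,
+#                             dhcp_options={'domain_name': 'example.org'})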
+ +from neutron.common import exceptions as exception +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +SERVICECLUSTER_RESOURCE = "edge-cluster" +LSERVICESNODE_RESOURCE = "lservices-node" +LSERVICESNODEPORT_RESOURCE = "lport/%s" % LSERVICESNODE_RESOURCE +SUPPORTED_METADATA_OPTIONS = ['metadata_proxy_shared_secret'] + +LOG = log.getLogger(__name__) + + +def service_cluster_exists(cluster, svc_cluster_id): + exists = False + try: + exists = ( + svc_cluster_id and + nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + SERVICECLUSTER_RESOURCE, + resource_id=svc_cluster_id), + cluster=cluster) is not None) + except exception.NotFound: + pass + return exists + + +def lsn_for_network_create(cluster, network_id): + lsn_obj = { + "edge_cluster_uuid": cluster.default_service_cluster_uuid, + "tags": utils.get_tags(n_network_id=network_id) + } + return nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE), + json.dumps(lsn_obj), + cluster=cluster)["uuid"] + + +def lsn_for_network_get(cluster, network_id): + filters = {"tag": network_id, "tag_scope": "n_network_id"} + results = nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, + fields="uuid", + filters=filters), + cluster=cluster)['results'] + if not results: + raise exception.NotFound() + elif len(results) == 1: + return results[0]['uuid'] + + +def lsn_delete(cluster, lsn_id): + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, + resource_id=lsn_id), + cluster=cluster) + + +def lsn_port_host_entries_update( + cluster, lsn_id, lsn_port_id, conf, hosts_data): + hosts_obj = {'hosts': hosts_data} + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id, + extra_action=conf), + json.dumps(hosts_obj), + cluster=cluster) + + +def lsn_port_create(cluster, lsn_id, port_data): + port_obj = { + "ip_address": port_data["ip_address"], + "mac_address": port_data["mac_address"], + "tags": utils.get_tags(n_mac_address=port_data["mac_address"], + n_subnet_id=port_data["subnet_id"]), + "type": "LogicalServicesNodePortConfig", + } + return nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id), + json.dumps(port_obj), + cluster=cluster)["uuid"] + + +def lsn_port_delete(cluster, lsn_id, lsn_port_id): + return nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id), + cluster=cluster) + + +def _lsn_port_get(cluster, lsn_id, filters): + results = nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + fields="uuid", + filters=filters), + cluster=cluster)['results'] + if not results: + raise exception.NotFound() + elif len(results) == 1: + return results[0]['uuid'] + + +def lsn_port_by_mac_get(cluster, lsn_id, mac_address): + filters = {"tag": mac_address, "tag_scope": "n_mac_address"} + return _lsn_port_get(cluster, lsn_id, filters) + + +def lsn_port_by_subnet_get(cluster, lsn_id, subnet_id): + filters = {"tag": subnet_id, "tag_scope": "n_subnet_id"} + return 
_lsn_port_get(cluster, lsn_id, filters) + + +def lsn_port_info_get(cluster, lsn_id, lsn_port_id): + result = nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id), + cluster=cluster) + for tag in result['tags']: + if tag['scope'] == 'n_subnet_id': + result['subnet_id'] = tag['tag'] + break + return result + + +def lsn_port_plug_network(cluster, lsn_id, lsn_port_id, lswitch_port_id): + patch_obj = { + "type": "PatchAttachment", + "peer_port_uuid": lswitch_port_id + } + try: + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id, + is_attachment=True), + json.dumps(patch_obj), + cluster=cluster) + except api_exc.Conflict: + # This restriction might be lifted at some point + msg = (_("Attempt to plug Logical Services Node %(lsn)s into " + "network with port %(port)s failed. PatchAttachment " + "already exists with another port") % + {'lsn': lsn_id, 'port': lswitch_port_id}) + LOG.exception(msg) + raise nsx_exc.LsnConfigurationConflict(lsn_id=lsn_id) + + +def _lsn_configure_action( + cluster, lsn_id, action, is_enabled, obj): + lsn_obj = {"enabled": is_enabled} + lsn_obj.update(obj) + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, + resource_id=lsn_id, + extra_action=action), + json.dumps(lsn_obj), + cluster=cluster) + + +def _lsn_port_configure_action( + cluster, lsn_id, lsn_port_id, action, is_enabled, obj): + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, + resource_id=lsn_id, + extra_action=action), + json.dumps({"enabled": is_enabled}), + cluster=cluster) + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id, + extra_action=action), + json.dumps(obj), + cluster=cluster) + + +def _get_opts(name, value): + return {"name": name, "value": str(value)} + + +def lsn_port_dhcp_configure( + cluster, lsn_id, lsn_port_id, is_enabled=True, dhcp_options=None): + dhcp_options = dhcp_options or {} + opts = [_get_opts(key, val) for key, val in dhcp_options.iteritems()] + dhcp_obj = {'options': opts} + _lsn_port_configure_action( + cluster, lsn_id, lsn_port_id, 'dhcp', is_enabled, dhcp_obj) + + +def lsn_metadata_configure( + cluster, lsn_id, is_enabled=True, metadata_info=None): + meta_obj = { + 'metadata_server_ip': metadata_info['metadata_server_ip'], + 'metadata_server_port': metadata_info['metadata_server_port'], + } + if metadata_info: + opts = [ + _get_opts(opt, metadata_info[opt]) + for opt in SUPPORTED_METADATA_OPTIONS + if metadata_info.get(opt) + ] + if opts: + meta_obj["options"] = opts + _lsn_configure_action( + cluster, lsn_id, 'metadata-proxy', is_enabled, meta_obj) + + +def _lsn_port_host_action( + cluster, lsn_id, lsn_port_id, host_obj, extra_action, action): + nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, + parent_resource_id=lsn_id, + resource_id=lsn_port_id, + extra_action=extra_action, + filters={"action": action}), + json.dumps(host_obj), + cluster=cluster) + + +def lsn_port_dhcp_host_add(cluster, lsn_id, lsn_port_id, host_data): + _lsn_port_host_action( + cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'add_host') + + +def lsn_port_dhcp_host_remove(cluster, lsn_id, lsn_port_id, host_data): + _lsn_port_host_action( + cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'remove_host') + + +def lsn_port_metadata_host_add(cluster, lsn_id, 
lsn_port_id, host_data): + _lsn_port_host_action( + cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'add_host') + + +def lsn_port_metadata_host_remove(cluster, lsn_id, lsn_port_id, host_data): + _lsn_port_host_action(cluster, lsn_id, lsn_port_id, + host_data, 'metadata-proxy', 'remove_host') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/queue.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/queue.py new file mode 100644 index 00000000..708a210b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/queue.py @@ -0,0 +1,71 @@ +# Copyright 2014 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api.v2 import attributes as attr +from neutron.common import exceptions as exception +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib + +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" + +LQUEUE_RESOURCE = "lqueue" + +LOG = log.getLogger(__name__) + + +def create_lqueue(cluster, queue_data): + params = { + 'name': 'display_name', + 'qos_marking': 'qos_marking', + 'min': 'min_bandwidth_rate', + 'max': 'max_bandwidth_rate', + 'dscp': 'dscp' + } + queue_obj = dict( + (nsx_name, queue_data.get(api_name)) + for api_name, nsx_name in params.iteritems() + if attr.is_attr_set(queue_data.get(api_name)) + ) + if 'display_name' in queue_obj: + queue_obj['display_name'] = utils.check_and_truncate( + queue_obj['display_name']) + + queue_obj['tags'] = utils.get_tags() + try: + return nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LQUEUE_RESOURCE), + jsonutils.dumps(queue_obj), + cluster=cluster)['uuid'] + except api_exc.NsxApiException: + # FIXME(salv-orlando): This should not raise NeutronException + with excutils.save_and_reraise_exception(): + raise exception.NeutronException() + + +def delete_lqueue(cluster, queue_id): + try: + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LQUEUE_RESOURCE, + resource_id=queue_id), + cluster=cluster) + except Exception: + # FIXME(salv-orlando): This should not raise NeutronException + with excutils.save_and_reraise_exception(): + raise exception.NeutronException() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/router.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/router.py new file mode 100644 index 00000000..52d34299 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/router.py @@ -0,0 +1,689 @@ +# Copyright 2014 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.common import exceptions as exception +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib +from neutron.plugins.vmware.nsxlib import switch +from neutron.plugins.vmware.nsxlib import versioning + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +LROUTER_RESOURCE = "lrouter" +LROUTER_RESOURCE = "lrouter" +LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE +LROUTERRIB_RESOURCE = "rib/%s" % LROUTER_RESOURCE +LROUTERNAT_RESOURCE = "nat/lrouter" +# Constants for NAT rules +MATCH_KEYS = ["destination_ip_addresses", "destination_port_max", + "destination_port_min", "source_ip_addresses", + "source_port_max", "source_port_min", "protocol"] + +LOG = log.getLogger(__name__) + + +def _prepare_lrouter_body(name, neutron_router_id, tenant_id, + router_type, distributed=None, **kwargs): + body = { + "display_name": utils.check_and_truncate(name), + "tags": utils.get_tags(os_tid=tenant_id, + q_router_id=neutron_router_id), + "routing_config": { + "type": router_type + }, + "type": "LogicalRouterConfig", + "replication_mode": cfg.CONF.NSX.replication_mode, + } + # add the distributed key only if not None (ie: True or False) + if distributed is not None: + body['distributed'] = distributed + if kwargs: + body["routing_config"].update(kwargs) + return body + + +def _create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id, + display_name, nexthop, distributed=None): + implicit_routing_config = { + "default_route_next_hop": { + "gateway_ip_address": nexthop, + "type": "RouterNextHop" + }, + } + lrouter_obj = _prepare_lrouter_body( + display_name, neutron_router_id, tenant_id, + "SingleDefaultRouteImplicitRoutingConfig", + distributed=distributed, + **implicit_routing_config) + return nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LROUTER_RESOURCE), + jsonutils.dumps(lrouter_obj), cluster=cluster) + + +def create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id, + display_name, nexthop): + """Create a NSX logical router on the specified cluster. + + :param cluster: The target NSX cluster + :param tenant_id: Identifier of the Openstack tenant for which + the logical router is being created + :param display_name: Descriptive name of this logical router + :param nexthop: External gateway IP address for the logical router + :raise NsxApiException: if there is a problem while communicating + with the NSX controller + """ + return _create_implicit_routing_lrouter( + cluster, neutron_router_id, tenant_id, display_name, nexthop) + + +def create_implicit_routing_lrouter_with_distribution( + cluster, neutron_router_id, tenant_id, display_name, + nexthop, distributed=None): + """Create a NSX logical router on the specified cluster. 
+ + This function also allows for creating distributed lrouters + :param cluster: The target NSX cluster + :param tenant_id: Identifier of the Openstack tenant for which + the logical router is being created + :param display_name: Descriptive name of this logical router + :param nexthop: External gateway IP address for the logical router + :param distributed: True for distributed logical routers + :raise NsxApiException: if there is a problem while communicating + with the NSX controller + """ + return _create_implicit_routing_lrouter( + cluster, neutron_router_id, tenant_id, + display_name, nexthop, distributed) + + +def create_explicit_routing_lrouter(cluster, neutron_router_id, tenant_id, + display_name, nexthop, distributed=None): + lrouter_obj = _prepare_lrouter_body( + display_name, neutron_router_id, tenant_id, + "RoutingTableRoutingConfig", distributed=distributed) + router = nsxlib.do_request(HTTP_POST, + nsxlib._build_uri_path(LROUTER_RESOURCE), + jsonutils.dumps(lrouter_obj), cluster=cluster) + default_gw = {'prefix': '0.0.0.0/0', 'next_hop_ip': nexthop} + create_explicit_route_lrouter(cluster, router['uuid'], default_gw) + return router + + +def delete_lrouter(cluster, lrouter_id): + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LROUTER_RESOURCE, + resource_id=lrouter_id), + cluster=cluster) + + +def get_lrouter(cluster, lrouter_id): + return nsxlib.do_request(HTTP_GET, + nsxlib._build_uri_path( + LROUTER_RESOURCE, + resource_id=lrouter_id, + relations='LogicalRouterStatus'), + cluster=cluster) + + +def query_lrouters(cluster, fields=None, filters=None): + return nsxlib.get_all_query_pages( + nsxlib._build_uri_path(LROUTER_RESOURCE, + fields=fields, + relations='LogicalRouterStatus', + filters=filters), + cluster) + + +def get_lrouters(cluster, tenant_id, fields=None, filters=None): + # FIXME(salv-orlando): Fields parameter is ignored in this routine + actual_filters = {} + if filters: + actual_filters.update(filters) + if tenant_id: + actual_filters['tag'] = tenant_id + actual_filters['tag_scope'] = 'os_tid' + lrouter_fields = "uuid,display_name,fabric_status,tags" + return query_lrouters(cluster, lrouter_fields, actual_filters) + + +def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop): + lrouter_obj = get_lrouter(cluster, r_id) + if not display_name and not nexthop: + # Nothing to update + return lrouter_obj + # It seems that this is faster than the doing an if on display_name + lrouter_obj["display_name"] = (utils.check_and_truncate(display_name) or + lrouter_obj["display_name"]) + if nexthop: + nh_element = lrouter_obj["routing_config"].get( + "default_route_next_hop") + if nh_element: + nh_element["gateway_ip_address"] = nexthop + return nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LROUTER_RESOURCE, + resource_id=r_id), + jsonutils.dumps(lrouter_obj), + cluster=cluster) + + +def get_explicit_routes_lrouter(cluster, router_id, protocol_type='static'): + static_filter = {'protocol': protocol_type} + existing_routes = nsxlib.do_request( + HTTP_GET, + nsxlib._build_uri_path(LROUTERRIB_RESOURCE, + filters=static_filter, + fields="*", + parent_resource_id=router_id), + cluster=cluster)['results'] + return existing_routes + + +def delete_explicit_route_lrouter(cluster, router_id, route_id): + nsxlib.do_request(HTTP_DELETE, + nsxlib._build_uri_path(LROUTERRIB_RESOURCE, + resource_id=route_id, + parent_resource_id=router_id), + cluster=cluster) + + +def create_explicit_route_lrouter(cluster, router_id, route): + next_hop_ip = 
route.get("nexthop") or route.get("next_hop_ip") + prefix = route.get("destination") or route.get("prefix") + uuid = nsxlib.do_request( + HTTP_POST, + nsxlib._build_uri_path(LROUTERRIB_RESOURCE, + parent_resource_id=router_id), + jsonutils.dumps({ + "action": "accept", + "next_hop_ip": next_hop_ip, + "prefix": prefix, + "protocol": "static" + }), + cluster=cluster)['uuid'] + return uuid + + +def update_explicit_routes_lrouter(cluster, router_id, routes): + # Update in bulk: delete them all, and add the ones specified + # but keep track of what is been modified to allow roll-backs + # in case of failures + nsx_routes = get_explicit_routes_lrouter(cluster, router_id) + try: + deleted_routes = [] + added_routes = [] + # omit the default route (0.0.0.0/0) from the processing; + # this must be handled through the nexthop for the router + for route in nsx_routes: + prefix = route.get("destination") or route.get("prefix") + if prefix != '0.0.0.0/0': + delete_explicit_route_lrouter(cluster, + router_id, + route['uuid']) + deleted_routes.append(route) + for route in routes: + prefix = route.get("destination") or route.get("prefix") + if prefix != '0.0.0.0/0': + uuid = create_explicit_route_lrouter(cluster, + router_id, route) + added_routes.append(uuid) + except api_exc.NsxApiException: + LOG.exception(_('Cannot update NSX routes %(routes)s for ' + 'router %(router_id)s'), + {'routes': routes, 'router_id': router_id}) + # Roll back to keep NSX in consistent state + with excutils.save_and_reraise_exception(): + if nsx_routes: + if deleted_routes: + for route in deleted_routes: + create_explicit_route_lrouter(cluster, + router_id, route) + if added_routes: + for route_id in added_routes: + delete_explicit_route_lrouter(cluster, + router_id, route_id) + return nsx_routes + + +def get_default_route_explicit_routing_lrouter_v33(cluster, router_id): + static_filter = {"protocol": "static", + "prefix": "0.0.0.0/0"} + default_route = nsxlib.do_request( + HTTP_GET, + nsxlib._build_uri_path(LROUTERRIB_RESOURCE, + filters=static_filter, + fields="*", + parent_resource_id=router_id), + cluster=cluster)["results"][0] + return default_route + + +def get_default_route_explicit_routing_lrouter_v32(cluster, router_id): + # Scan all routes because 3.2 does not support query by prefix + all_routes = get_explicit_routes_lrouter(cluster, router_id) + for route in all_routes: + if route['prefix'] == '0.0.0.0/0': + return route + + +def update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop): + default_route = get_default_route_explicit_routing_lrouter(cluster, + router_id) + if next_hop != default_route["next_hop_ip"]: + new_default_route = {"action": "accept", + "next_hop_ip": next_hop, + "prefix": "0.0.0.0/0", + "protocol": "static"} + nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path( + LROUTERRIB_RESOURCE, + resource_id=default_route['uuid'], + parent_resource_id=router_id), + jsonutils.dumps(new_default_route), + cluster=cluster) + + +def update_explicit_routing_lrouter(cluster, router_id, + display_name, next_hop, routes=None): + update_implicit_routing_lrouter(cluster, router_id, display_name, next_hop) + if next_hop: + update_default_gw_explicit_routing_lrouter(cluster, + router_id, next_hop) + if routes is not None: + return update_explicit_routes_lrouter(cluster, router_id, routes) + + +def query_lrouter_lports(cluster, lr_uuid, fields="*", + filters=None, relations=None): + uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, + parent_resource_id=lr_uuid, + fields=fields, filters=filters, + 
relations=relations) + return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] + + +def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id, + display_name, admin_status_enabled, ip_addresses, + mac_address=None): + """Creates a logical port on the assigned logical router.""" + lport_obj = dict( + admin_status_enabled=admin_status_enabled, + display_name=display_name, + tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id), + ip_addresses=ip_addresses, + type="LogicalRouterPortConfig" + ) + # Only add the mac_address to lport_obj if present. This is because + # when creating the fake_ext_gw there is no mac_address present. + if mac_address: + lport_obj['mac_address'] = mac_address + path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, + parent_resource_id=lrouter_uuid) + result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj), + cluster=cluster) + + LOG.debug(_("Created logical port %(lport_uuid)s on " + "logical router %(lrouter_uuid)s"), + {'lport_uuid': result['uuid'], + 'lrouter_uuid': lrouter_uuid}) + return result + + +def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid, + tenant_id, neutron_port_id, display_name, + admin_status_enabled, ip_addresses): + """Updates a logical port on the assigned logical router.""" + lport_obj = dict( + admin_status_enabled=admin_status_enabled, + display_name=display_name, + tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id), + ip_addresses=ip_addresses, + type="LogicalRouterPortConfig" + ) + # Do not pass null items to NSX + for key in lport_obj.keys(): + if lport_obj[key] is None: + del lport_obj[key] + path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, + lrouter_port_uuid, + parent_resource_id=lrouter_uuid) + result = nsxlib.do_request(HTTP_PUT, path, + jsonutils.dumps(lport_obj), + cluster=cluster) + LOG.debug(_("Updated logical port %(lport_uuid)s on " + "logical router %(lrouter_uuid)s"), + {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid}) + return result + + +def delete_router_lport(cluster, lrouter_uuid, lport_uuid): + """Creates a logical port on the assigned logical router.""" + path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid, + lrouter_uuid) + nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) + LOG.debug(_("Delete logical router port %(lport_uuid)s on " + "logical router %(lrouter_uuid)s"), + {'lport_uuid': lport_uuid, + 'lrouter_uuid': lrouter_uuid}) + + +def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid): + nsx_port = switch.get_port(cluster, ls_uuid, lp_uuid, + relations="LogicalPortAttachment") + relations = nsx_port.get('_relations') + if relations: + att_data = relations.get('LogicalPortAttachment') + if att_data: + lrp_uuid = att_data.get('peer_port_uuid') + if lrp_uuid: + delete_router_lport(cluster, lr_uuid, lrp_uuid) + + +def find_router_gw_port(context, cluster, router_id): + """Retrieves the external gateway port for a NSX logical router.""" + + # Find the uuid of nsx ext gw logical router port + # TODO(salvatore-orlando): Consider storing it in Neutron DB + results = query_lrouter_lports( + cluster, router_id, + relations="LogicalPortAttachment") + for lport in results: + if '_relations' in lport: + attachment = lport['_relations'].get('LogicalPortAttachment') + if attachment and attachment.get('type') == 'L3GatewayAttachment': + return lport + + +def plug_router_port_attachment(cluster, router_id, port_id, + attachment_uuid, nsx_attachment_type, + attachment_vlan=None): + """Attach a router port 
to the given attachment. + + Current attachment types: + - PatchAttachment [-> logical switch port uuid] + - L3GatewayAttachment [-> L3GatewayService uuid] + For the latter attachment type a VLAN ID can be specified as well. + """ + uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id, + is_attachment=True) + attach_obj = {} + attach_obj["type"] = nsx_attachment_type + if nsx_attachment_type == "PatchAttachment": + attach_obj["peer_port_uuid"] = attachment_uuid + elif nsx_attachment_type == "L3GatewayAttachment": + attach_obj["l3_gateway_service_uuid"] = attachment_uuid + if attachment_vlan: + attach_obj['vlan_id'] = attachment_vlan + else: + raise nsx_exc.InvalidAttachmentType( + attachment_type=nsx_attachment_type) + return nsxlib.do_request( + HTTP_PUT, uri, jsonutils.dumps(attach_obj), cluster=cluster) + + +def _create_nat_match_obj(**kwargs): + nat_match_obj = {'ethertype': 'IPv4'} + delta = set(kwargs.keys()) - set(MATCH_KEYS) + if delta: + raise Exception(_("Invalid keys for NAT match: %s"), delta) + nat_match_obj.update(kwargs) + return nat_match_obj + + +def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj): + LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj) + uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, + parent_resource_id=router_id) + return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj), + cluster=cluster) + + +def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj): + return {"to_source_ip_address_min": min_src_ip, + "to_source_ip_address_max": max_src_ip, + "type": "SourceNatRule", + "match": nat_match_obj} + + +def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None): + LOG.info(_("No SNAT rules cannot be applied as they are not available in " + "this version of the NSX platform")) + + +def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None): + LOG.info(_("No DNAT rules cannot be applied as they are not available in " + "this version of the NSX platform")) + + +def create_lrouter_snat_rule_v2(cluster, router_id, + min_src_ip, max_src_ip, match_criteria=None): + + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj) + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip, + to_dst_port=None, match_criteria=None): + + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = { + "to_destination_ip_address_min": dst_ip, + "to_destination_ip_address_max": dst_ip, + "type": "DestinationNatRule", + "match": nat_match_obj + } + if to_dst_port: + nat_rule_obj['to_destination_port'] = to_dst_port + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None, + match_criteria=None): + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = { + "type": "NoSourceNatRule", + "match": nat_match_obj + } + if order: + nat_rule_obj['order'] = order + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_nodnat_rule_v3(cluster, router_id, order=None, + match_criteria=None): + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = { + "type": "NoDestinationNatRule", + "match": nat_match_obj + } + if order: + nat_rule_obj['order'] = order + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, 
max_src_ip, + order=None, match_criteria=None): + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj) + if order: + nat_rule_obj['order'] = order + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None, + order=None, match_criteria=None): + + nat_match_obj = _create_nat_match_obj(**match_criteria) + nat_rule_obj = { + "to_destination_ip_address": dst_ip, + "type": "DestinationNatRule", + "match": nat_match_obj + } + if to_dst_port: + nat_rule_obj['to_destination_port'] = to_dst_port + if order: + nat_rule_obj['order'] = order + return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) + + +def delete_nat_rules_by_match(cluster, router_id, rule_type, + max_num_expected, + min_num_expected=0, + **kwargs): + # remove nat rules + nat_rules = query_nat_rules(cluster, router_id) + to_delete_ids = [] + for r in nat_rules: + if (r['type'] != rule_type): + continue + + for key, value in kwargs.iteritems(): + if not (key in r['match'] and r['match'][key] == value): + break + else: + to_delete_ids.append(r['uuid']) + if not (len(to_delete_ids) in + range(min_num_expected, max_num_expected + 1)): + raise nsx_exc.NatRuleMismatch(actual_rules=len(to_delete_ids), + min_rules=min_num_expected, + max_rules=max_num_expected) + + for rule_id in to_delete_ids: + delete_router_nat_rule(cluster, router_id, rule_id) + + +def delete_router_nat_rule(cluster, router_id, rule_id): + uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id) + nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) + + +def query_nat_rules(cluster, router_id, fields="*", filters=None): + uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, + parent_resource_id=router_id, + fields=fields, filters=filters) + return nsxlib.get_all_query_pages(uri, cluster) + + +# NOTE(salvatore-orlando): The following FIXME applies in general to +# each operation on list attributes. 
+# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface +def update_lrouter_port_ips(cluster, lrouter_id, lport_id, + ips_to_add, ips_to_remove): + uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id) + try: + port = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) + # TODO(salvatore-orlando): Enforce ips_to_add intersection with + # ips_to_remove is empty + ip_address_set = set(port['ip_addresses']) + ip_address_set = ip_address_set - set(ips_to_remove) + ip_address_set = ip_address_set | set(ips_to_add) + # Set is not JSON serializable - convert to list + port['ip_addresses'] = list(ip_address_set) + nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(port), + cluster=cluster) + except exception.NotFound: + # FIXME(salv-orlando):avoid raising different exception + data = {'lport_id': lport_id, 'lrouter_id': lrouter_id} + msg = (_("Router Port %(lport_id)s not found on router " + "%(lrouter_id)s") % data) + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + except api_exc.NsxApiException as e: + msg = _("An exception occurred while updating IP addresses on a " + "router logical port:%s") % str(e) + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + + +ROUTER_FUNC_DICT = { + 'create_lrouter': { + 2: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, }, + 3: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, + 1: create_implicit_routing_lrouter_with_distribution, + 2: create_explicit_routing_lrouter, }, }, + 'update_lrouter': { + 2: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, }, + 3: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, + 2: update_explicit_routing_lrouter, }, }, + 'create_lrouter_dnat_rule': { + 2: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v2, }, + 3: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v3, }, }, + 'create_lrouter_snat_rule': { + 2: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v2, }, + 3: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v3, }, }, + 'create_lrouter_nosnat_rule': { + 2: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v2, }, + 3: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v3, }, }, + 'create_lrouter_nodnat_rule': { + 2: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v2, }, + 3: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v3, }, }, + 'get_default_route_explicit_routing_lrouter': { + 3: {versioning.DEFAULT_VERSION: + get_default_route_explicit_routing_lrouter_v32, + 2: get_default_route_explicit_routing_lrouter_v32, }, }, +} + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter(cluster, *args, **kwargs): + if kwargs.get('distributed', None): + v = cluster.api_client.get_version() + if (v.major, v.minor) < (3, 1): + raise nsx_exc.InvalidVersion(version=v) + return v + + +@versioning.versioned(ROUTER_FUNC_DICT) +def get_default_route_explicit_routing_lrouter(cluster, *args, **kwargs): + pass + + +@versioning.versioned(ROUTER_FUNC_DICT) +def update_lrouter(cluster, *args, **kwargs): + if kwargs.get('routes', None): + v = cluster.api_client.get_version() + if (v.major, v.minor) < (3, 2): + raise nsx_exc.InvalidVersion(version=v) + return v + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter_dnat_rule(cluster, *args, **kwargs): + pass + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter_snat_rule(cluster, *args, **kwargs): + pass + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter_nosnat_rule(cluster, *args, 
**kwargs): + pass + + +@versioning.versioned(ROUTER_FUNC_DICT) +def create_lrouter_nodnat_rule(cluster, *args, **kwargs): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/secgroup.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/secgroup.py new file mode 100644 index 00000000..6c9ba5e2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/secgroup.py @@ -0,0 +1,141 @@ +# Copyright 2014 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import constants +from neutron.common import exceptions +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +SECPROF_RESOURCE = "security-profile" + +LOG = log.getLogger(__name__) + + +def mk_body(**kwargs): + """Convenience function creates and dumps dictionary to string. + + :param kwargs: the key/value pirs to be dumped into a json string. + :returns: a json string. + """ + return json.dumps(kwargs, ensure_ascii=False) + + +def query_security_profiles(cluster, fields=None, filters=None): + return nsxlib.get_all_query_pages( + nsxlib._build_uri_path(SECPROF_RESOURCE, + fields=fields, + filters=filters), + cluster) + + +def create_security_profile(cluster, tenant_id, neutron_id, security_profile): + """Create a security profile on the NSX backend. + + :param cluster: a NSX cluster object reference + :param tenant_id: identifier of the Neutron tenant + :param neutron_id: neutron security group identifier + :param security_profile: dictionary with data for + configuring the NSX security profile. 
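+
+    Example (illustrative sketch only; ``cluster``, ``tenant_id`` and
+    ``secgroup`` are assumed to come from the calling plugin)::
+
+        create_security_profile(cluster, tenant_id, secgroup['id'],
+                                {'name': secgroup['name']})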
+ """ + path = "/ws.v1/security-profile" + # Allow all dhcp responses and all ingress traffic + hidden_rules = {'logical_port_egress_rules': + [{'ethertype': 'IPv4', + 'protocol': constants.PROTO_NUM_UDP, + 'port_range_min': constants.DHCP_RESPONSE_PORT, + 'port_range_max': constants.DHCP_RESPONSE_PORT, + 'ip_prefix': '0.0.0.0/0'}], + 'logical_port_ingress_rules': + [{'ethertype': 'IPv4'}, + {'ethertype': 'IPv6'}]} + display_name = utils.check_and_truncate(security_profile.get('name')) + # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for + # historical reasons + body = mk_body( + tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id), + display_name=display_name, + logical_port_ingress_rules=( + hidden_rules['logical_port_ingress_rules']), + logical_port_egress_rules=hidden_rules['logical_port_egress_rules'] + ) + rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster) + if security_profile.get('name') == 'default': + # If security group is default allow ip traffic between + # members of the same security profile is allowed and ingress traffic + # from the switch + rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4', + 'profile_uuid': rsp['uuid']}, + {'ethertype': 'IPv6', + 'profile_uuid': rsp['uuid']}], + 'logical_port_ingress_rules': [{'ethertype': 'IPv4'}, + {'ethertype': 'IPv6'}]} + + update_security_group_rules(cluster, rsp['uuid'], rules) + LOG.debug(_("Created Security Profile: %s"), rsp) + return rsp + + +def update_security_group_rules(cluster, spid, rules): + path = "/ws.v1/security-profile/%s" % spid + + # Allow all dhcp responses in + rules['logical_port_egress_rules'].append( + {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, + 'port_range_min': constants.DHCP_RESPONSE_PORT, + 'port_range_max': constants.DHCP_RESPONSE_PORT, + 'ip_prefix': '0.0.0.0/0'}) + # If there are no ingress rules add bunk rule to drop all ingress traffic + if not rules['logical_port_ingress_rules']: + rules['logical_port_ingress_rules'].append( + {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'}) + try: + body = mk_body( + logical_port_ingress_rules=rules['logical_port_ingress_rules'], + logical_port_egress_rules=rules['logical_port_egress_rules']) + rsp = nsxlib.do_request(HTTP_PUT, path, body, cluster=cluster) + except exceptions.NotFound as e: + LOG.error(nsxlib.format_exception("Unknown", e, locals())) + #FIXME(salvatore-orlando): This should not raise NeutronException + raise exceptions.NeutronException() + LOG.debug(_("Updated Security Profile: %s"), rsp) + return rsp + + +def update_security_profile(cluster, spid, name): + return nsxlib.do_request( + HTTP_PUT, + nsxlib._build_uri_path(SECPROF_RESOURCE, resource_id=spid), + json.dumps({"display_name": utils.check_and_truncate(name)}), + cluster=cluster) + + +def delete_security_profile(cluster, spid): + path = "/ws.v1/security-profile/%s" % spid + + try: + nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) + except exceptions.NotFound: + with excutils.save_and_reraise_exception(): + # This is not necessarily an error condition + LOG.warn(_("Unable to find security profile %s on NSX backend"), + spid) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/switch.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/switch.py new file mode 100644 index 00000000..e94791e6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/nsxlib/switch.py @@ -0,0 +1,397 @@ +# Copyright 2014 VMware, Inc. 
+# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo.config import cfg + +from neutron.common import constants +from neutron.common import exceptions as exception +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware import nsxlib + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" + +LSWITCH_RESOURCE = "lswitch" +LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE + +LOG = log.getLogger(__name__) + + +def _configure_extensions(lport_obj, mac_address, fixed_ips, + port_security_enabled, security_profiles, + queue_id, mac_learning_enabled, + allowed_address_pairs): + lport_obj['allowed_address_pairs'] = [] + if port_security_enabled: + for fixed_ip in fixed_ips: + ip_address = fixed_ip.get('ip_address') + if ip_address: + lport_obj['allowed_address_pairs'].append( + {'mac_address': mac_address, 'ip_address': ip_address}) + # add address pair allowing src_ip 0.0.0.0 to leave + # this is required for outgoing dhcp request + lport_obj["allowed_address_pairs"].append( + {"mac_address": mac_address, + "ip_address": "0.0.0.0"}) + lport_obj['security_profiles'] = list(security_profiles or []) + lport_obj['queue_uuid'] = queue_id + if mac_learning_enabled is not None: + lport_obj["mac_learning"] = mac_learning_enabled + lport_obj["type"] = "LogicalSwitchPortConfig" + for address_pair in list(allowed_address_pairs or []): + lport_obj['allowed_address_pairs'].append( + {'mac_address': address_pair['mac_address'], + 'ip_address': address_pair['ip_address']}) + + +def get_lswitch_by_id(cluster, lswitch_id): + try: + lswitch_uri_path = nsxlib._build_uri_path( + LSWITCH_RESOURCE, lswitch_id, + relations="LogicalSwitchStatus") + return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) + except exception.NotFound: + # FIXME(salv-orlando): this should not raise a neutron exception + raise exception.NetworkNotFound(net_id=lswitch_id) + + +def get_lswitches(cluster, neutron_net_id): + + def lookup_switches_by_tag(): + # Fetch extra logical switches + lswitch_query_path = nsxlib._build_uri_path( + LSWITCH_RESOURCE, + fields="uuid,display_name,tags,lport_count", + relations="LogicalSwitchStatus", + filters={'tag': neutron_net_id, + 'tag_scope': 'quantum_net_id'}) + return nsxlib.get_all_query_pages(lswitch_query_path, cluster) + + lswitch_uri_path = nsxlib._build_uri_path(LSWITCH_RESOURCE, neutron_net_id, + relations="LogicalSwitchStatus") + results = [] + try: + ls = nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) + results.append(ls) + for tag in ls['tags']: + if (tag['scope'] == "multi_lswitch" and + tag['tag'] == "True"): + results.extend(lookup_switches_by_tag()) + except exception.NotFound: + # This is legit if the neutron network was created using + # a post-Havana 
version of the plugin + results.extend(lookup_switches_by_tag()) + if results: + return results + else: + raise exception.NetworkNotFound(net_id=neutron_net_id) + + +def create_lswitch(cluster, neutron_net_id, tenant_id, display_name, + transport_zones_config, + shared=None, + **kwargs): + # The tag scope adopts a slightly different naming convention for + # historical reasons + lswitch_obj = {"display_name": utils.check_and_truncate(display_name), + "transport_zones": transport_zones_config, + "replication_mode": cfg.CONF.NSX.replication_mode, + "tags": utils.get_tags(os_tid=tenant_id, + quantum_net_id=neutron_net_id)} + # TODO(salv-orlando): Now that we have async status synchronization + # this tag is perhaps not needed anymore + if shared: + lswitch_obj["tags"].append({"tag": "true", + "scope": "shared"}) + if "tags" in kwargs: + lswitch_obj["tags"].extend(kwargs["tags"]) + uri = nsxlib._build_uri_path(LSWITCH_RESOURCE) + lswitch = nsxlib.do_request(HTTP_POST, uri, json.dumps(lswitch_obj), + cluster=cluster) + LOG.debug(_("Created logical switch: %s"), lswitch['uuid']) + return lswitch + + +def update_lswitch(cluster, lswitch_id, display_name, + tenant_id=None, **kwargs): + uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id) + lswitch_obj = {"display_name": utils.check_and_truncate(display_name)} + # NOTE: tag update will not 'merge' existing tags with new ones. + tags = [] + if tenant_id: + tags = utils.get_tags(os_tid=tenant_id) + # The 'tags' kwarg might existing and be None + tags.extend(kwargs.get('tags') or []) + if tags: + lswitch_obj['tags'] = tags + try: + return nsxlib.do_request(HTTP_PUT, uri, json.dumps(lswitch_obj), + cluster=cluster) + except exception.NotFound as e: + LOG.error(_("Network not found, Error: %s"), str(e)) + raise exception.NetworkNotFound(net_id=lswitch_id) + + +def delete_network(cluster, net_id, lswitch_id): + delete_networks(cluster, net_id, [lswitch_id]) + + +#TODO(salvatore-orlando): Simplify and harmonize +def delete_networks(cluster, net_id, lswitch_ids): + for ls_id in lswitch_ids: + path = "/ws.v1/lswitch/%s" % ls_id + try: + nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) + except exception.NotFound as e: + LOG.error(_("Network not found, Error: %s"), str(e)) + raise exception.NetworkNotFound(net_id=ls_id) + + +def query_lswitch_lports(cluster, ls_uuid, fields="*", + filters=None, relations=None): + # Fix filter for attachments + if filters and "attachment" in filters: + filters['attachment_vif_uuid'] = filters["attachment"] + del filters['attachment'] + uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, + parent_resource_id=ls_uuid, + fields=fields, + filters=filters, + relations=relations) + return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] + + +def delete_port(cluster, switch, port): + uri = "/ws.v1/lswitch/" + switch + "/lport/" + port + try: + nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) + except exception.NotFound: + LOG.exception(_("Port or Network not found")) + raise exception.PortNotFoundOnNetwork( + net_id=switch, port_id=port) + except api_exc.NsxApiException: + raise exception.NeutronException() + + +def get_ports(cluster, networks=None, devices=None, tenants=None): + vm_filter_obsolete = "" + vm_filter = "" + tenant_filter = "" + # This is used when calling delete_network. Neutron checks to see if + # the network has any ports. 
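+    # A single lswitch UUID (or the "*" wildcard) is selected first; the
+    # vm/tenant tag filters built below are then appended to the lport query,
+    # where an empty filter string adds no constraint.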
+ if networks: + # FIXME (Aaron) If we get more than one network_id this won't work + lswitch = networks[0] + else: + lswitch = "*" + if devices: + for device_id in devices: + vm_filter_obsolete = '&'.join( + ["tag_scope=vm_id", + "tag=%s" % utils.device_id_to_vm_id(device_id, + obfuscate=True), + vm_filter_obsolete]) + vm_filter = '&'.join( + ["tag_scope=vm_id", + "tag=%s" % utils.device_id_to_vm_id(device_id), + vm_filter]) + if tenants: + for tenant in tenants: + tenant_filter = '&'.join( + ["tag_scope=os_tid", + "tag=%s" % tenant, + tenant_filter]) + + nsx_lports = {} + lport_fields_str = ("tags,admin_status_enabled,display_name," + "fabric_status_up") + try: + lport_query_path_obsolete = ( + "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" + "&relations=LogicalPortStatus" % + (lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter)) + lport_query_path = ( + "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" + "&relations=LogicalPortStatus" % + (lswitch, lport_fields_str, vm_filter, tenant_filter)) + try: + # NOTE(armando-migliaccio): by querying with obsolete tag first + # current deployments won't take the performance hit of a double + # call. In release L-** or M-**, we might want to swap the calls + # as it's likely that ports with the new tag would outnumber the + # ones with the old tag + ports = nsxlib.get_all_query_pages(lport_query_path_obsolete, + cluster) + if not ports: + ports = nsxlib.get_all_query_pages(lport_query_path, cluster) + except exception.NotFound: + LOG.warn(_("Lswitch %s not found in NSX"), lswitch) + ports = None + + if ports: + for port in ports: + for tag in port["tags"]: + if tag["scope"] == "q_port_id": + nsx_lports[tag["tag"]] = port + except Exception: + err_msg = _("Unable to get ports") + LOG.exception(err_msg) + raise nsx_exc.NsxPluginException(err_msg=err_msg) + return nsx_lports + + +def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id): + """Get port by neutron tag. + + Returns the NSX UUID of the logical port with tag q_port_id equal to + neutron_port_id or None if the port is not Found. + """ + uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, + parent_resource_id=lswitch_uuid, + fields='uuid', + filters={'tag': neutron_port_id, + 'tag_scope': 'q_port_id'}) + LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' " + "on: '%(lswitch_uuid)s'"), + {'neutron_port_id': neutron_port_id, + 'lswitch_uuid': lswitch_uuid}) + res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) + num_results = len(res["results"]) + if num_results >= 1: + if num_results > 1: + LOG.warn(_("Found '%(num_ports)d' ports with " + "q_port_id tag: '%(neutron_port_id)s'. " + "Only 1 was expected."), + {'num_ports': num_results, + 'neutron_port_id': neutron_port_id}) + return res["results"][0] + + +def get_port(cluster, network, port, relations=None): + LOG.info(_("get_port() %(network)s %(port)s"), + {'network': network, 'port': port}) + uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?" 
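+    # The optional 'relations' argument (e.g. "LogicalPortAttachment") is
+    # passed through as a query parameter so NSX returns related objects
+    # inline with the port.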
+ if relations: + uri += "relations=%s" % relations + try: + return nsxlib.do_request(HTTP_GET, uri, cluster=cluster) + except exception.NotFound as e: + LOG.error(_("Port or Network not found, Error: %s"), str(e)) + raise exception.PortNotFoundOnNetwork( + port_id=port, net_id=network) + + +def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id, + display_name, device_id, admin_status_enabled, + mac_address=None, fixed_ips=None, port_security_enabled=None, + security_profiles=None, queue_id=None, + mac_learning_enabled=None, allowed_address_pairs=None): + lport_obj = dict( + admin_status_enabled=admin_status_enabled, + display_name=utils.check_and_truncate(display_name), + tags=utils.get_tags(os_tid=tenant_id, + q_port_id=neutron_port_id, + vm_id=utils.device_id_to_vm_id(device_id))) + + _configure_extensions(lport_obj, mac_address, fixed_ips, + port_security_enabled, security_profiles, + queue_id, mac_learning_enabled, + allowed_address_pairs) + + path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid + try: + result = nsxlib.do_request(HTTP_PUT, path, json.dumps(lport_obj), + cluster=cluster) + LOG.debug(_("Updated logical port %(result)s " + "on logical switch %(uuid)s"), + {'result': result['uuid'], 'uuid': lswitch_uuid}) + return result + except exception.NotFound as e: + LOG.error(_("Port or Network not found, Error: %s"), str(e)) + raise exception.PortNotFoundOnNetwork( + port_id=lport_uuid, net_id=lswitch_uuid) + + +def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id, + display_name, device_id, admin_status_enabled, + mac_address=None, fixed_ips=None, port_security_enabled=None, + security_profiles=None, queue_id=None, + mac_learning_enabled=None, allowed_address_pairs=None): + """Creates a logical port on the assigned logical switch.""" + display_name = utils.check_and_truncate(display_name) + lport_obj = dict( + admin_status_enabled=admin_status_enabled, + display_name=display_name, + tags=utils.get_tags(os_tid=tenant_id, + q_port_id=neutron_port_id, + vm_id=utils.device_id_to_vm_id(device_id)) + ) + + _configure_extensions(lport_obj, mac_address, fixed_ips, + port_security_enabled, security_profiles, + queue_id, mac_learning_enabled, + allowed_address_pairs) + + path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, + parent_resource_id=lswitch_uuid) + result = nsxlib.do_request(HTTP_POST, path, json.dumps(lport_obj), + cluster=cluster) + + LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"), + {'result': result['uuid'], 'uuid': lswitch_uuid}) + return result + + +def get_port_status(cluster, lswitch_id, port_id): + """Retrieve the operational status of the port.""" + try: + r = nsxlib.do_request(HTTP_GET, + "/ws.v1/lswitch/%s/lport/%s/status" % + (lswitch_id, port_id), cluster=cluster) + except exception.NotFound as e: + LOG.error(_("Port not found, Error: %s"), str(e)) + raise exception.PortNotFoundOnNetwork( + port_id=port_id, net_id=lswitch_id) + if r['link_status_up'] is True: + return constants.PORT_STATUS_ACTIVE + else: + return constants.PORT_STATUS_DOWN + + +def plug_interface(cluster, lswitch_id, lport_id, att_obj): + return nsxlib.do_request(HTTP_PUT, + nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, + lport_id, lswitch_id, + is_attachment=True), + json.dumps(att_obj), + cluster=cluster) + + +def plug_vif_interface( + cluster, lswitch_id, port_id, port_type, attachment=None): + """Plug a VIF Attachment object in a logical port.""" + lport_obj = {} + if attachment: + lport_obj["vif_uuid"] = attachment + + 
lport_obj["type"] = port_type + return plug_interface(cluster, lswitch_id, port_id, lport_obj) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/plugins/base.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/plugins/base.py new file mode 100644 index 00000000..3ffb4ed9 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/plugins/base.py @@ -0,0 +1,2528 @@ +# Copyright 2012 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import uuid + +from oslo.config import cfg +from oslo.db import exception as db_exc +from sqlalchemy import exc as sql_exc +from sqlalchemy.orm import exc as sa_exc +import webob.exc + +from neutron.api import extensions as neutron_extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron import context as q_context +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import external_net_db +from neutron.db import extraroute_db +from neutron.db import l3_db +from neutron.db import l3_gwmode_db +from neutron.db import models_v2 +from neutron.db import portbindings_db +from neutron.db import portsecurity_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_db +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import external_net as ext_net_extn +from neutron.extensions import extraroute +from neutron.extensions import l3 +from neutron.extensions import multiprovidernet as mpnet +from neutron.extensions import portbindings as pbin +from neutron.extensions import portsecurity as psec +from neutron.extensions import providernet as pnet +from neutron.extensions import securitygroup as ext_sg +from neutron.openstack.common import excutils +from neutron.openstack.common import lockutils +from neutron.plugins.common import constants as plugin_const +from neutron.plugins import vmware +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import config # noqa +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import nsx_utils +from neutron.plugins.vmware.common import securitygroups as sg_utils +from neutron.plugins.vmware.common import sync +from neutron.plugins.vmware.common import utils as c_utils +from neutron.plugins.vmware.dbexts import db as nsx_db +from neutron.plugins.vmware.dbexts import distributedrouter as dist_rtr +from neutron.plugins.vmware.dbexts import maclearning as mac_db +from neutron.plugins.vmware.dbexts import networkgw_db +from neutron.plugins.vmware.dbexts import qos_db +from neutron.plugins.vmware import dhcpmeta_modes +from neutron.plugins.vmware.extensions import maclearning as mac_ext +from 
neutron.plugins.vmware.extensions import networkgw +from neutron.plugins.vmware.extensions import qos +from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib +from neutron.plugins.vmware.nsxlib import queue as queuelib +from neutron.plugins.vmware.nsxlib import router as routerlib +from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib +from neutron.plugins.vmware.nsxlib import switch as switchlib + +LOG = logging.getLogger("NeutronPlugin") + +NSX_NOSNAT_RULES_ORDER = 10 +NSX_FLOATINGIP_NAT_RULES_ORDER = 224 +NSX_EXTGW_NAT_RULES_ORDER = 255 +NSX_DEFAULT_NEXTHOP = '1.1.1.1' + + +class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + db_base_plugin_v2.NeutronDbPluginV2, + dhcpmeta_modes.DhcpMetadataAccess, + dist_rtr.DistributedRouter_mixin, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + mac_db.MacLearningDbMixin, + networkgw_db.NetworkGatewayMixin, + portbindings_db.PortBindingMixin, + portsecurity_db.PortSecurityDbMixin, + qos_db.QoSDbMixin, + securitygroups_db.SecurityGroupDbMixin): + + supported_extension_aliases = ["allowed-address-pairs", + "binding", + "dist-router", + "ext-gw-mode", + "extraroute", + "mac-learning", + "multi-provider", + "network-gateway", + "nvp-qos", + "port-security", + "provider", + "qos-queue", + "quotas", + "external-net", + "router", + "security-group"] + + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + # Map nova zones to cluster for easy retrieval + novazone_cluster_map = {} + + def __init__(self): + super(NsxPluginV2, self).__init__() + config.validate_config_options() + # TODO(salv-orlando): Replace These dicts with + # collections.defaultdict for better handling of default values + # Routines for managing logical ports in NSX + self.port_special_owners = [l3_db.DEVICE_OWNER_ROUTER_GW, + l3_db.DEVICE_OWNER_ROUTER_INTF] + self._port_drivers = { + 'create': {l3_db.DEVICE_OWNER_ROUTER_GW: + self._nsx_create_ext_gw_port, + l3_db.DEVICE_OWNER_FLOATINGIP: + self._nsx_create_fip_port, + l3_db.DEVICE_OWNER_ROUTER_INTF: + self._nsx_create_router_port, + networkgw_db.DEVICE_OWNER_NET_GW_INTF: + self._nsx_create_l2_gw_port, + 'default': self._nsx_create_port}, + 'delete': {l3_db.DEVICE_OWNER_ROUTER_GW: + self._nsx_delete_ext_gw_port, + l3_db.DEVICE_OWNER_ROUTER_INTF: + self._nsx_delete_router_port, + l3_db.DEVICE_OWNER_FLOATINGIP: + self._nsx_delete_fip_port, + networkgw_db.DEVICE_OWNER_NET_GW_INTF: + self._nsx_delete_port, + 'default': self._nsx_delete_port} + } + + neutron_extensions.append_api_extensions_path([vmware.NSX_EXT_PATH]) + self.nsx_opts = cfg.CONF.NSX + self.nsx_sync_opts = cfg.CONF.NSX_SYNC + self.cluster = nsx_utils.create_nsx_cluster( + cfg.CONF, + self.nsx_opts.concurrent_connections, + self.nsx_opts.nsx_gen_timeout) + + self.base_binding_dict = { + pbin.VIF_TYPE: pbin.VIF_TYPE_OVS, + pbin.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + pbin.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases}} + + self._extend_fault_map() + self.setup_dhcpmeta_access() + # Set this flag to false as the default gateway has not + # been yet updated from the config file + self._is_default_net_gw_in_sync = False + # Create a synchronizer instance for backend sync + self._synchronizer = sync.NsxSynchronizer( + self.safe_reference, self.cluster, + self.nsx_sync_opts.state_sync_interval, + self.nsx_sync_opts.min_sync_req_delay, + 
self.nsx_sync_opts.min_chunk_size, + self.nsx_sync_opts.max_random_sync_delay) + + def _ensure_default_network_gateway(self): + if self._is_default_net_gw_in_sync: + return + # Add the gw in the db as default, and unset any previous default + def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid + try: + ctx = q_context.get_admin_context() + self._unset_default_network_gateways(ctx) + if not def_l2_gw_uuid: + return + try: + def_network_gw = self._get_network_gateway(ctx, + def_l2_gw_uuid) + except networkgw_db.GatewayNotFound: + # Create in DB only - don't go to backend + def_gw_data = {'id': def_l2_gw_uuid, + 'name': 'default L2 gateway service', + 'devices': []} + gw_res_name = networkgw.GATEWAY_RESOURCE_NAME.replace('-', '_') + def_network_gw = super( + NsxPluginV2, self).create_network_gateway( + ctx, {gw_res_name: def_gw_data}) + # In any case set is as default + self._set_default_network_gateway(ctx, def_network_gw['id']) + # Ensure this method is executed only once + self._is_default_net_gw_in_sync = True + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Unable to process default l2 gw service:%s"), + def_l2_gw_uuid) + + def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None): + """Build ip_addresses data structure for logical router port. + + No need to perform validation on IPs - this has already been + done in the l3_db mixin class. + """ + ip_addresses = [] + for ip in fixed_ips: + if not subnet_ids or (ip['subnet_id'] in subnet_ids): + subnet = self._get_subnet(context, ip['subnet_id']) + ip_prefix = '%s/%s' % (ip['ip_address'], + subnet['cidr'].split('/')[1]) + ip_addresses.append(ip_prefix) + return ip_addresses + + def _create_and_attach_router_port(self, cluster, context, + nsx_router_id, port_data, + attachment_type, attachment, + attachment_vlan=None, + subnet_ids=None): + # Use a fake IP address if gateway port is not 'real' + ip_addresses = (port_data.get('fake_ext_gw') and + ['0.0.0.0/31'] or + self._build_ip_address_list(context, + port_data['fixed_ips'], + subnet_ids)) + try: + lrouter_port = routerlib.create_router_lport( + cluster, nsx_router_id, port_data.get('tenant_id', 'fake'), + port_data.get('id', 'fake'), port_data.get('name', 'fake'), + port_data.get('admin_state_up', True), ip_addresses, + port_data.get('mac_address')) + LOG.debug(_("Created NSX router port:%s"), lrouter_port['uuid']) + except api_exc.NsxApiException: + LOG.exception(_("Unable to create port on NSX logical router %s"), + nsx_router_id) + raise nsx_exc.NsxPluginException( + err_msg=_("Unable to create logical router port for neutron " + "port id %(port_id)s on router %(nsx_router_id)s") % + {'port_id': port_data.get('id'), + 'nsx_router_id': nsx_router_id}) + self._update_router_port_attachment(cluster, context, nsx_router_id, + port_data, lrouter_port['uuid'], + attachment_type, attachment, + attachment_vlan) + return lrouter_port + + def _update_router_gw_info(self, context, router_id, info): + # NOTE(salvatore-orlando): We need to worry about rollback of NSX + # configuration in case of failures in the process + # Ref. 
LP bug 1102301 + router = self._get_router(context, router_id) + # Check whether SNAT rule update should be triggered + # NSX also supports multiple external networks so there is also + # the possibility that NAT rules should be replaced + current_ext_net_id = router.gw_port_id and router.gw_port.network_id + new_ext_net_id = info and info.get('network_id') + # SNAT should be enabled unless info['enable_snat'] is + # explicitly set to false + enable_snat = new_ext_net_id and info.get('enable_snat', True) + # Remove if ext net removed, changed, or if snat disabled + remove_snat_rules = (current_ext_net_id and + new_ext_net_id != current_ext_net_id or + router.enable_snat and not enable_snat) + # Add rules if snat is enabled, and if either the external network + # changed or snat was previously disabled + # NOTE: enable_snat == True implies new_ext_net_id != None + add_snat_rules = (enable_snat and + (new_ext_net_id != current_ext_net_id or + not router.enable_snat)) + router = super(NsxPluginV2, self)._update_router_gw_info( + context, router_id, info, router=router) + # Add/Remove SNAT rules as needed + # Create an elevated context for dealing with metadata access + # cidrs which are created within admin context + ctx_elevated = context.elevated() + if remove_snat_rules or add_snat_rules: + cidrs = self._find_router_subnets_cidrs(ctx_elevated, router_id) + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + if remove_snat_rules: + # Be safe and concede NAT rules might not exist. + # Therefore use min_num_expected=0 + for cidr in cidrs: + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "SourceNatRule", + max_num_expected=1, min_num_expected=0, + source_ip_addresses=cidr) + if add_snat_rules: + ip_addresses = self._build_ip_address_list( + ctx_elevated, router.gw_port['fixed_ips']) + # Set the SNAT rule for each subnet (only first IP) + for cidr in cidrs: + cidr_prefix = int(cidr.split('/')[1]) + routerlib.create_lrouter_snat_rule( + self.cluster, nsx_router_id, + ip_addresses[0].split('/')[0], + ip_addresses[0].split('/')[0], + order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix, + match_criteria={'source_ip_addresses': cidr}) + + def _update_router_port_attachment(self, cluster, context, + nsx_router_id, port_data, + nsx_router_port_id, + attachment_type, + attachment, + attachment_vlan=None): + if not nsx_router_port_id: + nsx_router_port_id = self._find_router_gw_port(context, port_data) + try: + routerlib.plug_router_port_attachment(cluster, nsx_router_id, + nsx_router_port_id, + attachment, + attachment_type, + attachment_vlan) + LOG.debug(_("Attached %(att)s to NSX router port %(port)s"), + {'att': attachment, 'port': nsx_router_port_id}) + except api_exc.NsxApiException: + # Must remove NSX logical port + routerlib.delete_router_lport(cluster, nsx_router_id, + nsx_router_port_id) + LOG.exception(_("Unable to plug attachment in NSX logical " + "router port %(r_port_id)s, associated with " + "Neutron %(q_port_id)s"), + {'r_port_id': nsx_router_port_id, + 'q_port_id': port_data.get('id')}) + raise nsx_exc.NsxPluginException( + err_msg=(_("Unable to plug attachment in router port " + "%(r_port_id)s for neutron port id %(q_port_id)s " + "on router %(router_id)s") % + {'r_port_id': nsx_router_port_id, + 'q_port_id': port_data.get('id'), + 'router_id': nsx_router_id})) + + def _get_port_by_device_id(self, context, device_id, device_owner): + """Retrieve ports associated with a specific device id. 
+ + Used for retrieving all neutron ports attached to a given router. + """ + port_qry = context.session.query(models_v2.Port) + return port_qry.filter_by( + device_id=device_id, + device_owner=device_owner,).all() + + def _find_router_subnets_cidrs(self, context, router_id): + """Retrieve subnets attached to the specified router.""" + ports = self._get_port_by_device_id(context, router_id, + l3_db.DEVICE_OWNER_ROUTER_INTF) + # No need to check for overlapping CIDRs + cidrs = [] + for port in ports: + for ip in port.get('fixed_ips', []): + cidrs.append(self._get_subnet(context, + ip.subnet_id).cidr) + return cidrs + + def _nsx_find_lswitch_for_port(self, context, port_data): + network = self._get_network(context, port_data['network_id']) + network_bindings = nsx_db.get_network_bindings( + context.session, port_data['network_id']) + max_ports = self.nsx_opts.max_lp_per_overlay_ls + allow_extra_lswitches = False + for network_binding in network_bindings: + if network_binding.binding_type in (c_utils.NetworkTypes.FLAT, + c_utils.NetworkTypes.VLAN): + max_ports = self.nsx_opts.max_lp_per_bridged_ls + allow_extra_lswitches = True + break + try: + return self._handle_lswitch_selection( + context, self.cluster, network, network_bindings, + max_ports, allow_extra_lswitches) + except api_exc.NsxApiException: + err_desc = _("An exception occurred while selecting logical " + "switch for the port") + LOG.exception(err_desc) + raise nsx_exc.NsxPluginException(err_msg=err_desc) + + def _nsx_create_port_helper(self, session, ls_uuid, port_data, + do_port_security=True): + # Convert Neutron security groups identifiers into NSX security + # profiles identifiers + nsx_sec_profile_ids = [ + nsx_utils.get_nsx_security_group_id( + session, self.cluster, neutron_sg_id) for + neutron_sg_id in (port_data[ext_sg.SECURITYGROUPS] or [])] + return switchlib.create_lport(self.cluster, + ls_uuid, + port_data['tenant_id'], + port_data['id'], + port_data['name'], + port_data['device_id'], + port_data['admin_state_up'], + port_data['mac_address'], + port_data['fixed_ips'], + port_data[psec.PORTSECURITY], + nsx_sec_profile_ids, + port_data.get(qos.QUEUE), + port_data.get(mac_ext.MAC_LEARNING), + port_data.get(addr_pair.ADDRESS_PAIRS)) + + def _handle_create_port_exception(self, context, port_id, + ls_uuid, lp_uuid): + with excutils.save_and_reraise_exception(): + # rollback nsx logical port only if it was successfully + # created on NSX. Should this command fail the original + # exception will be raised. + if lp_uuid: + # Remove orphaned port from NSX + switchlib.delete_port(self.cluster, ls_uuid, lp_uuid) + # rollback the neutron-nsx port mapping + nsx_db.delete_neutron_nsx_port_mapping(context.session, + port_id) + msg = (_("An exception occurred while creating the " + "neutron port %s on the NSX plaform") % port_id) + LOG.exception(msg) + + def _nsx_create_port(self, context, port_data): + """Driver for creating a logical switch port on NSX platform.""" + # FIXME(salvatore-orlando): On the NSX platform we do not really have + # external networks. So if as user tries and create a "regular" VIF + # port on an external network we are unable to actually create. + # However, in order to not break unit tests, we need to still create + # the DB object and return success + if self._network_is_external(context, port_data['network_id']): + LOG.info(_("NSX plugin does not support regular VIF ports on " + "external networks. 
Port %s will be down."), + port_data['network_id']) + # No need to actually update the DB state - the default is down + return port_data + lport = None + selected_lswitch = None + try: + selected_lswitch = self._nsx_find_lswitch_for_port(context, + port_data) + lport = self._nsx_create_port_helper(context.session, + selected_lswitch['uuid'], + port_data, + True) + nsx_db.add_neutron_nsx_port_mapping( + context.session, port_data['id'], + selected_lswitch['uuid'], lport['uuid']) + if port_data['device_owner'] not in self.port_special_owners: + switchlib.plug_vif_interface( + self.cluster, selected_lswitch['uuid'], + lport['uuid'], "VifAttachment", port_data['id']) + LOG.debug(_("_nsx_create_port completed for port %(name)s " + "on network %(network_id)s. The new port id is " + "%(id)s."), port_data) + except (api_exc.NsxApiException, n_exc.NeutronException): + self._handle_create_port_exception( + context, port_data['id'], + selected_lswitch and selected_lswitch['uuid'], + lport and lport['uuid']) + except db_exc.DBError as e: + if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and + isinstance(e.inner_exception, sql_exc.IntegrityError)): + msg = (_("Concurrent network deletion detected; Back-end Port " + "%(nsx_id)s creation to be rolled back for Neutron " + "port: %(neutron_id)s") + % {'nsx_id': lport['uuid'], + 'neutron_id': port_data['id']}) + LOG.warning(msg) + if selected_lswitch and lport: + try: + switchlib.delete_port(self.cluster, + selected_lswitch['uuid'], + lport['uuid']) + except n_exc.NotFound: + LOG.debug(_("NSX Port %s already gone"), lport['uuid']) + + def _nsx_delete_port(self, context, port_data): + # FIXME(salvatore-orlando): On the NSX platform we do not really have + # external networks. So deleting regular ports from external networks + # does not make sense. However we cannot raise as this would break + # unit tests. + if self._network_is_external(context, port_data['network_id']): + LOG.info(_("NSX plugin does not support regular VIF ports on " + "external networks. Port %s will be down."), + port_data['network_id']) + return + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, port_data['id']) + if not nsx_port_id: + LOG.debug(_("Port '%s' was already deleted on NSX platform"), id) + return + # TODO(bgh): if this is a bridged network and the lswitch we just got + # back will have zero ports after the delete we should garbage collect + # the lswitch. + try: + switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id) + LOG.debug(_("_nsx_delete_port completed for port %(port_id)s " + "on network %(net_id)s"), + {'port_id': port_data['id'], + 'net_id': port_data['network_id']}) + except n_exc.NotFound: + LOG.warning(_("Port %s not found in NSX"), port_data['id']) + + def _nsx_delete_router_port(self, context, port_data): + # Delete logical router port + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, port_data['device_id']) + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, port_data['id']) + if not nsx_port_id: + LOG.warn(_("Neutron port %(port_id)s not found on NSX backend. " + "Terminating delete operation. 
A dangling router port " + "might have been left on router %(router_id)s"), + {'port_id': port_data['id'], + 'router_id': nsx_router_id}) + return + try: + routerlib.delete_peer_router_lport(self.cluster, + nsx_router_id, + nsx_switch_id, + nsx_port_id) + except api_exc.NsxApiException: + # Do not raise because the issue might as well be that the + # router has already been deleted, so there would be nothing + # to do here + LOG.exception(_("Ignoring exception as this means the peer " + "for port '%s' has already been deleted."), + nsx_port_id) + + # Delete logical switch port + self._nsx_delete_port(context, port_data) + + def _nsx_create_router_port(self, context, port_data): + """Driver for creating a switch port to be connected to a router.""" + # No router ports on external networks! + if self._network_is_external(context, port_data['network_id']): + raise nsx_exc.NsxPluginException( + err_msg=(_("It is not allowed to create router interface " + "ports on external networks as '%s'") % + port_data['network_id'])) + ls_port = None + selected_lswitch = None + try: + selected_lswitch = self._nsx_find_lswitch_for_port( + context, port_data) + # Do not apply port security here! + ls_port = self._nsx_create_port_helper( + context.session, selected_lswitch['uuid'], + port_data, False) + # Assuming subnet being attached is on first fixed ip + # element in port data + subnet_id = port_data['fixed_ips'][0]['subnet_id'] + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, port_data['device_id']) + # Create peer port on logical router + self._create_and_attach_router_port( + self.cluster, context, nsx_router_id, port_data, + "PatchAttachment", ls_port['uuid'], + subnet_ids=[subnet_id]) + nsx_db.add_neutron_nsx_port_mapping( + context.session, port_data['id'], + selected_lswitch['uuid'], ls_port['uuid']) + LOG.debug(_("_nsx_create_router_port completed for port " + "%(name)s on network %(network_id)s. The new " + "port id is %(id)s."), + port_data) + except (api_exc.NsxApiException, n_exc.NeutronException): + self._handle_create_port_exception( + context, port_data['id'], + selected_lswitch and selected_lswitch['uuid'], + ls_port and ls_port['uuid']) + + def _find_router_gw_port(self, context, port_data): + router_id = port_data['device_id'] + if not router_id: + raise n_exc.BadRequest(_("device_id field must be populated in " + "order to create an external gateway " + "port for network %s"), + port_data['network_id']) + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + lr_port = routerlib.find_router_gw_port(context, self.cluster, + nsx_router_id) + if not lr_port: + raise nsx_exc.NsxPluginException( + err_msg=(_("The gateway port for the NSX router %s " + "was not found on the backend") + % nsx_router_id)) + return lr_port + + @lockutils.synchronized('vmware', 'neutron-') + def _nsx_create_ext_gw_port(self, context, port_data): + """Driver for creating an external gateway port on NSX platform.""" + # TODO(salvatore-orlando): Handle NSX resource + # rollback when something goes not quite as expected + lr_port = self._find_router_gw_port(context, port_data) + ip_addresses = self._build_ip_address_list(context, + port_data['fixed_ips']) + # This operation actually always updates a NSX logical port + # instead of creating one. This is because the gateway port + # is created at the same time as the NSX logical router, otherwise + # the fabric status of the NSX router will be down. 
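+        # The pre-created gateway port is retrieved via _find_router_gw_port
+        # above; only its IP addresses and attachment are refreshed below.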
+ # admin_status should always be up for the gateway port + # regardless of what the user specifies in neutron + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, port_data['device_id']) + routerlib.update_router_lport(self.cluster, + nsx_router_id, + lr_port['uuid'], + port_data['tenant_id'], + port_data['id'], + port_data['name'], + True, + ip_addresses) + ext_network = self.get_network(context, port_data['network_id']) + if ext_network.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.L3_EXT: + # Update attachment + physical_network = (ext_network[pnet.PHYSICAL_NETWORK] or + self.cluster.default_l3_gw_service_uuid) + self._update_router_port_attachment( + self.cluster, context, nsx_router_id, port_data, + lr_port['uuid'], + "L3GatewayAttachment", + physical_network, + ext_network[pnet.SEGMENTATION_ID]) + + LOG.debug(_("_nsx_create_ext_gw_port completed on external network " + "%(ext_net_id)s, attached to router:%(router_id)s. " + "NSX port id is %(nsx_port_id)s"), + {'ext_net_id': port_data['network_id'], + 'router_id': nsx_router_id, + 'nsx_port_id': lr_port['uuid']}) + + @lockutils.synchronized('vmware', 'neutron-') + def _nsx_delete_ext_gw_port(self, context, port_data): + lr_port = self._find_router_gw_port(context, port_data) + # TODO(salvatore-orlando): Handle NSX resource + # rollback when something goes not quite as expected + try: + # Delete is actually never a real delete, otherwise the NSX + # logical router will stop working + router_id = port_data['device_id'] + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + routerlib.update_router_lport(self.cluster, + nsx_router_id, + lr_port['uuid'], + port_data['tenant_id'], + port_data['id'], + port_data['name'], + True, + ['0.0.0.0/31']) + # Reset attachment + self._update_router_port_attachment( + self.cluster, context, nsx_router_id, port_data, + lr_port['uuid'], + "L3GatewayAttachment", + self.cluster.default_l3_gw_service_uuid) + + except api_exc.ResourceNotFound: + raise nsx_exc.NsxPluginException( + err_msg=_("Logical router resource %s not found " + "on NSX platform") % router_id) + except api_exc.NsxApiException: + raise nsx_exc.NsxPluginException( + err_msg=_("Unable to update logical router" + "on NSX Platform")) + LOG.debug(_("_nsx_delete_ext_gw_port completed on external network " + "%(ext_net_id)s, attached to NSX router:%(router_id)s"), + {'ext_net_id': port_data['network_id'], + 'router_id': nsx_router_id}) + + def _nsx_create_l2_gw_port(self, context, port_data): + """Create a switch port, and attach it to a L2 gateway attachment.""" + # FIXME(salvatore-orlando): On the NSX platform we do not really have + # external networks. So if as user tries and create a "regular" VIF + # port on an external network we are unable to actually create. + # However, in order to not break unit tests, we need to still create + # the DB object and return success + if self._network_is_external(context, port_data['network_id']): + LOG.info(_("NSX plugin does not support regular VIF ports on " + "external networks. 
Port %s will be down."), + port_data['network_id']) + # No need to actually update the DB state - the default is down + return port_data + lport = None + try: + selected_lswitch = self._nsx_find_lswitch_for_port( + context, port_data) + lport = self._nsx_create_port_helper( + context.session, + selected_lswitch['uuid'], + port_data, + True) + nsx_db.add_neutron_nsx_port_mapping( + context.session, port_data['id'], + selected_lswitch['uuid'], lport['uuid']) + l2gwlib.plug_l2_gw_service( + self.cluster, + selected_lswitch['uuid'], + lport['uuid'], + port_data['device_id'], + int(port_data.get('gw:segmentation_id') or 0)) + except Exception: + with excutils.save_and_reraise_exception(): + if lport: + switchlib.delete_port(self.cluster, + selected_lswitch['uuid'], + lport['uuid']) + LOG.debug(_("_nsx_create_l2_gw_port completed for port %(name)s " + "on network %(network_id)s. The new port id " + "is %(id)s."), port_data) + + def _nsx_create_fip_port(self, context, port_data): + # As we do not create ports for floating IPs in NSX, + # this is a no-op driver + pass + + def _nsx_delete_fip_port(self, context, port_data): + # As we do not create ports for floating IPs in NSX, + # this is a no-op driver + pass + + def _extend_fault_map(self): + """Extends the Neutron Fault Map. + + Exceptions specific to the NSX Plugin are mapped to standard + HTTP Exceptions. + """ + base.FAULT_MAP.update({nsx_exc.InvalidNovaZone: + webob.exc.HTTPBadRequest, + nsx_exc.NoMorePortsException: + webob.exc.HTTPBadRequest, + nsx_exc.MaintenanceInProgress: + webob.exc.HTTPServiceUnavailable, + nsx_exc.InvalidSecurityCertificate: + webob.exc.HTTPBadRequest}) + + def _validate_provider_create(self, context, network): + if not attr.is_attr_set(network.get(mpnet.SEGMENTS)): + return + + for segment in network[mpnet.SEGMENTS]: + network_type = segment.get(pnet.NETWORK_TYPE) + physical_network = segment.get(pnet.PHYSICAL_NETWORK) + segmentation_id = segment.get(pnet.SEGMENTATION_ID) + network_type_set = attr.is_attr_set(network_type) + segmentation_id_set = attr.is_attr_set(segmentation_id) + + err_msg = None + if not network_type_set: + err_msg = _("%s required") % pnet.NETWORK_TYPE + elif network_type in (c_utils.NetworkTypes.GRE, + c_utils.NetworkTypes.STT, + c_utils.NetworkTypes.FLAT): + if segmentation_id_set: + err_msg = _("Segmentation ID cannot be specified with " + "flat network type") + elif network_type == c_utils.NetworkTypes.VLAN: + if not segmentation_id_set: + err_msg = _("Segmentation ID must be specified with " + "vlan network type") + elif (segmentation_id_set and + not utils.is_valid_vlan_tag(segmentation_id)): + err_msg = (_("%(segmentation_id)s out of range " + "(%(min_id)s through %(max_id)s)") % + {'segmentation_id': segmentation_id, + 'min_id': constants.MIN_VLAN_TAG, + 'max_id': constants.MAX_VLAN_TAG}) + else: + # Verify segment is not already allocated + bindings = nsx_db.get_network_bindings_by_vlanid( + context.session, segmentation_id) + if bindings: + raise n_exc.VlanIdInUse( + vlan_id=segmentation_id, + physical_network=physical_network) + elif network_type == c_utils.NetworkTypes.L3_EXT: + if (segmentation_id_set and + not utils.is_valid_vlan_tag(segmentation_id)): + err_msg = (_("%(segmentation_id)s out of range " + "(%(min_id)s through %(max_id)s)") % + {'segmentation_id': segmentation_id, + 'min_id': constants.MIN_VLAN_TAG, + 'max_id': constants.MAX_VLAN_TAG}) + else: + err_msg = (_("%(net_type_param)s %(net_type_value)s not " + "supported") % + {'net_type_param': pnet.NETWORK_TYPE, + 
'net_type_value': network_type}) + if err_msg: + raise n_exc.InvalidInput(error_message=err_msg) + # TODO(salvatore-orlando): Validate tranport zone uuid + # which should be specified in physical_network + + def _extend_network_dict_provider(self, context, network, + multiprovider=None, bindings=None): + if not bindings: + bindings = nsx_db.get_network_bindings(context.session, + network['id']) + if not multiprovider: + multiprovider = nsx_db.is_multiprovider_network(context.session, + network['id']) + # With NSX plugin 'normal' overlay networks will have no binding + # TODO(salvatore-orlando) make sure users can specify a distinct + # phy_uuid as 'provider network' for STT net type + if bindings: + if not multiprovider: + # network came in through provider networks api + network[pnet.NETWORK_TYPE] = bindings[0].binding_type + network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid + network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id + else: + # network come in though multiprovider networks api + network[mpnet.SEGMENTS] = [ + {pnet.NETWORK_TYPE: binding.binding_type, + pnet.PHYSICAL_NETWORK: binding.phy_uuid, + pnet.SEGMENTATION_ID: binding.vlan_id} + for binding in bindings] + + def _handle_lswitch_selection(self, context, cluster, network, + network_bindings, max_ports, + allow_extra_lswitches): + lswitches = nsx_utils.fetch_nsx_switches( + context.session, cluster, network.id) + try: + return [ls for ls in lswitches + if (ls['_relations']['LogicalSwitchStatus'] + ['lport_count'] < max_ports)].pop(0) + except IndexError: + # Too bad, no switch available + LOG.debug(_("No switch has available ports (%d checked)"), + len(lswitches)) + if allow_extra_lswitches: + # The 'main' logical switch is either the only one available + # or the one where the 'multi_lswitch' tag was set + while lswitches: + main_ls = lswitches.pop(0) + tag_dict = dict((x['scope'], x['tag']) + for x in main_ls['tags']) + if 'multi_lswitch' in tag_dict: + break + else: + # by construction this statement is hit if there is only one + # logical switch and the multi_lswitch tag has not been set. + # The tag must therefore be added. 
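+                # Tagging the switch as 'multi_lswitch' marks it as the
+                # 'main' switch of the network before an extra lswitch is
+                # chained to it below.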
+ tags = main_ls['tags'] + tags.append({'tag': 'True', 'scope': 'multi_lswitch'}) + switchlib.update_lswitch(cluster, + main_ls['uuid'], + main_ls['display_name'], + network['tenant_id'], + tags=tags) + transport_zone_config = self._convert_to_nsx_transport_zones( + cluster, network, bindings=network_bindings) + selected_lswitch = switchlib.create_lswitch( + cluster, network.id, network.tenant_id, + "%s-ext-%s" % (network.name, len(lswitches)), + transport_zone_config) + # add a mapping between the neutron network and the newly + # created logical switch + nsx_db.add_neutron_nsx_network_mapping( + context.session, network.id, selected_lswitch['uuid']) + return selected_lswitch + else: + LOG.error(_("Maximum number of logical ports reached for " + "logical network %s"), network.id) + raise nsx_exc.NoMorePortsException(network=network.id) + + def _convert_to_nsx_transport_zones(self, cluster, network=None, + bindings=None): + nsx_transport_zones_config = [] + + # Convert fields from provider request to nsx format + if (network and not attr.is_attr_set( + network.get(mpnet.SEGMENTS))): + return [{"zone_uuid": cluster.default_tz_uuid, + "transport_type": cfg.CONF.NSX.default_transport_type}] + + # Convert fields from db to nsx format + if bindings: + transport_entry = {} + for binding in bindings: + if binding.binding_type in [c_utils.NetworkTypes.FLAT, + c_utils.NetworkTypes.VLAN]: + transport_entry['transport_type'] = ( + c_utils.NetworkTypes.BRIDGE) + transport_entry['binding_config'] = {} + vlan_id = binding.vlan_id + if vlan_id: + transport_entry['binding_config'] = ( + {'vlan_translation': [{'transport': vlan_id}]}) + else: + transport_entry['transport_type'] = binding.binding_type + transport_entry['zone_uuid'] = binding.phy_uuid + nsx_transport_zones_config.append(transport_entry) + return nsx_transport_zones_config + + for transport_zone in network.get(mpnet.SEGMENTS): + for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID]: + if transport_zone.get(value) == attr.ATTR_NOT_SPECIFIED: + transport_zone[value] = None + + transport_entry = {} + transport_type = transport_zone.get(pnet.NETWORK_TYPE) + if transport_type in [c_utils.NetworkTypes.FLAT, + c_utils.NetworkTypes.VLAN]: + transport_entry['transport_type'] = c_utils.NetworkTypes.BRIDGE + transport_entry['binding_config'] = {} + vlan_id = transport_zone.get(pnet.SEGMENTATION_ID) + if vlan_id: + transport_entry['binding_config'] = ( + {'vlan_translation': [{'transport': vlan_id}]}) + else: + transport_entry['transport_type'] = transport_type + transport_entry['zone_uuid'] = ( + transport_zone[pnet.PHYSICAL_NETWORK] or + cluster.default_tz_uuid) + nsx_transport_zones_config.append(transport_entry) + return nsx_transport_zones_config + + def _convert_to_transport_zones_dict(self, network): + """Converts the provider request body to multiprovider. + Returns: True if request is multiprovider False if provider + and None if neither. 
+ """ + if any(attr.is_attr_set(network.get(f)) + for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, + pnet.SEGMENTATION_ID)): + if attr.is_attr_set(network.get(mpnet.SEGMENTS)): + raise mpnet.SegmentsSetInConjunctionWithProviders() + # convert to transport zone list + network[mpnet.SEGMENTS] = [ + {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE], + pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK], + pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}] + del network[pnet.NETWORK_TYPE] + del network[pnet.PHYSICAL_NETWORK] + del network[pnet.SEGMENTATION_ID] + return False + if attr.is_attr_set(mpnet.SEGMENTS): + return True + + def create_network(self, context, network): + net_data = network['network'] + tenant_id = self._get_tenant_id_for_create(context, net_data) + self._ensure_default_security_group(context, tenant_id) + # Process the provider network extension + provider_type = self._convert_to_transport_zones_dict(net_data) + self._validate_provider_create(context, net_data) + # Replace ATTR_NOT_SPECIFIED with None before sending to NSX + for key, value in network['network'].iteritems(): + if value is attr.ATTR_NOT_SPECIFIED: + net_data[key] = None + # FIXME(arosen) implement admin_state_up = False in NSX + if net_data['admin_state_up'] is False: + LOG.warning(_("Network with admin_state_up=False are not yet " + "supported by this plugin. Ignoring setting for " + "network %s"), net_data.get('name', '')) + transport_zone_config = self._convert_to_nsx_transport_zones( + self.cluster, net_data) + external = net_data.get(ext_net_extn.EXTERNAL) + # NOTE(salv-orlando): Pre-generating uuid for Neutron + # network. This will be removed once the network create operation + # becomes an asynchronous task + net_data['id'] = str(uuid.uuid4()) + if (not attr.is_attr_set(external) or + attr.is_attr_set(external) and not external): + lswitch = switchlib.create_lswitch( + self.cluster, net_data['id'], + tenant_id, net_data.get('name'), + transport_zone_config, + shared=net_data.get(attr.SHARED)) + + with context.session.begin(subtransactions=True): + new_net = super(NsxPluginV2, self).create_network(context, + network) + # Process port security extension + self._process_network_port_security_create( + context, net_data, new_net) + # DB Operations for setting the network as external + self._process_l3_create(context, new_net, net_data) + # Process QoS queue extension + net_queue_id = net_data.get(qos.QUEUE) + if net_queue_id: + # Raises if not found + self.get_qos_queue(context, net_queue_id) + self._process_network_queue_mapping( + context, new_net, net_queue_id) + # Add mapping between neutron network and NSX switch + if (not attr.is_attr_set(external) or + attr.is_attr_set(external) and not external): + nsx_db.add_neutron_nsx_network_mapping( + context.session, new_net['id'], + lswitch['uuid']) + if (net_data.get(mpnet.SEGMENTS) and + isinstance(provider_type, bool)): + net_bindings = [] + for tz in net_data[mpnet.SEGMENTS]: + segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0) + segmentation_id_set = attr.is_attr_set(segmentation_id) + if not segmentation_id_set: + segmentation_id = 0 + net_bindings.append(nsx_db.add_network_binding( + context.session, new_net['id'], + tz.get(pnet.NETWORK_TYPE), + tz.get(pnet.PHYSICAL_NETWORK), + segmentation_id)) + if provider_type: + nsx_db.set_multiprovider_network(context.session, + new_net['id']) + self._extend_network_dict_provider(context, new_net, + provider_type, + net_bindings) + self.handle_network_dhcp_access(context, new_net, + 
action='create_network') + return new_net + + def delete_network(self, context, id): + external = self._network_is_external(context, id) + # Before deleting ports, ensure the peer of a NSX logical + # port with a patch attachment is removed too + port_filter = {'network_id': [id], + 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]} + router_iface_ports = self.get_ports(context, filters=port_filter) + for port in router_iface_ports: + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, id) + # Before removing entry from Neutron DB, retrieve NSX switch + # identifiers for removing them from backend + if not external: + lswitch_ids = nsx_utils.get_nsx_switch_ids( + context.session, self.cluster, id) + with context.session.begin(subtransactions=True): + self._process_l3_delete(context, id) + super(NsxPluginV2, self).delete_network(context, id) + + # clean up network owned ports + for port in router_iface_ports: + try: + if nsx_port_id: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, port['device_id']) + routerlib.delete_peer_router_lport(self.cluster, + nsx_router_id, + nsx_switch_id, + nsx_port_id) + else: + LOG.warning(_("A nsx lport identifier was not found for " + "neutron port '%s'. Unable to remove " + "the peer router port for this switch port"), + port['id']) + + except (TypeError, KeyError, + api_exc.NsxApiException, + api_exc.ResourceNotFound): + # Do not raise because the issue might as well be that the + # router has already been deleted, so there would be nothing + # to do here + LOG.warning(_("Ignoring exception as this means the peer for " + "port '%s' has already been deleted."), + nsx_port_id) + + # Do not go to NSX for external networks + if not external: + try: + switchlib.delete_networks(self.cluster, id, lswitch_ids) + LOG.debug(_("delete_network completed for tenant: %s"), + context.tenant_id) + except n_exc.NotFound: + LOG.warning(_("Did not found lswitch %s in NSX"), id) + self.handle_network_dhcp_access(context, id, action='delete_network') + + def get_network(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + # goto to the plugin DB and fetch the network + network = self._get_network(context, id) + if (self.nsx_sync_opts.always_read_status or + fields and 'status' in fields): + # External networks are not backed by nsx lswitches + if not network.external: + # Perform explicit state synchronization + self._synchronizer.synchronize_network(context, network) + # Don't do field selection here otherwise we won't be able + # to add provider networks fields + net_result = self._make_network_dict(network) + self._extend_network_dict_provider(context, net_result) + return self._fields(net_result, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + filters = filters or {} + with context.session.begin(subtransactions=True): + networks = ( + super(NsxPluginV2, self).get_networks( + context, filters, fields, sorts, + limit, marker, page_reverse)) + for net in networks: + self._extend_network_dict_provider(context, net) + return [self._fields(network, fields) for network in networks] + + def update_network(self, context, id, network): + pnet._raise_if_updates_provider_attributes(network['network']) + if network["network"].get("admin_state_up") is False: + raise NotImplementedError(_("admin_state_up=False networks " + "are not supported.")) + with 
context.session.begin(subtransactions=True): + net = super(NsxPluginV2, self).update_network(context, id, network) + if psec.PORTSECURITY in network['network']: + self._process_network_port_security_update( + context, network['network'], net) + net_queue_id = network['network'].get(qos.QUEUE) + if net_queue_id: + self._delete_network_queue_mapping(context, id) + self._process_network_queue_mapping(context, net, net_queue_id) + self._process_l3_update(context, net, network['network']) + self._extend_network_dict_provider(context, net) + # If provided, update port name on backend; treat backend failures as + # not critical (log error, but do not raise) + if 'name' in network['network']: + # in case of chained switches update name only for the first one + nsx_switch_ids = nsx_utils.get_nsx_switch_ids( + context.session, self.cluster, id) + if not nsx_switch_ids or len(nsx_switch_ids) < 1: + LOG.warn(_("Unable to find NSX mappings for neutron " + "network:%s"), id) + try: + switchlib.update_lswitch(self.cluster, + nsx_switch_ids[0], + network['network']['name']) + except api_exc.NsxApiException as e: + LOG.warn(_("Logical switch update on NSX backend failed. " + "Neutron network id:%(net_id)s; " + "NSX lswitch id:%(lswitch_id)s;" + "Error:%(error)s"), + {'net_id': id, 'lswitch_id': nsx_switch_ids[0], + 'error': e}) + + return net + + def create_port(self, context, port): + # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED + # then we pass the port to the policy engine. The reason why we don't + # pass the value to the policy engine when the port is + # ATTR_NOT_SPECIFIED is for the case where a port is created on a + # shared network that is not owned by the tenant. + port_data = port['port'] + # Set port status as 'DOWN'. This will be updated by backend sync. 
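+        # The port is first committed to the Neutron DB in the transaction
+        # below; the NSX backend port is created afterwards, and a backend
+        # failure either flags the port as ERROR or deletes the DB record.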
+ port_data['status'] = constants.PORT_STATUS_DOWN + with context.session.begin(subtransactions=True): + # First we allocate port in neutron database + neutron_db = super(NsxPluginV2, self).create_port(context, port) + neutron_port_id = neutron_db['id'] + # Update fields obtained from neutron db (eg: MAC address) + port["port"].update(neutron_db) + self.handle_port_metadata_access(context, neutron_db) + # port security extension checks + (port_security, has_ip) = self._determine_port_security_and_has_ip( + context, port_data) + port_data[psec.PORTSECURITY] = port_security + self._process_port_port_security_create( + context, port_data, neutron_db) + # allowed address pair checks + if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)): + if not port_security: + raise addr_pair.AddressPairAndPortSecurityRequired() + else: + self._process_create_allowed_address_pairs( + context, neutron_db, + port_data[addr_pair.ADDRESS_PAIRS]) + else: + # remove ATTR_NOT_SPECIFIED + port_data[addr_pair.ADDRESS_PAIRS] = None + + # security group extension checks + if port_security and has_ip: + self._ensure_default_security_group_on_port(context, port) + elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)): + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + port_data[ext_sg.SECURITYGROUPS] = ( + self._get_security_groups_on_port(context, port)) + self._process_port_create_security_group( + context, port_data, port_data[ext_sg.SECURITYGROUPS]) + # QoS extension checks + port_queue_id = self._check_for_queue_and_create( + context, port_data) + self._process_port_queue_mapping( + context, port_data, port_queue_id) + if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)): + self._create_mac_learning_state(context, port_data) + elif mac_ext.MAC_LEARNING in port_data: + port_data.pop(mac_ext.MAC_LEARNING) + self._process_portbindings_create_and_update(context, + port['port'], + port_data) + # DB Operation is complete, perform NSX operation + try: + port_data = port['port'].copy() + port_create_func = self._port_drivers['create'].get( + port_data['device_owner'], + self._port_drivers['create']['default']) + port_create_func(context, port_data) + LOG.debug(_("port created on NSX backend for tenant " + "%(tenant_id)s: (%(id)s)"), port_data) + except n_exc.NotFound: + LOG.warning(_("Logical switch for network %s was not " + "found in NSX."), port_data['network_id']) + # Put port in error on neutron DB + with context.session.begin(subtransactions=True): + port = self._get_port(context, neutron_port_id) + port_data['status'] = constants.PORT_STATUS_ERROR + port['status'] = port_data['status'] + context.session.add(port) + except Exception: + # Port must be removed from neutron DB + with excutils.save_and_reraise_exception(): + LOG.error(_("Unable to create port or set port " + "attachment in NSX.")) + with context.session.begin(subtransactions=True): + self._delete_port(context, neutron_port_id) + + self.handle_port_dhcp_access(context, port_data, action='create_port') + return port_data + + def update_port(self, context, id, port): + delete_security_groups = self._check_update_deletes_security_groups( + port) + has_security_groups = self._check_update_has_security_groups(port) + delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( + port) + has_addr_pairs = self._check_update_has_allowed_address_pairs(port) + + with context.session.begin(subtransactions=True): + ret_port = super(NsxPluginV2, self).update_port( + context, id, port) + # Save current mac learning state to check 
whether it's + # being updated or not + old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING) + # copy values over - except fixed_ips as + # they've already been processed + port['port'].pop('fixed_ips', None) + ret_port.update(port['port']) + tenant_id = self._get_tenant_id_for_create(context, ret_port) + + # populate port_security setting + if psec.PORTSECURITY not in port['port']: + ret_port[psec.PORTSECURITY] = self._get_port_security_binding( + context, id) + has_ip = self._ip_on_port(ret_port) + # validate port security and allowed address pairs + if not ret_port[psec.PORTSECURITY]: + # has address pairs in request + if has_addr_pairs: + raise addr_pair.AddressPairAndPortSecurityRequired() + elif not delete_addr_pairs: + # check if address pairs are in db + ret_port[addr_pair.ADDRESS_PAIRS] = ( + self.get_allowed_address_pairs(context, id)) + if ret_port[addr_pair.ADDRESS_PAIRS]: + raise addr_pair.AddressPairAndPortSecurityRequired() + + if (delete_addr_pairs or has_addr_pairs): + # delete address pairs and read them in + self._delete_allowed_address_pairs(context, id) + self._process_create_allowed_address_pairs( + context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS]) + # checks if security groups were updated adding/modifying + # security groups, port security is set and port has ip + if not (has_ip and ret_port[psec.PORTSECURITY]): + if has_security_groups: + raise psec.PortSecurityAndIPRequiredForSecurityGroups() + # Update did not have security groups passed in. Check + # that port does not have any security groups already on it. + filters = {'port_id': [id]} + security_groups = ( + super(NsxPluginV2, self)._get_port_security_group_bindings( + context, filters) + ) + if security_groups and not delete_security_groups: + raise psec.PortSecurityPortHasSecurityGroup() + + if (delete_security_groups or has_security_groups): + # delete the port binding and read it with the new rules. 
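+                # Existing bindings are dropped and rebuilt from the request
+                # rather than diffed against the previous set.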
+ self._delete_port_security_group_bindings(context, id) + sgids = self._get_security_groups_on_port(context, port) + self._process_port_create_security_group(context, ret_port, + sgids) + + if psec.PORTSECURITY in port['port']: + self._process_port_port_security_update( + context, port['port'], ret_port) + + port_queue_id = self._check_for_queue_and_create( + context, ret_port) + # Populate the mac learning attribute + new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING) + if (new_mac_learning_state is not None and + old_mac_learning_state != new_mac_learning_state): + self._update_mac_learning_state(context, id, + new_mac_learning_state) + ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state + self._delete_port_queue_mapping(context, ret_port['id']) + self._process_port_queue_mapping(context, ret_port, + port_queue_id) + LOG.debug(_("Updating port: %s"), port) + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, id) + # Convert Neutron security groups identifiers into NSX security + # profiles identifiers + nsx_sec_profile_ids = [ + nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, neutron_sg_id) for + neutron_sg_id in (ret_port[ext_sg.SECURITYGROUPS] or [])] + + if nsx_port_id: + try: + switchlib.update_port( + self.cluster, nsx_switch_id, nsx_port_id, + id, tenant_id, + ret_port['name'], + ret_port['device_id'], + ret_port['admin_state_up'], + ret_port['mac_address'], + ret_port['fixed_ips'], + ret_port[psec.PORTSECURITY], + nsx_sec_profile_ids, + ret_port[qos.QUEUE], + ret_port.get(mac_ext.MAC_LEARNING), + ret_port.get(addr_pair.ADDRESS_PAIRS)) + + # Update the port status from nsx. If we fail here hide it + # since the port was successfully updated but we were not + # able to retrieve the status. + ret_port['status'] = switchlib.get_port_status( + self.cluster, nsx_switch_id, + nsx_port_id) + # FIXME(arosen) improve exception handling. + except Exception: + ret_port['status'] = constants.PORT_STATUS_ERROR + LOG.exception(_("Unable to update port id: %s."), + nsx_port_id) + + # If nsx_port_id is not in database or in nsx put in error state. + else: + ret_port['status'] = constants.PORT_STATUS_ERROR + + self._process_portbindings_create_and_update(context, + port['port'], + ret_port) + return ret_port + + def delete_port(self, context, id, l3_port_check=True, + nw_gw_port_check=True): + """Deletes a port on a specified Virtual Network. + + If the port contains a remote interface attachment, the remote + interface is first un-plugged and then the port is deleted. + + :returns: None + :raises: exception.PortInUse + :raises: exception.PortNotFound + :raises: exception.NetworkNotFound + """ + # if needed, check to see if this is a port owned by + # a l3 router. 
If so, we should prevent deletion here + if l3_port_check: + self.prevent_l3_port_deletion(context, id) + neutron_db_port = self.get_port(context, id) + # Perform the same check for ports owned by layer-2 gateways + if nw_gw_port_check: + self.prevent_network_gateway_port_deletion(context, + neutron_db_port) + port_delete_func = self._port_drivers['delete'].get( + neutron_db_port['device_owner'], + self._port_drivers['delete']['default']) + + port_delete_func(context, neutron_db_port) + self.disassociate_floatingips(context, id) + with context.session.begin(subtransactions=True): + queue = self._get_port_queue_bindings(context, {'port_id': [id]}) + # metadata_dhcp_host_route + self.handle_port_metadata_access( + context, neutron_db_port, is_delete=True) + super(NsxPluginV2, self).delete_port(context, id) + # Delete qos queue if possible + if queue: + self.delete_qos_queue(context, queue[0]['queue_id'], False) + self.handle_port_dhcp_access( + context, neutron_db_port, action='delete_port') + + def get_port(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + if (self.nsx_sync_opts.always_read_status or + fields and 'status' in fields): + # Perform explicit state synchronization + db_port = self._get_port(context, id) + self._synchronizer.synchronize_port( + context, db_port) + return self._make_port_dict(db_port, fields) + else: + return super(NsxPluginV2, self).get_port(context, id, fields) + + def get_router(self, context, id, fields=None): + if (self.nsx_sync_opts.always_read_status or + fields and 'status' in fields): + db_router = self._get_router(context, id) + # Perform explicit state synchronization + self._synchronizer.synchronize_router( + context, db_router) + return self._make_router_dict(db_router, fields) + else: + return super(NsxPluginV2, self).get_router(context, id, fields) + + def _create_lrouter(self, context, router, nexthop): + tenant_id = self._get_tenant_id_for_create(context, router) + distributed = router.get('distributed') + try: + lrouter = routerlib.create_lrouter( + self.cluster, router['id'], + tenant_id, router['name'], nexthop, + distributed=attr.is_attr_set(distributed) and distributed) + except nsx_exc.InvalidVersion: + msg = _("Cannot create a distributed router with the NSX " + "platform currently in execution. Please, try " + "without specifying the 'distributed' attribute.") + LOG.exception(msg) + raise n_exc.BadRequest(resource='router', msg=msg) + except api_exc.NsxApiException: + err_msg = _("Unable to create logical router on NSX Platform") + LOG.exception(err_msg) + raise nsx_exc.NsxPluginException(err_msg=err_msg) + + # Create the port here - and update it later if we have gw_info + try: + self._create_and_attach_router_port( + self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True}, + "L3GatewayAttachment", + self.cluster.default_l3_gw_service_uuid) + except nsx_exc.NsxPluginException: + LOG.exception(_("Unable to create L3GW port on logical router " + "%(router_uuid)s. 
Verify Default Layer-3 Gateway " + "service %(def_l3_gw_svc)s id is correct"), + {'router_uuid': lrouter['uuid'], + 'def_l3_gw_svc': + self.cluster.default_l3_gw_service_uuid}) + # Try and remove logical router from NSX + routerlib.delete_lrouter(self.cluster, lrouter['uuid']) + # Return user a 500 with an apter message + raise nsx_exc.NsxPluginException( + err_msg=(_("Unable to create router %s on NSX backend") % + router['id'])) + lrouter['status'] = plugin_const.ACTIVE + return lrouter + + def create_router(self, context, router): + # NOTE(salvatore-orlando): We completely override this method in + # order to be able to use the NSX ID as Neutron ID + # TODO(salvatore-orlando): Propose upstream patch for allowing + # 3rd parties to specify IDs as we do with l2 plugin + r = router['router'] + has_gw_info = False + tenant_id = self._get_tenant_id_for_create(context, r) + # default value to set - nsx wants it (even if we don't have it) + nexthop = NSX_DEFAULT_NEXTHOP + # if external gateway info are set, then configure nexthop to + # default external gateway + if 'external_gateway_info' in r and r.get('external_gateway_info'): + has_gw_info = True + gw_info = r['external_gateway_info'] + del r['external_gateway_info'] + # The following DB read will be performed again when updating + # gateway info. This is not great, but still better than + # creating NSX router here and updating it later + network_id = (gw_info.get('network_id', None) if gw_info + else None) + if network_id: + ext_net = self._get_network(context, network_id) + if not ext_net.external: + msg = (_("Network '%s' is not a valid external " + "network") % network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + if ext_net.subnets: + ext_subnet = ext_net.subnets[0] + nexthop = ext_subnet.gateway_ip + # NOTE(salv-orlando): Pre-generating uuid for Neutron + # router. This will be removed once the router create operation + # becomes an asynchronous task + neutron_router_id = str(uuid.uuid4()) + r['id'] = neutron_router_id + lrouter = self._create_lrouter(context, r, nexthop) + # Update 'distributed' with value returned from NSX + # This will be useful for setting the value if the API request + # did not specify any value for the 'distributed' attribute + # Platforms older than 3.x do not support the attribute + r['distributed'] = lrouter.get('distributed', False) + # TODO(salv-orlando): Deal with backend object removal in case + # of db failures + with context.session.begin(subtransactions=True): + # Transaction nesting is needed to avoid foreign key violations + # when processing the distributed router binding + with context.session.begin(subtransactions=True): + router_db = l3_db.Router(id=neutron_router_id, + tenant_id=tenant_id, + name=r['name'], + admin_state_up=r['admin_state_up'], + status=lrouter['status']) + context.session.add(router_db) + self._process_nsx_router_create(context, router_db, r) + # Ensure neutron router is moved into the transaction's buffer + context.session.flush() + # Add mapping between neutron and nsx identifiers + nsx_db.add_neutron_nsx_router_mapping( + context.session, router_db['id'], lrouter['uuid']) + + if has_gw_info: + # NOTE(salv-orlando): This operation has been moved out of the + # database transaction since it performs several NSX queries, + # ithis ncreasing the risk of deadlocks between eventlet and + # sqlalchemy operations. 
+ # Set external gateway and remove router in case of failure + try: + self._update_router_gw_info(context, router_db['id'], gw_info) + except (n_exc.NeutronException, api_exc.NsxApiException): + with excutils.save_and_reraise_exception(): + # As setting gateway failed, the router must be deleted + # in order to ensure atomicity + router_id = router_db['id'] + LOG.warn(_("Failed to set gateway info for router being " + "created:%s - removing router"), router_id) + self.delete_router(context, router_id) + LOG.info(_("Create router failed while setting external " + "gateway. Router:%s has been removed from " + "DB and backend"), + router_id) + return self._make_router_dict(router_db) + + def _update_lrouter(self, context, router_id, name, nexthop, routes=None): + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + return routerlib.update_lrouter( + self.cluster, nsx_router_id, name, + nexthop, routes=routes) + + def _update_lrouter_routes(self, context, router_id, routes): + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + routerlib.update_explicit_routes_lrouter( + self.cluster, nsx_router_id, routes) + + def update_router(self, context, router_id, router): + # Either nexthop is updated or should be kept as it was before + r = router['router'] + nexthop = None + if 'external_gateway_info' in r and r.get('external_gateway_info'): + gw_info = r['external_gateway_info'] + # The following DB read will be performed again when updating + # gateway info. This is not great, but still better than + # creating NSX router here and updating it later + network_id = (gw_info.get('network_id', None) if gw_info + else None) + if network_id: + ext_net = self._get_network(context, network_id) + if not ext_net.external: + msg = (_("Network '%s' is not a valid external " + "network") % network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + if ext_net.subnets: + ext_subnet = ext_net.subnets[0] + nexthop = ext_subnet.gateway_ip + try: + for route in r.get('routes', []): + if route['destination'] == '0.0.0.0/0': + msg = _("'routes' cannot contain route '0.0.0.0/0', " + "this must be updated through the default " + "gateway attribute") + raise n_exc.BadRequest(resource='router', msg=msg) + previous_routes = self._update_lrouter( + context, router_id, r.get('name'), + nexthop, routes=r.get('routes')) + # NOTE(salv-orlando): The exception handling below is not correct, but + # unfortunately nsxlib raises a neutron notfound exception when an + # object is not found in the underlying backend + except n_exc.NotFound: + # Put the router in ERROR status + with context.session.begin(subtransactions=True): + router_db = self._get_router(context, router_id) + router_db['status'] = constants.NET_STATUS_ERROR + raise nsx_exc.NsxPluginException( + err_msg=_("Logical router %s not found " + "on NSX Platform") % router_id) + except api_exc.NsxApiException: + raise nsx_exc.NsxPluginException( + err_msg=_("Unable to update logical router on NSX Platform")) + except nsx_exc.InvalidVersion: + msg = _("Request cannot contain 'routes' with the NSX " + "platform currently in execution. 
Please, try " + "without specifying the static routes.") + LOG.exception(msg) + raise n_exc.BadRequest(resource='router', msg=msg) + try: + return super(NsxPluginV2, self).update_router(context, + router_id, router) + except (extraroute.InvalidRoutes, + extraroute.RouterInterfaceInUseByRoute, + extraroute.RoutesExhausted): + with excutils.save_and_reraise_exception(): + # revert changes made to NSX + self._update_lrouter_routes( + context, router_id, previous_routes) + + def _delete_lrouter(self, context, router_id, nsx_router_id): + # The neutron router id (router_id) is ignored in this routine, + # but used in plugins deriving from this one + routerlib.delete_lrouter(self.cluster, nsx_router_id) + + def delete_router(self, context, router_id): + with context.session.begin(subtransactions=True): + # TODO(salv-orlando): This call should have no effect on delete + # router, but if it does, it should not happen within a + # transaction, and it should be restored on rollback + self.handle_router_metadata_access( + context, router_id, interface=None) + # Pre-delete checks + # NOTE(salv-orlando): These checks will be repeated anyway when + # calling the superclass. This is wasteful, but is the simplest + # way of ensuring a consistent removal of the router both in + # the neutron Database and in the NSX backend. + # TODO(salv-orlando): split pre-delete checks and actual + # deletion in superclass. + + # Ensure that the router is not used + fips = self.get_floatingips_count( + context.elevated(), filters={'router_id': [router_id]}) + if fips: + raise l3.RouterInUse(router_id=router_id) + + device_filter = {'device_id': [router_id], + 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} + ports = self._core_plugin.get_ports_count(context.elevated(), + filters=device_filter) + if ports: + raise l3.RouterInUse(router_id=router_id) + + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + # It is safe to remove the router from the database, so remove it + # from the backend + try: + self._delete_lrouter(context, router_id, nsx_router_id) + except n_exc.NotFound: + # This is not a fatal error, but needs to be logged + LOG.warning(_("Logical router '%s' not found " + "on NSX Platform"), router_id) + except api_exc.NsxApiException: + raise nsx_exc.NsxPluginException( + err_msg=(_("Unable to delete logical router '%s' " + "on NSX Platform") % nsx_router_id)) + # Remove the NSX mapping first in order to ensure a mapping to + # a non-existent NSX router is not left in the DB in case of + # failure while removing the router from the neutron DB + try: + nsx_db.delete_neutron_nsx_router_mapping( + context.session, router_id) + except db_exc.DBError as d_exc: + # Do not make this error fatal + LOG.warn(_("Unable to remove NSX mapping for Neutron router " + "%(router_id)s because of the following exception:" + "%(d_exc)s"), {'router_id': router_id, + 'd_exc': str(d_exc)}) + # Perform the actual delete on the Neutron DB + super(NsxPluginV2, self).delete_router(context, router_id) + + def _add_subnet_snat_rule(self, context, router, subnet): + gw_port = router.gw_port + if gw_port and router.enable_snat: + # There is a change gw_port might have multiple IPs + # In that case we will consider only the first one + if gw_port.get('fixed_ips'): + snat_ip = gw_port['fixed_ips'][0]['ip_address'] + cidr_prefix = int(subnet['cidr'].split('/')[1]) + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router['id']) + routerlib.create_lrouter_snat_rule( + 
self.cluster, nsx_router_id, snat_ip, snat_ip, + order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix, + match_criteria={'source_ip_addresses': subnet['cidr']}) + + def _delete_subnet_snat_rule(self, context, router, subnet): + # Remove SNAT rule if external gateway is configured + if router.gw_port: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router['id']) + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "SourceNatRule", + max_num_expected=1, min_num_expected=1, + source_ip_addresses=subnet['cidr']) + + def add_router_interface(self, context, router_id, interface_info): + # When adding interface by port_id we need to create the + # peer port on the nsx logical router in this routine + port_id = interface_info.get('port_id') + router_iface_info = super(NsxPluginV2, self).add_router_interface( + context, router_id, interface_info) + # router_iface_info will always have a subnet_id attribute + subnet_id = router_iface_info['subnet_id'] + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + if port_id: + port_data = self._get_port(context, port_id) + nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( + context.session, self.cluster, port_id) + # Unplug current attachment from lswitch port + switchlib.plug_vif_interface(self.cluster, nsx_switch_id, + nsx_port_id, "NoAttachment") + # Create logical router port and plug patch attachment + self._create_and_attach_router_port( + self.cluster, context, nsx_router_id, port_data, + "PatchAttachment", nsx_port_id, subnet_ids=[subnet_id]) + subnet = self._get_subnet(context, subnet_id) + # If there is an external gateway we need to configure the SNAT rule. + # Fetch router from DB + router = self._get_router(context, router_id) + self._add_subnet_snat_rule(context, router, subnet) + routerlib.create_lrouter_nosnat_rule( + self.cluster, nsx_router_id, + order=NSX_NOSNAT_RULES_ORDER, + match_criteria={'destination_ip_addresses': subnet['cidr']}) + + # Ensure the NSX logical router has a connection to a 'metadata access' + # network (with a proxy listening on its DHCP port), by creating it + # if needed. 
+ self.handle_router_metadata_access( + context, router_id, interface=router_iface_info) + LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s " + "and router:%(router_id)s"), + {'subnet_id': subnet_id, 'router_id': router_id}) + return router_iface_info + + def remove_router_interface(self, context, router_id, interface_info): + # The code below is duplicated from base class, but comes handy + # as we need to retrieve the router port id before removing the port + subnet = None + subnet_id = None + if 'port_id' in interface_info: + port_id = interface_info['port_id'] + # find subnet_id - it is need for removing the SNAT rule + port = self._get_port(context, port_id) + if port.get('fixed_ips'): + subnet_id = port['fixed_ips'][0]['subnet_id'] + if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and + port['device_id'] == router_id): + raise l3.RouterInterfaceNotFound(router_id=router_id, + port_id=port_id) + elif 'subnet_id' in interface_info: + subnet_id = interface_info['subnet_id'] + subnet = self._get_subnet(context, subnet_id) + rport_qry = context.session.query(models_v2.Port) + ports = rport_qry.filter_by( + device_id=router_id, + device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF, + network_id=subnet['network_id']) + for p in ports: + if p['fixed_ips'][0]['subnet_id'] == subnet_id: + port_id = p['id'] + break + else: + raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id, + subnet_id=subnet_id) + # Finally remove the data from the Neutron DB + # This will also destroy the port on the logical switch + info = super(NsxPluginV2, self).remove_router_interface( + context, router_id, interface_info) + + try: + # Ensure the connection to the 'metadata access network' + # is removed (with the network) if this the last subnet + # on the router + self.handle_router_metadata_access( + context, router_id, interface=info) + if not subnet: + subnet = self._get_subnet(context, subnet_id) + router = self._get_router(context, router_id) + # If router is enabled_snat = False there are no snat rules to + # delete. 
+ if router.enable_snat: + self._delete_subnet_snat_rule(context, router, subnet) + # Relax the minimum expected number as the nosnat rules + # do not exist in 2.x deployments + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "NoSourceNatRule", + max_num_expected=1, min_num_expected=0, + destination_ip_addresses=subnet['cidr']) + except n_exc.NotFound: + LOG.error(_("Logical router resource %s not found " + "on NSX platform") % router_id) + except api_exc.NsxApiException: + raise nsx_exc.NsxPluginException( + err_msg=(_("Unable to update logical router" + "on NSX Platform"))) + return info + + def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, + internal_ip, nsx_router_id, + min_num_rules_expected=0): + """Finds and removes NAT rules from a NSX router.""" + # NOTE(salv-orlando): The context parameter is ignored in this method + # but used by derived classes + try: + # Remove DNAT rule for the floating IP + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "DestinationNatRule", + max_num_expected=1, + min_num_expected=min_num_rules_expected, + destination_ip_addresses=floating_ip_address) + + # Remove SNAT rules for the floating IP + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "SourceNatRule", + max_num_expected=1, + min_num_expected=min_num_rules_expected, + source_ip_addresses=internal_ip) + routerlib.delete_nat_rules_by_match( + self.cluster, nsx_router_id, "SourceNatRule", + max_num_expected=1, + min_num_expected=min_num_rules_expected, + destination_ip_addresses=internal_ip) + + except api_exc.NsxApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("An error occurred while removing NAT rules " + "on the NSX platform for floating ip:%s"), + floating_ip_address) + except nsx_exc.NatRuleMismatch: + # Do not surface to the user + LOG.warning(_("An incorrect number of matching NAT rules " + "was found on the NSX platform")) + + def _remove_floatingip_address(self, context, fip_db): + # Remove floating IP address from logical router port + # Fetch logical port of router's external gateway + router_id = fip_db.router_id + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + nsx_gw_port_id = routerlib.find_router_gw_port( + context, self.cluster, nsx_router_id)['uuid'] + ext_neutron_port_db = self._get_port(context.elevated(), + fip_db.floating_port_id) + nsx_floating_ips = self._build_ip_address_list( + context.elevated(), ext_neutron_port_db['fixed_ips']) + routerlib.update_lrouter_port_ips(self.cluster, + nsx_router_id, + nsx_gw_port_id, + ips_to_add=[], + ips_to_remove=nsx_floating_ips) + + def _get_fip_assoc_data(self, context, fip, floatingip_db): + if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and + not ('port_id' in fip and fip['port_id'])): + msg = _("fixed_ip_address cannot be specified without a port_id") + raise n_exc.BadRequest(resource='floatingip', msg=msg) + port_id = internal_ip = router_id = None + if 'port_id' in fip and fip['port_id']: + fip_qry = context.session.query(l3_db.FloatingIP) + port_id, internal_ip, router_id = self.get_assoc_data( + context, + fip, + floatingip_db['floating_network_id']) + try: + fip_qry.filter_by( + fixed_port_id=fip['port_id'], + floating_network_id=floatingip_db['floating_network_id'], + fixed_ip_address=internal_ip).one() + raise l3.FloatingIPPortAlreadyAssociated( + port_id=fip['port_id'], + 
fip_id=floatingip_db['id'], + floating_ip_address=floatingip_db['floating_ip_address'], + fixed_ip=floatingip_db['fixed_ip_address'], + net_id=floatingip_db['floating_network_id']) + except sa_exc.NoResultFound: + pass + return (port_id, internal_ip, router_id) + + def _update_fip_assoc(self, context, fip, floatingip_db, external_port): + """Update floating IP association data. + + Overrides method from base class. + The method is augmented for creating NAT rules in the process. + """ + # Store router currently serving the floating IP + old_router_id = floatingip_db.router_id + port_id, internal_ip, router_id = self._get_fip_assoc_data( + context, fip, floatingip_db) + floating_ip = floatingip_db['floating_ip_address'] + # If there's no association router_id will be None + if router_id: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, router_id) + self._retrieve_and_delete_nat_rules( + context, floating_ip, internal_ip, nsx_router_id) + # Fetch logical port of router's external gateway + # Fetch logical port of router's external gateway + nsx_floating_ips = self._build_ip_address_list( + context.elevated(), external_port['fixed_ips']) + floating_ip = floatingip_db['floating_ip_address'] + # Retrieve and delete existing NAT rules, if any + if old_router_id: + nsx_old_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, old_router_id) + # Retrieve the current internal ip + _p, _s, old_internal_ip = self._internal_fip_assoc_data( + context, {'id': floatingip_db.id, + 'port_id': floatingip_db.fixed_port_id, + 'fixed_ip_address': floatingip_db.fixed_ip_address, + 'tenant_id': floatingip_db.tenant_id}) + nsx_gw_port_id = routerlib.find_router_gw_port( + context, self.cluster, nsx_old_router_id)['uuid'] + self._retrieve_and_delete_nat_rules( + context, floating_ip, old_internal_ip, nsx_old_router_id) + routerlib.update_lrouter_port_ips( + self.cluster, nsx_old_router_id, nsx_gw_port_id, + ips_to_add=[], ips_to_remove=nsx_floating_ips) + + if router_id: + nsx_gw_port_id = routerlib.find_router_gw_port( + context, self.cluster, nsx_router_id)['uuid'] + # Re-create NAT rules only if a port id is specified + if fip.get('port_id'): + try: + # Setup DNAT rules for the floating IP + routerlib.create_lrouter_dnat_rule( + self.cluster, nsx_router_id, internal_ip, + order=NSX_FLOATINGIP_NAT_RULES_ORDER, + match_criteria={'destination_ip_addresses': + floating_ip}) + # Setup SNAT rules for the floating IP + # Create a SNAT rule for enabling connectivity to the + # floating IP from the same network as the internal port + # Find subnet id for internal_ip from fixed_ips + internal_port = self._get_port(context, port_id) + # Cchecks not needed on statements below since otherwise + # _internal_fip_assoc_data would have raised + subnet_ids = [ip['subnet_id'] for ip in + internal_port['fixed_ips'] if + ip['ip_address'] == internal_ip] + internal_subnet_cidr = self._build_ip_address_list( + context, internal_port['fixed_ips'], + subnet_ids=subnet_ids)[0] + routerlib.create_lrouter_snat_rule( + self.cluster, nsx_router_id, floating_ip, floating_ip, + order=NSX_NOSNAT_RULES_ORDER - 1, + match_criteria={'source_ip_addresses': + internal_subnet_cidr, + 'destination_ip_addresses': + internal_ip}) + # setup snat rule such that src ip of a IP packet when + # using floating is the floating ip itself. 
+ routerlib.create_lrouter_snat_rule( + self.cluster, nsx_router_id, floating_ip, floating_ip, + order=NSX_FLOATINGIP_NAT_RULES_ORDER, + match_criteria={'source_ip_addresses': internal_ip}) + + # Add Floating IP address to router_port + routerlib.update_lrouter_port_ips( + self.cluster, nsx_router_id, nsx_gw_port_id, + ips_to_add=nsx_floating_ips, ips_to_remove=[]) + except api_exc.NsxApiException: + LOG.exception(_("An error occurred while creating NAT " + "rules on the NSX platform for floating " + "ip:%(floating_ip)s mapped to " + "internal ip:%(internal_ip)s"), + {'floating_ip': floating_ip, + 'internal_ip': internal_ip}) + msg = _("Failed to update NAT rules for floatingip update") + raise nsx_exc.NsxPluginException(err_msg=msg) + + floatingip_db.update({'fixed_ip_address': internal_ip, + 'fixed_port_id': port_id, + 'router_id': router_id}) + + def delete_floatingip(self, context, id): + fip_db = self._get_floatingip(context, id) + # Check whether the floating ip is associated or not + if fip_db.fixed_port_id: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, fip_db.router_id) + self._retrieve_and_delete_nat_rules(context, + fip_db.floating_ip_address, + fip_db.fixed_ip_address, + nsx_router_id, + min_num_rules_expected=1) + # Remove floating IP address from logical router port + self._remove_floatingip_address(context, fip_db) + return super(NsxPluginV2, self).delete_floatingip(context, id) + + def disassociate_floatingips(self, context, port_id): + try: + fip_qry = context.session.query(l3_db.FloatingIP) + fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) + + for fip_db in fip_dbs: + nsx_router_id = nsx_utils.get_nsx_router_id( + context.session, self.cluster, fip_db.router_id) + self._retrieve_and_delete_nat_rules(context, + fip_db.floating_ip_address, + fip_db.fixed_ip_address, + nsx_router_id, + min_num_rules_expected=1) + self._remove_floatingip_address(context, fip_db) + except sa_exc.NoResultFound: + LOG.debug(_("The port '%s' is not associated with floating IPs"), + port_id) + except n_exc.NotFound: + LOG.warning(_("Nat rules not found in nsx for port: %s"), id) + + super(NsxPluginV2, self).disassociate_floatingips(context, port_id) + + def create_network_gateway(self, context, network_gateway): + """Create a layer-2 network gateway. + + Create the gateway service on NSX platform and corresponding data + structures in Neutron datase. 
+ """ + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + # Need to re-do authZ checks here in order to avoid creation on NSX + gw_data = network_gateway[networkgw.GATEWAY_RESOURCE_NAME] + tenant_id = self._get_tenant_id_for_create(context, gw_data) + devices = gw_data['devices'] + # Populate default physical network where not specified + for device in devices: + if not device.get('interface_name'): + device['interface_name'] = self.cluster.default_interface_name + try: + # Replace Neutron device identifiers with NSX identifiers + dev_map = dict((dev['id'], dev['interface_name']) for + dev in devices) + nsx_devices = [] + for db_device in self._query_gateway_devices( + context, filters={'id': [device['id'] for device in devices]}): + nsx_devices.append( + {'id': db_device['nsx_id'], + 'interface_name': dev_map[db_device['id']]}) + nsx_res = l2gwlib.create_l2_gw_service( + self.cluster, tenant_id, gw_data['name'], nsx_devices) + nsx_uuid = nsx_res.get('uuid') + except api_exc.Conflict: + raise nsx_exc.L2GatewayAlreadyInUse(gateway=gw_data['name']) + except api_exc.NsxApiException: + err_msg = _("Unable to create l2_gw_service for: %s") % gw_data + LOG.exception(err_msg) + raise nsx_exc.NsxPluginException(err_msg=err_msg) + gw_data['id'] = nsx_uuid + return super(NsxPluginV2, self).create_network_gateway( + context, network_gateway) + + def delete_network_gateway(self, context, gateway_id): + """Remove a layer-2 network gateway. + + Remove the gateway service from NSX platform and corresponding data + structures in Neutron datase. + """ + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + with context.session.begin(subtransactions=True): + try: + super(NsxPluginV2, self).delete_network_gateway( + context, gateway_id) + l2gwlib.delete_l2_gw_service(self.cluster, gateway_id) + except api_exc.ResourceNotFound: + # Do not cause a 500 to be returned to the user if + # the corresponding NSX resource does not exist + LOG.exception(_("Unable to remove gateway service from " + "NSX plaform - the resource was not found")) + + def get_network_gateway(self, context, id, fields=None): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + return super(NsxPluginV2, self).get_network_gateway(context, + id, fields) + + def get_network_gateways(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + # Ensure the tenant_id attribute is populated on returned gateways + return super(NsxPluginV2, self).get_network_gateways( + context, filters, fields, sorts, limit, marker, page_reverse) + + def update_network_gateway(self, context, id, network_gateway): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + # Update gateway on backend when there's a name change + name = network_gateway[networkgw.GATEWAY_RESOURCE_NAME].get('name') + if name: + try: + l2gwlib.update_l2_gw_service(self.cluster, id, name) + except api_exc.NsxApiException: + # Consider backend failures as non-fatal, but still warn + # because this might indicate something dodgy is going on + LOG.warn(_("Unable to update name on NSX backend " + "for network gateway: %s"), id) + return super(NsxPluginV2, self).update_network_gateway( + context, 
id, network_gateway) + + def connect_network(self, context, network_gateway_id, + network_mapping_info): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + try: + return super(NsxPluginV2, self).connect_network( + context, network_gateway_id, network_mapping_info) + except api_exc.Conflict: + raise nsx_exc.L2GatewayAlreadyInUse(gateway=network_gateway_id) + + def disconnect_network(self, context, network_gateway_id, + network_mapping_info): + # Ensure the default gateway in the config file is in sync with the db + self._ensure_default_network_gateway() + return super(NsxPluginV2, self).disconnect_network( + context, network_gateway_id, network_mapping_info) + + def _get_nsx_device_id(self, context, device_id): + return self._get_gateway_device(context, device_id)['nsx_id'] + + def _rollback_gw_device(self, context, device_id, + gw_data=None, new_status=None, + is_create=False, log_level=logging.ERROR): + LOG.log(log_level, + _("Rolling back database changes for gateway device %s " + "because of an error in the NSX backend"), device_id) + with context.session.begin(subtransactions=True): + query = self._model_query( + context, networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == device_id) + if is_create: + query.delete(synchronize_session=False) + else: + super(NsxPluginV2, self).update_gateway_device( + context, device_id, + {networkgw.DEVICE_RESOURCE_NAME: gw_data}) + if new_status: + query.update({'status': new_status}, + synchronize_session=False) + + # TODO(salv-orlando): Handlers for Gateway device operations should be + # moved into the appropriate nsx_handlers package once the code for the + # blueprint nsx-async-backend-communication merges + def create_gateway_device_handler(self, context, gateway_device, + client_certificate): + neutron_id = gateway_device['id'] + try: + nsx_res = l2gwlib.create_gateway_device( + self.cluster, + gateway_device['tenant_id'], + gateway_device['name'], + neutron_id, + self.cluster.default_tz_uuid, + gateway_device['connector_type'], + gateway_device['connector_ip'], + client_certificate) + + # Fetch status (it needs another NSX API call) + device_status = nsx_utils.get_nsx_device_status(self.cluster, + nsx_res['uuid']) + + # set NSX GW device in neutron database and update status + with context.session.begin(subtransactions=True): + query = self._model_query( + context, networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == neutron_id) + query.update({'status': device_status, + 'nsx_id': nsx_res['uuid']}, + synchronize_session=False) + LOG.debug(_("Neutron gateway device: %(neutron_id)s; " + "NSX transport node identifier: %(nsx_id)s; " + "Operational status: %(status)s."), + {'neutron_id': neutron_id, + 'nsx_id': nsx_res['uuid'], + 'status': device_status}) + return device_status + except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): + with excutils.save_and_reraise_exception(): + self._rollback_gw_device(context, neutron_id, is_create=True) + + def update_gateway_device_handler(self, context, gateway_device, + old_gateway_device_data, + client_certificate): + nsx_id = gateway_device['nsx_id'] + neutron_id = gateway_device['id'] + try: + l2gwlib.update_gateway_device( + self.cluster, + nsx_id, + gateway_device['tenant_id'], + gateway_device['name'], + neutron_id, + self.cluster.default_tz_uuid, + gateway_device['connector_type'], + gateway_device['connector_ip'], + client_certificate) + + # Fetch status 
(it needs another NSX API call) + device_status = nsx_utils.get_nsx_device_status(self.cluster, + nsx_id) + # update status + with context.session.begin(subtransactions=True): + query = self._model_query( + context, networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == neutron_id) + query.update({'status': device_status}, + synchronize_session=False) + LOG.debug(_("Neutron gateway device: %(neutron_id)s; " + "NSX transport node identifier: %(nsx_id)s; " + "Operational status: %(status)s."), + {'neutron_id': neutron_id, + 'nsx_id': nsx_id, + 'status': device_status}) + return device_status + except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): + with excutils.save_and_reraise_exception(): + self._rollback_gw_device(context, neutron_id, + gw_data=old_gateway_device_data) + except n_exc.NotFound: + # The gateway device was probably deleted in the backend. + # The DB change should be rolled back and the status must + # be put in error + with excutils.save_and_reraise_exception(): + self._rollback_gw_device(context, neutron_id, + gw_data=old_gateway_device_data, + new_status=networkgw_db.ERROR) + + def get_gateway_device(self, context, device_id, fields=None): + # Get device from database + gw_device = super(NsxPluginV2, self).get_gateway_device( + context, device_id, fields, include_nsx_id=True) + # Fetch status from NSX + nsx_id = gw_device['nsx_id'] + device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id) + # TODO(salv-orlando): Asynchronous sync for gateway device status + # Update status in database + with context.session.begin(subtransactions=True): + query = self._model_query( + context, networkgw_db.NetworkGatewayDevice).filter( + networkgw_db.NetworkGatewayDevice.id == device_id) + query.update({'status': device_status}, + synchronize_session=False) + gw_device['status'] = device_status + return gw_device + + def get_gateway_devices(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + # Get devices from database + devices = super(NsxPluginV2, self).get_gateway_devices( + context, filters, fields, include_nsx_id=True) + # Fetch operational status from NSX, filter by tenant tag + # TODO(salv-orlando): Asynchronous sync for gateway device status + tenant_id = context.tenant_id if not context.is_admin else None + nsx_statuses = nsx_utils.get_nsx_device_statuses(self.cluster, + tenant_id) + # Update statuses in database + with context.session.begin(subtransactions=True): + for device in devices: + new_status = nsx_statuses.get(device['nsx_id']) + if new_status: + device['status'] = new_status + return devices + + def create_gateway_device(self, context, gateway_device): + # NOTE(salv-orlando): client-certificate will not be stored + # in the database + device_data = gateway_device[networkgw.DEVICE_RESOURCE_NAME] + client_certificate = device_data.pop('client_certificate') + gw_device = super(NsxPluginV2, self).create_gateway_device( + context, gateway_device) + # DB operation was successful, perform NSX operation + gw_device['status'] = self.create_gateway_device_handler( + context, gw_device, client_certificate) + return gw_device + + def update_gateway_device(self, context, device_id, + gateway_device): + # NOTE(salv-orlando): client-certificate will not be stored + # in the database + client_certificate = ( + gateway_device[networkgw.DEVICE_RESOURCE_NAME].pop( + 'client_certificate', None)) + # Retrive current state from DB in case a rollback should be needed + 
old_gw_device_data = super(NsxPluginV2, self).get_gateway_device( + context, device_id, include_nsx_id=True) + gw_device = super(NsxPluginV2, self).update_gateway_device( + context, device_id, gateway_device, include_nsx_id=True) + # DB operation was successful, perform NSX operation + gw_device['status'] = self.update_gateway_device_handler( + context, gw_device, old_gw_device_data, client_certificate) + gw_device.pop('nsx_id') + return gw_device + + def delete_gateway_device(self, context, device_id): + nsx_device_id = self._get_nsx_device_id(context, device_id) + super(NsxPluginV2, self).delete_gateway_device( + context, device_id) + # DB operation was successful, peform NSX operation + # TODO(salv-orlando): State consistency with neutron DB + # should be ensured even in case of backend failures + try: + l2gwlib.delete_gateway_device(self.cluster, nsx_device_id) + except n_exc.NotFound: + LOG.warn(_("Removal of gateway device: %(neutron_id)s failed on " + "NSX backend (NSX id:%(nsx_id)s) because the NSX " + "resource was not found"), + {'neutron_id': device_id, 'nsx_id': nsx_device_id}) + except api_exc.NsxApiException: + with excutils.save_and_reraise_exception(): + # In this case a 500 should be returned + LOG.exception(_("Removal of gateway device: %(neutron_id)s " + "failed on NSX backend (NSX id:%(nsx_id)s). " + "Neutron and NSX states have diverged."), + {'neutron_id': device_id, + 'nsx_id': nsx_device_id}) + + def create_security_group(self, context, security_group, default_sg=False): + """Create security group. + + If default_sg is true that means we are creating a default security + group and we don't need to check if one exists. + """ + s = security_group.get('security_group') + + tenant_id = self._get_tenant_id_for_create(context, s) + if not default_sg: + self._ensure_default_security_group(context, tenant_id) + # NOTE(salv-orlando): Pre-generating Neutron ID for security group. + neutron_id = str(uuid.uuid4()) + nsx_secgroup = secgrouplib.create_security_profile( + self.cluster, tenant_id, neutron_id, s) + with context.session.begin(subtransactions=True): + s['id'] = neutron_id + sec_group = super(NsxPluginV2, self).create_security_group( + context, security_group, default_sg) + context.session.flush() + # Add mapping between neutron and nsx identifiers + nsx_db.add_neutron_nsx_security_group_mapping( + context.session, neutron_id, nsx_secgroup['uuid']) + return sec_group + + def update_security_group(self, context, secgroup_id, security_group): + secgroup = (super(NsxPluginV2, self). + update_security_group(context, + secgroup_id, + security_group)) + if ('name' in security_group['security_group'] and + secgroup['name'] != 'default'): + nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, secgroup_id) + try: + name = security_group['security_group']['name'] + secgrouplib.update_security_profile( + self.cluster, nsx_sec_profile_id, name) + except (n_exc.NotFound, api_exc.NsxApiException) as e: + # Reverting the DB change is not really worthwhile + # for a mismatch between names. It's the rules that + # we care about. + LOG.error(_('Error while updating security profile ' + '%(uuid)s with name %(name)s: %(error)s.') + % {'uuid': secgroup_id, 'name': name, 'error': e}) + return secgroup + + def delete_security_group(self, context, security_group_id): + """Delete a security group. + + :param security_group_id: security group rule to remove. 
+ """ + with context.session.begin(subtransactions=True): + security_group = super(NsxPluginV2, self).get_security_group( + context, security_group_id) + if not security_group: + raise ext_sg.SecurityGroupNotFound(id=security_group_id) + + if security_group['name'] == 'default' and not context.is_admin: + raise ext_sg.SecurityGroupCannotRemoveDefault() + + filters = {'security_group_id': [security_group['id']]} + if super(NsxPluginV2, self)._get_port_security_group_bindings( + context, filters): + raise ext_sg.SecurityGroupInUse(id=security_group['id']) + nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, security_group_id) + + try: + secgrouplib.delete_security_profile( + self.cluster, nsx_sec_profile_id) + except n_exc.NotFound: + # The security profile was not found on the backend + # do not fail in this case. + LOG.warning(_("The NSX security profile %(sec_profile_id)s, " + "associated with the Neutron security group " + "%(sec_group_id)s was not found on the backend"), + {'sec_profile_id': nsx_sec_profile_id, + 'sec_group_id': security_group_id}) + except api_exc.NsxApiException: + # Raise and fail the operation, as there is a problem which + # prevented the sec group from being removed from the backend + LOG.exception(_("An exception occurred while removing the " + "NSX security profile %(sec_profile_id)s, " + "associated with Netron security group " + "%(sec_group_id)s"), + {'sec_profile_id': nsx_sec_profile_id, + 'sec_group_id': security_group_id}) + raise nsx_exc.NsxPluginException( + _("Unable to remove security group %s from backend"), + security_group['id']) + return super(NsxPluginV2, self).delete_security_group( + context, security_group_id) + + def _validate_security_group_rules(self, context, rules): + for rule in rules['security_group_rules']: + r = rule.get('security_group_rule') + port_based_proto = (self._get_ip_proto_number(r['protocol']) + in securitygroups_db.IP_PROTOCOL_MAP.values()) + if (not port_based_proto and + (r['port_range_min'] is not None or + r['port_range_max'] is not None)): + msg = (_("Port values not valid for " + "protocol: %s") % r['protocol']) + raise n_exc.BadRequest(resource='security_group_rule', + msg=msg) + return super(NsxPluginV2, self)._validate_security_group_rules(context, + rules) + + def create_security_group_rule(self, context, security_group_rule): + """Create a single security group rule.""" + bulk_rule = {'security_group_rules': [security_group_rule]} + return self.create_security_group_rule_bulk(context, bulk_rule)[0] + + def create_security_group_rule_bulk(self, context, security_group_rule): + """Create security group rules. + + :param security_group_rule: list of rules to create + """ + s = security_group_rule.get('security_group_rules') + tenant_id = self._get_tenant_id_for_create(context, s) + + # TODO(arosen) is there anyway we could avoid having the update of + # the security group rules in nsx outside of this transaction? 
+ with context.session.begin(subtransactions=True): + self._ensure_default_security_group(context, tenant_id) + security_group_id = self._validate_security_group_rules( + context, security_group_rule) + # Check to make sure security group exists + security_group = super(NsxPluginV2, self).get_security_group( + context, security_group_id) + + if not security_group: + raise ext_sg.SecurityGroupNotFound(id=security_group_id) + # Check for duplicate rules + self._check_for_duplicate_rules(context, s) + # gather all the existing security group rules since we need all + # of them to PUT to NSX. + existing_rules = self.get_security_group_rules( + context, {'security_group_id': [security_group['id']]}) + combined_rules = sg_utils.merge_security_group_rules_with_current( + context.session, self.cluster, s, existing_rules) + nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, security_group_id) + secgrouplib.update_security_group_rules(self.cluster, + nsx_sec_profile_id, + combined_rules) + return super( + NsxPluginV2, self).create_security_group_rule_bulk_native( + context, security_group_rule) + + def delete_security_group_rule(self, context, sgrid): + """Delete a security group rule + :param sgrid: security group id to remove. + """ + with context.session.begin(subtransactions=True): + # determine security profile id + security_group_rule = ( + super(NsxPluginV2, self).get_security_group_rule( + context, sgrid)) + if not security_group_rule: + raise ext_sg.SecurityGroupRuleNotFound(id=sgrid) + + sgid = security_group_rule['security_group_id'] + current_rules = self.get_security_group_rules( + context, {'security_group_id': [sgid]}) + current_rules_nsx = sg_utils.get_security_group_rules_nsx_format( + context.session, self.cluster, current_rules, True) + + sg_utils.remove_security_group_with_id_and_id_field( + current_rules_nsx, sgrid) + nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( + context.session, self.cluster, sgid) + secgrouplib.update_security_group_rules( + self.cluster, nsx_sec_profile_id, current_rules_nsx) + return super(NsxPluginV2, self).delete_security_group_rule(context, + sgrid) + + def create_qos_queue(self, context, qos_queue, check_policy=True): + q = qos_queue.get('qos_queue') + self._validate_qos_queue(context, q) + q['id'] = queuelib.create_lqueue(self.cluster, q) + return super(NsxPluginV2, self).create_qos_queue(context, qos_queue) + + def delete_qos_queue(self, context, queue_id, raise_in_use=True): + filters = {'queue_id': [queue_id]} + queues = self._get_port_queue_bindings(context, filters) + if queues: + if raise_in_use: + raise qos.QueueInUseByPort() + else: + return + queuelib.delete_lqueue(self.cluster, queue_id) + return super(NsxPluginV2, self).delete_qos_queue(context, queue_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/plugins/service.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/plugins/service.py new file mode 100644 index 00000000..f9d70321 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/plugins/service.py @@ -0,0 +1,1819 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import netaddr +from oslo.config import cfg + +from neutron.common import exceptions as n_exc +from neutron.db.firewall import firewall_db +from neutron.db import l3_db +from neutron.db.loadbalancer import loadbalancer_db +from neutron.db import routedserviceinsertion_db as rsi_db +from neutron.db.vpn import vpn_db +from neutron.extensions import firewall as fw_ext +from neutron.extensions import l3 +from neutron.extensions import routedserviceinsertion as rsi +from neutron.extensions import vpnaas as vpn_ext +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as service_constants +from neutron.plugins.vmware.api_client import exception as api_exc +from neutron.plugins.vmware.common import config # noqa +from neutron.plugins.vmware.common import exceptions as nsx_exc +from neutron.plugins.vmware.common import utils +from neutron.plugins.vmware.dbexts import servicerouter as sr_db +from neutron.plugins.vmware.dbexts import vcns_db +from neutron.plugins.vmware.dbexts import vcns_models +from neutron.plugins.vmware.extensions import servicerouter as sr +from neutron.plugins.vmware.nsxlib import router as routerlib +from neutron.plugins.vmware.nsxlib import switch as switchlib +from neutron.plugins.vmware.plugins import base +from neutron.plugins.vmware.vshield.common import constants as vcns_const +from neutron.plugins.vmware.vshield.common import exceptions +from neutron.plugins.vmware.vshield.tasks import constants as tasks_const +from neutron.plugins.vmware.vshield import vcns_driver +from sqlalchemy.orm import exc as sa_exc + +LOG = logging.getLogger(__name__) + +ROUTER_TYPE_BASIC = 1 +ROUTER_TYPE_ADVANCED = 2 + +ROUTER_STATUS = [ + service_constants.ACTIVE, + service_constants.DOWN, + service_constants.PENDING_CREATE, + service_constants.PENDING_DELETE, + service_constants.ERROR +] + +ROUTER_STATUS_LEVEL = { + service_constants.ACTIVE: vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, + service_constants.DOWN: vcns_const.RouterStatus.ROUTER_STATUS_DOWN, + service_constants.PENDING_CREATE: ( + vcns_const.RouterStatus.ROUTER_STATUS_PENDING_CREATE + ), + service_constants.PENDING_DELETE: ( + vcns_const.RouterStatus.ROUTER_STATUS_PENDING_DELETE + ), + service_constants.ERROR: vcns_const.RouterStatus.ROUTER_STATUS_ERROR +} + + +class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin, + base.NsxPluginV2, + rsi_db.RoutedServiceInsertionDbMixin, + firewall_db.Firewall_db_mixin, + loadbalancer_db.LoadBalancerPluginDb, + vpn_db.VPNPluginDb + ): + + supported_extension_aliases = ( + base.NsxPluginV2.supported_extension_aliases + [ + "service-router", + "routed-service-insertion", + "fwaas", + "lbaas", + "vpnaas" + ]) + # The service plugin cannot currently support pagination + __native_pagination_support = False + __native_sorting_support = False + + def __init__(self): + super(NsxAdvancedPlugin, self).__init__() + + self._super_create_ext_gw_port = ( + self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW]) + self._super_delete_ext_gw_port = ( + 
self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW]) + + self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW] = ( + self._vcns_create_ext_gw_port) + self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW] = ( + self._vcns_delete_ext_gw_port) + + # cache router type based on router id + self._router_type = {} + self.callbacks = VcnsCallbacks(self.safe_reference) + + # load the vCNS driver + self._load_vcns_drivers() + + # switchlib's create_lswitch needs to be replaced in order to proxy + # logical switch create requests to vcns + self._set_create_lswitch_proxy() + + def _set_create_lswitch_proxy(self): + base.switchlib.create_lswitch = self._proxy_create_lswitch + + def _proxy_create_lswitch(self, *args, **kwargs): + name, tz_config, tags = ( + _process_base_create_lswitch_args(*args, **kwargs) + ) + return self.vcns_driver.create_lswitch( + name, tz_config, tags=tags, + port_isolation=None, replication_mode=None) + + def _load_vcns_drivers(self): + self.vcns_driver = vcns_driver.VcnsDriver(self.callbacks) + + def _set_router_type(self, router_id, router_type): + self._router_type[router_id] = router_type + + def _get_router_type(self, context=None, router_id=None, router=None): + if not router: + if router_id in self._router_type: + return self._router_type[router_id] + router = self._get_router(context, router_id) + + LOG.debug(_("EDGE: router = %s"), router) + if router['nsx_attributes']['service_router']: + router_type = ROUTER_TYPE_ADVANCED + else: + router_type = ROUTER_TYPE_BASIC + self._set_router_type(router['id'], router_type) + return router_type + + def _find_router_type(self, router): + is_service_router = router.get(sr.SERVICE_ROUTER, False) + if is_service_router: + return ROUTER_TYPE_ADVANCED + else: + return ROUTER_TYPE_BASIC + + def _is_advanced_service_router(self, context=None, router_id=None, + router=None): + if router: + router_type = self._get_router_type(router=router) + else: + router_type = self._get_router_type(context, router_id) + return (router_type == ROUTER_TYPE_ADVANCED) + + def _vcns_create_ext_gw_port(self, context, port_data): + router_id = port_data['device_id'] + if not self._is_advanced_service_router(context, router_id): + self._super_create_ext_gw_port(context, port_data) + return + + # NOP for Edge because currently the port will be create internally + # by VSM + LOG.debug(_("EDGE: _vcns_create_ext_gw_port")) + + def _vcns_delete_ext_gw_port(self, context, port_data): + router_id = port_data['device_id'] + if not self._is_advanced_service_router(context, router_id): + self._super_delete_ext_gw_port(context, port_data) + return + + # NOP for Edge + LOG.debug(_("EDGE: _vcns_delete_ext_gw_port")) + + def _get_external_attachment_info(self, context, router): + gw_port = router.gw_port + ipaddress = None + netmask = None + nexthop = None + + if gw_port: + # gw_port may have multiple IPs, only configure the first one + if gw_port.get('fixed_ips'): + ipaddress = gw_port['fixed_ips'][0]['ip_address'] + + network_id = gw_port.get('network_id') + if network_id: + ext_net = self._get_network(context, network_id) + if not ext_net.external: + msg = (_("Network '%s' is not a valid external " + "network") % network_id) + raise n_exc.BadRequest(resource='router', msg=msg) + if ext_net.subnets: + ext_subnet = ext_net.subnets[0] + netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask) + nexthop = ext_subnet.gateway_ip + + return (ipaddress, netmask, nexthop) + + def _get_external_gateway_address(self, context, router): + ipaddress, netmask, 
nexthop = self._get_external_attachment_info( + context, router) + return nexthop + + def _vcns_update_static_routes(self, context, **kwargs): + router = kwargs.get('router') + if router is None: + router = self._get_router(context, kwargs['router_id']) + + edge_id = kwargs.get('edge_id') + if edge_id is None: + binding = vcns_db.get_vcns_router_binding(context.session, + router['id']) + edge_id = binding['edge_id'] + + skippable = True + if 'nexthop' in kwargs: + nexthop = kwargs['nexthop'] + # The default gateway and vnic config has dependencies, if we + # explicitly specify nexthop to change, tell the driver not to + # skip this route update + skippable = False + else: + nexthop = self._get_external_gateway_address(context, + router) + + if 'subnets' in kwargs: + subnets = kwargs['subnets'] + else: + subnets = self._find_router_subnets_cidrs(context.elevated(), + router['id']) + + routes = [] + for subnet in subnets: + routes.append({ + 'cidr': subnet, + 'nexthop': vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0] + }) + self.vcns_driver.update_routes(router['id'], edge_id, nexthop, routes, + skippable) + + def _get_nat_rules(self, context, router): + fip_qry = context.session.query(l3_db.FloatingIP) + fip_db = fip_qry.filter_by(router_id=router['id']).all() + + dnat = [] + snat = [] + for fip in fip_db: + if fip.fixed_port_id: + dnat.append({ + 'dst': fip.floating_ip_address, + 'translated': fip.fixed_ip_address + }) + + gw_port = router.gw_port + if gw_port and router.enable_snat: + if gw_port.get('fixed_ips'): + snat_ip = gw_port['fixed_ips'][0]['ip_address'] + subnets = self._find_router_subnets_cidrs(context.elevated(), + router['id']) + for subnet in subnets: + snat.append({ + 'src': subnet, + 'translated': snat_ip + }) + + return (snat, dnat) + + def _update_nat_rules(self, context, router): + snat, dnat = self._get_nat_rules(context, router) + binding = vcns_db.get_vcns_router_binding(context.session, + router['id']) + self.vcns_driver.update_nat_rules(router['id'], + binding['edge_id'], + snat, dnat) + + def _update_interface(self, context, router, sync=False): + addr, mask, nexthop = self._get_external_attachment_info( + context, router) + + secondary = [] + fip_qry = context.session.query(l3_db.FloatingIP) + fip_db = fip_qry.filter_by(router_id=router['id']).all() + for fip in fip_db: + if fip.fixed_port_id: + secondary.append(fip.floating_ip_address) + #Add all vip addresses bound on the router + vip_addrs = self._get_all_vip_addrs_by_router_id(context, + router['id']) + secondary.extend(vip_addrs) + + binding = vcns_db.get_vcns_router_binding(context.session, + router['id']) + task = self.vcns_driver.update_interface( + router['id'], binding['edge_id'], + vcns_const.EXTERNAL_VNIC_INDEX, + self.vcns_driver.external_network, + addr, mask, secondary=secondary) + if sync: + task.wait(tasks_const.TaskState.RESULT) + + def _update_router_gw_info(self, context, router_id, info): + if not self._is_advanced_service_router(context, router_id): + super(NsxAdvancedPlugin, self)._update_router_gw_info( + context, router_id, info) + return + + # get original gw_port config + router = self._get_router(context, router_id) + org_ext_net_id = router.gw_port_id and router.gw_port.network_id + org_enable_snat = router.enable_snat + orgaddr, orgmask, orgnexthop = self._get_external_attachment_info( + context, router) + + super(base.NsxPluginV2, self)._update_router_gw_info( + context, router_id, info, router=router) + + new_ext_net_id = router.gw_port_id and router.gw_port.network_id + 
new_enable_snat = router.enable_snat + newaddr, newmask, newnexthop = self._get_external_attachment_info( + context, router) + + binding = vcns_db.get_vcns_router_binding(context.session, router_id) + + if new_ext_net_id != org_ext_net_id and orgnexthop: + # network changed, need to remove default gateway before vnic + # can be configured + LOG.debug(_("VCNS: delete default gateway %s"), orgnexthop) + self._vcns_update_static_routes(context, + router=router, + edge_id=binding['edge_id'], + nexthop=None) + + if orgaddr != newaddr or orgmask != newmask: + self.vcns_driver.update_interface( + router_id, binding['edge_id'], + vcns_const.EXTERNAL_VNIC_INDEX, + self.vcns_driver.external_network, + newaddr, newmask) + + if orgnexthop != newnexthop: + self._vcns_update_static_routes(context, + router=router, + edge_id=binding['edge_id'], + nexthop=newnexthop) + + if (new_ext_net_id == org_ext_net_id and + org_enable_snat == new_enable_snat): + return + + self._update_nat_rules(context, router) + + def _add_subnet_snat_rule(self, context, router, subnet): + # NOP for service router + if not self._is_advanced_service_router(router=router): + super(NsxAdvancedPlugin, self)._add_subnet_snat_rule( + context, router, subnet) + + def _delete_subnet_snat_rule(self, context, router, subnet): + # NOP for service router + if not self._is_advanced_service_router(router=router): + super(NsxAdvancedPlugin, self)._delete_subnet_snat_rule( + context, router, subnet) + + def _remove_floatingip_address(self, context, fip_db): + # NOP for service router + router_id = fip_db.router_id + if not self._is_advanced_service_router(context, router_id): + super(NsxAdvancedPlugin, self)._remove_floatingip_address( + context, fip_db) + + def _create_advanced_service_router(self, context, neutron_router_id, + name, lrouter, lswitch): + + # store binding + binding = vcns_db.add_vcns_router_binding( + context.session, neutron_router_id, None, lswitch['uuid'], + service_constants.PENDING_CREATE) + + # deploy edge + jobdata = { + 'neutron_router_id': neutron_router_id, + 'lrouter': lrouter, + 'lswitch': lswitch, + 'context': context + } + + # deploy and wait until the deploy requeste has been requested + # so we will have edge_id ready. 
The wait here should be fine
+        # as we're not in a database transaction now
+        self.vcns_driver.deploy_edge(
+            lrouter['uuid'], name, lswitch['uuid'], jobdata=jobdata,
+            wait_for_exec=True)
+
+        return binding
+
+    def _create_integration_lswitch(self, tenant_id, name):
+        # use default transport zone
+        transport_zone_config = [{
+            "zone_uuid": self.cluster.default_tz_uuid,
+            "transport_type": cfg.CONF.NSX.default_transport_type
+        }]
+        return self.vcns_driver.create_lswitch(name, transport_zone_config)
+
+    def _add_router_integration_interface(self, tenant_id, name,
+                                          lrouter, lswitch):
+        # create logic switch port
+        try:
+            ls_port = switchlib.create_lport(
+                self.cluster, lswitch['uuid'], tenant_id,
+                '', '', lrouter['uuid'], True)
+        except api_exc.NsxApiException:
+            msg = (_("An exception occurred while creating a port "
+                     "on lswitch %s") % lswitch['uuid'])
+            LOG.exception(msg)
+            raise n_exc.NeutronException(message=msg)
+
+        # create logic router port
+        try:
+            neutron_port_id = ''
+            pname = name[:36] + '-lp'
+            admin_status_enabled = True
+            lr_port = routerlib.create_router_lport(
+                self.cluster, lrouter['uuid'], tenant_id,
+                neutron_port_id, pname, admin_status_enabled,
+                [vcns_const.INTEGRATION_LR_IPADDRESS])
+        except api_exc.NsxApiException:
+            msg = (_("Unable to create port on NSX logical router %s") % name)
+            LOG.exception(msg)
+            switchlib.delete_port(
+                self.cluster, lswitch['uuid'], ls_port['uuid'])
+            raise n_exc.NeutronException(message=msg)
+
+        # attach logic router port to switch port
+        try:
+            self._update_router_port_attachment(
+                self.cluster, None, lrouter['uuid'], {}, lr_port['uuid'],
+                'PatchAttachment', ls_port['uuid'], None)
+        except api_exc.NsxApiException as e:
+            # lr_port should have been deleted
+            switchlib.delete_port(
+                self.cluster, lswitch['uuid'], ls_port['uuid'])
+            raise e
+
+    def _create_lrouter(self, context, router, nexthop):
+        lrouter = super(NsxAdvancedPlugin, self)._create_lrouter(
+            context, router, vcns_const.INTEGRATION_EDGE_IPADDRESS)
+
+        router_type = self._find_router_type(router)
+        self._set_router_type(lrouter['uuid'], router_type)
+        if router_type == ROUTER_TYPE_BASIC:
+            return lrouter
+
+        tenant_id = self._get_tenant_id_for_create(context, router)
+        name = router['name']
+        try:
+            lsname = name[:36] + '-ls'
+            lswitch = self._create_integration_lswitch(
+                tenant_id, lsname)
+        except Exception:
+            msg = _("Unable to create integration logic switch "
+                    "for router %s") % name
+            LOG.exception(msg)
+            routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
+            raise n_exc.NeutronException(message=msg)
+
+        try:
+            self._add_router_integration_interface(tenant_id, name,
+                                                   lrouter, lswitch)
+        except Exception:
+            msg = _("Unable to add router interface to integration lswitch "
+                    "for router %s") % name
+            LOG.exception(msg)
+            routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
+            raise n_exc.NeutronException(message=msg)
+
+        try:
+            self._create_advanced_service_router(
+                context, router['id'], name, lrouter, lswitch)
+        except Exception:
+            msg = (_("Unable to create advanced service router for %s")
+                   % name)
+            LOG.exception(msg)
+            self.vcns_driver.delete_lswitch(lswitch['uuid'])
+            routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
+            raise n_exc.NeutronException(message=msg)
+
+        lrouter['status'] = service_constants.PENDING_CREATE
+        return lrouter
+
+    def check_router_in_use(self, context, router_id):
+        router_filter = {'router_id': [router_id]}
+        vpnservices = self.get_vpnservices(
+            context, filters={'router_id': [router_id]})
+        if vpnservices:
+            raise vpn_ext.RouterInUseByVPNService(
+                router_id=router_id,
+                vpnservice_id=vpnservices[0]['id'])
+        vips = self.get_vips(
+            context, filters=router_filter)
+        if vips:
+            raise nsx_exc.RouterInUseByLBService(
+                router_id=router_id,
+                vip_id=vips[0]['id'])
+        firewalls = self.get_firewalls(
+            context, filters=router_filter)
+        if firewalls:
+            raise nsx_exc.RouterInUseByFWService(
+                router_id=router_id,
+                firewall_id=firewalls[0]['id'])
+
+    def _delete_lrouter(self, context, router_id, nsx_router_id):
+        binding = vcns_db.get_vcns_router_binding(context.session, router_id)
+        if not binding:
+            super(NsxAdvancedPlugin, self)._delete_lrouter(
+                context, router_id, nsx_router_id)
+        else:
+            #Check whether router has an advanced service inserted.
+            self.check_router_in_use(context, router_id)
+            vcns_db.update_vcns_router_binding(
+                context.session, router_id,
+                status=service_constants.PENDING_DELETE)
+
+            lswitch_id = binding['lswitch_id']
+            edge_id = binding['edge_id']
+
+            # delete lswitch
+            try:
+                self.vcns_driver.delete_lswitch(lswitch_id)
+            except exceptions.ResourceNotFound:
+                LOG.warning(_("Did not find lswitch %s in NSX"), lswitch_id)
+
+            # delete edge
+            jobdata = {
+                'context': context
+            }
+            self.vcns_driver.delete_edge(router_id, edge_id, jobdata=jobdata)
+
+            # delete NSX logical router
+            routerlib.delete_lrouter(self.cluster, nsx_router_id)
+
+        if router_id in self._router_type:
+            del self._router_type[router_id]
+
+    def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
+        if not self._is_advanced_service_router(context, router_id):
+            return super(NsxAdvancedPlugin, self)._update_lrouter(
+                context, router_id, name, nexthop, routes=routes)
+
+        previous_routes = super(NsxAdvancedPlugin, self)._update_lrouter(
+            context, router_id, name,
+            vcns_const.INTEGRATION_EDGE_IPADDRESS, routes=routes)
+
+        # TODO(fank): Theoretically users can specify extra routes for
+        # physical network, and routes for physical network need to be
+        # configured on Edge. This can be done by checking if nexthop is in
+        # external network. But for now we only handle routes for logic
+        # space and leave it for future enhancement.
+ + # Let _update_router_gw_info handle nexthop change + #self._vcns_update_static_routes(context, router_id=router_id) + + return previous_routes + + def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, + internal_ip, router_id, + min_num_rules_expected=0): + # NOP for advanced service router + if not self._is_advanced_service_router(context, router_id): + super(NsxAdvancedPlugin, self)._retrieve_and_delete_nat_rules( + context, floating_ip_address, internal_ip, router_id, + min_num_rules_expected=min_num_rules_expected) + + def _update_fip_assoc(self, context, fip, floatingip_db, external_port): + # Update DB model only for advanced service router + router_id = self._get_fip_assoc_data(context, fip, floatingip_db)[2] + if (router_id and + not self._is_advanced_service_router(context, router_id)): + super(NsxAdvancedPlugin, self)._update_fip_assoc( + context, fip, floatingip_db, external_port) + else: + super(base.NsxPluginV2, self)._update_fip_assoc( + context, fip, floatingip_db, external_port) + + def _get_nsx_lrouter_status(self, id): + try: + lrouter = routerlib.get_lrouter(self.cluster, id) + lr_status = lrouter["_relations"]["LogicalRouterStatus"] + if lr_status["fabric_status"]: + nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE + else: + nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_DOWN + except n_exc.NotFound: + nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ERROR + + return nsx_status + + def _get_vse_status(self, context, id): + binding = vcns_db.get_vcns_router_binding(context.session, id) + edge_status_level = self.vcns_driver.get_edge_status( + binding['edge_id']) + edge_db_status_level = ROUTER_STATUS_LEVEL[binding.status] + + if edge_status_level > edge_db_status_level: + return edge_status_level + else: + return edge_db_status_level + + def _get_all_nsx_lrouters_statuses(self, tenant_id, fields): + # get nsx lrouters status + nsx_lrouters = routerlib.get_lrouters(self.cluster, + tenant_id, + fields) + + nsx_status = {} + for nsx_lrouter in nsx_lrouters: + if (nsx_lrouter["_relations"]["LogicalRouterStatus"] + ["fabric_status"]): + nsx_status[nsx_lrouter['uuid']] = ( + vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE + ) + else: + nsx_status[nsx_lrouter['uuid']] = ( + vcns_const.RouterStatus.ROUTER_STATUS_DOWN + ) + + return nsx_status + + def _get_all_vse_statuses(self, context): + bindings = self._model_query( + context, vcns_models.VcnsRouterBinding) + + vse_db_status_level = {} + edge_id_to_router_id = {} + router_ids = [] + for binding in bindings: + if not binding['edge_id']: + continue + router_id = binding['router_id'] + router_ids.append(router_id) + edge_id_to_router_id[binding['edge_id']] = router_id + vse_db_status_level[router_id] = ( + ROUTER_STATUS_LEVEL[binding['status']]) + + if not vse_db_status_level: + # no advanced service router, no need to query + return {} + + vse_status_level = {} + edges_status_level = self.vcns_driver.get_edges_statuses() + for edge_id, status_level in edges_status_level.iteritems(): + if edge_id in edge_id_to_router_id: + router_id = edge_id_to_router_id[edge_id] + db_status_level = vse_db_status_level[router_id] + if status_level > db_status_level: + vse_status_level[router_id] = status_level + else: + vse_status_level[router_id] = db_status_level + + return vse_status_level + + def get_router(self, context, id, fields=None): + if fields and 'status' not in fields: + return super(NsxAdvancedPlugin, self).get_router( + context, id, fields=fields) + + router = super(NsxAdvancedPlugin, 
self).get_router(context, id) + + router_type = self._find_router_type(router) + if router_type == ROUTER_TYPE_ADVANCED: + vse_status_level = self._get_vse_status(context, id) + if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]: + router['status'] = ROUTER_STATUS[vse_status_level] + + return self._fields(router, fields) + + def get_routers(self, context, filters=None, fields=None, **kwargs): + routers = super(NsxAdvancedPlugin, self).get_routers( + context, filters=filters, **kwargs) + + if fields and 'status' not in fields: + # no status checking, just return regular get_routers + return [self._fields(router, fields) for router in routers] + + for router in routers: + router_type = self._find_router_type(router) + if router_type == ROUTER_TYPE_ADVANCED: + break + else: + # no advanced service router, return here + return [self._fields(router, fields) for router in routers] + + vse_status_all = self._get_all_vse_statuses(context) + for router in routers: + router_type = self._find_router_type(router) + if router_type == ROUTER_TYPE_ADVANCED: + vse_status_level = vse_status_all.get(router['id']) + if vse_status_level is None: + vse_status_level = ( + vcns_const.RouterStatus.ROUTER_STATUS_ERROR) + if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]: + router['status'] = ROUTER_STATUS[vse_status_level] + + return [self._fields(router, fields) for router in routers] + + def add_router_interface(self, context, router_id, interface_info): + info = super(NsxAdvancedPlugin, self).add_router_interface( + context, router_id, interface_info) + if self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + if router.enable_snat: + self._update_nat_rules(context, router) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._vcns_update_static_routes(context, router=router) + return info + + def remove_router_interface(self, context, router_id, interface_info): + info = super(NsxAdvancedPlugin, self).remove_router_interface( + context, router_id, interface_info) + if self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + if router.enable_snat: + self._update_nat_rules(context, router) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._vcns_update_static_routes(context, router=router) + return info + + def create_floatingip(self, context, floatingip): + fip = super(NsxAdvancedPlugin, self).create_floatingip( + context, floatingip) + router_id = fip.get('router_id') + if router_id and self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._update_nat_rules(context, router) + self._update_interface(context, router) + return fip + + def update_floatingip(self, context, id, floatingip): + fip = super(NsxAdvancedPlugin, self).update_floatingip( + context, id, floatingip) + router_id = fip.get('router_id') + if router_id and self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._update_nat_rules(context, router) + self._update_interface(context, router) + return fip + + def 
delete_floatingip(self, context, id): + fip_db = self._get_floatingip(context, id) + router_id = None + if fip_db.fixed_port_id: + router_id = fip_db.router_id + super(NsxAdvancedPlugin, self).delete_floatingip(context, id) + if router_id and self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._update_interface(context, router) + self._update_nat_rules(context, router) + + def disassociate_floatingips(self, context, port_id): + routers = set() + + try: + fip_qry = context.session.query(l3_db.FloatingIP) + fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) + for fip_db in fip_dbs: + routers.add(fip_db.router_id) + except sa_exc.NoResultFound: + pass + super(NsxAdvancedPlugin, self).disassociate_floatingips(context, + port_id) + + for router_id in routers: + if self._is_advanced_service_router(context, router_id): + router = self._get_router(context, router_id) + # TODO(fank): do rollback on error, or have a dedicated thread + # do sync work (rollback, re-configure, or make router down) + self._update_interface(context, router) + self._update_nat_rules(context, router) + + # + # FWaaS plugin implementation + # + def _firewall_set_status( + self, context, firewall_id, status, firewall=None): + with context.session.begin(subtransactions=True): + fw_db = self._get_firewall(context, firewall_id) + if status == service_constants.PENDING_UPDATE and ( + fw_db.status == service_constants.PENDING_DELETE): + raise fw_ext.FirewallInPendingState( + firewall_id=firewall_id, pending_state=status) + else: + fw_db.status = status + if firewall: + firewall['status'] = status + + def _ensure_firewall_update_allowed(self, context, firewall_id): + fwall = self.get_firewall(context, firewall_id) + if fwall['status'] in [service_constants.PENDING_CREATE, + service_constants.PENDING_UPDATE, + service_constants.PENDING_DELETE]: + raise fw_ext.FirewallInPendingState(firewall_id=firewall_id, + pending_state=fwall['status']) + + def _ensure_firewall_policy_update_allowed( + self, context, firewall_policy_id): + firewall_policy = self.get_firewall_policy(context, firewall_policy_id) + for firewall_id in firewall_policy.get('firewall_list', []): + self._ensure_firewall_update_allowed(context, firewall_id) + + def _ensure_update_or_delete_firewall_rule( + self, context, firewall_rule_id): + fw_rule = self.get_firewall_rule(context, firewall_rule_id) + if fw_rule.get('firewall_policy_id'): + self._ensure_firewall_policy_update_allowed( + context, fw_rule['firewall_policy_id']) + + def _make_firewall_rule_list_by_policy_id(self, context, fw_policy_id): + if not fw_policy_id: + return [] + firewall_policy_db = self._get_firewall_policy(context, fw_policy_id) + return [ + self._make_firewall_rule_dict(fw_rule_db) + for fw_rule_db in firewall_policy_db['firewall_rules'] + ] + + def _get_edge_id_by_vcns_edge_binding(self, context, + router_id): + #Get vcns_router_binding mapping between router and edge + router_binding = vcns_db.get_vcns_router_binding( + context.session, router_id) + return router_binding.edge_id + + def _get_firewall_list_from_firewall_policy(self, context, policy_id): + firewall_policy_db = self._get_firewall_policy(context, policy_id) + return [ + self._make_firewall_dict(fw_db) + for fw_db in firewall_policy_db['firewalls'] + ] + + def _get_firewall_list_from_firewall_rule(self, context, rule_id): + rule = 
self._get_firewall_rule(context, rule_id) + if not rule.firewall_policy_id: + # The firewall rule is not associated with firewall policy yet + return None + + return self._get_firewall_list_from_firewall_policy( + context, rule.firewall_policy_id) + + def _vcns_update_firewall(self, context, fw, router_id=None, **kwargs): + edge_id = kwargs.get('edge_id') + if not edge_id: + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, router_id) + firewall_rule_list = kwargs.get('firewall_rule_list') + if not firewall_rule_list: + firewall_rule_list = self._make_firewall_rule_list_by_policy_id( + context, fw['firewall_policy_id']) + fw_with_rules = fw + fw_with_rules['firewall_rule_list'] = firewall_rule_list + try: + self.vcns_driver.update_firewall(context, edge_id, fw_with_rules) + except exceptions.VcnsApiException as e: + self._firewall_set_status( + context, fw['id'], service_constants.ERROR) + msg = (_("Failed to create firewall on vShield Edge " + "bound on router %s") % router_id) + LOG.exception(msg) + raise e + + except exceptions.VcnsBadRequest as e: + self._firewall_set_status( + context, fw['id'], service_constants.ERROR) + LOG.exception(_("Bad Firewall request Input")) + raise e + + def _vcns_delete_firewall(self, context, router_id=None, **kwargs): + edge_id = kwargs.get('edge_id') + if not edge_id: + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, router_id) + #TODO(linb):do rollback on error + self.vcns_driver.delete_firewall(context, edge_id) + + def create_firewall(self, context, firewall): + LOG.debug(_("create_firewall() called")) + router_id = firewall['firewall'].get(vcns_const.ROUTER_ID) + if not router_id: + msg = _("router_id is not provided!") + LOG.error(msg) + raise n_exc.BadRequest(resource='router', msg=msg) + if not self._is_advanced_service_router(context, router_id): + msg = _("router_id:%s is not an advanced router!") % router_id + LOG.error(msg) + raise n_exc.BadRequest(resource='router', msg=msg) + if self._get_resource_router_id_binding( + context, firewall_db.Firewall, router_id=router_id): + msg = _("A firewall is already associated with the router") + LOG.error(msg) + raise nsx_exc.ServiceOverQuota( + overs='firewall', err_msg=msg) + + fw = super(NsxAdvancedPlugin, self).create_firewall(context, firewall) + #Add router service insertion binding with firewall object + res = { + 'id': fw['id'], + 'router_id': router_id + } + self._process_create_resource_router_id( + context, res, firewall_db.Firewall) + # Since there is only one firewall per edge, + # here would be bulk configuration operation on firewall + self._vcns_update_firewall(context, fw, router_id) + self._firewall_set_status( + context, fw['id'], service_constants.ACTIVE, fw) + fw[rsi.ROUTER_ID] = router_id + return fw + + def update_firewall(self, context, id, firewall): + LOG.debug(_("update_firewall() called")) + self._ensure_firewall_update_allowed(context, id) + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=id) + rule_list_pre = self._make_firewall_rule_list_by_policy_id( + context, + self.get_firewall(context, id)['firewall_policy_id']) + firewall['firewall']['status'] = service_constants.PENDING_UPDATE + fw = super(NsxAdvancedPlugin, self).update_firewall( + context, id, firewall) + fw[rsi.ROUTER_ID] = service_router_binding['router_id'] + rule_list_new = self._make_firewall_rule_list_by_policy_id( + context, fw['firewall_policy_id']) + if rule_list_pre == rule_list_new: + self._firewall_set_status( + 
context, fw['id'], service_constants.ACTIVE, fw) + return fw + else: + self._vcns_update_firewall( + context, fw, service_router_binding.router_id, + firewall_rule_list=rule_list_new) + self._firewall_set_status( + context, fw['id'], service_constants.ACTIVE, fw) + return fw + + def delete_firewall(self, context, id): + LOG.debug(_("delete_firewall() called")) + self._firewall_set_status( + context, id, service_constants.PENDING_DELETE) + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=id) + self._vcns_delete_firewall(context, service_router_binding.router_id) + super(NsxAdvancedPlugin, self).delete_firewall(context, id) + self._delete_resource_router_id_binding( + context, id, firewall_db.Firewall) + + def get_firewall(self, context, id, fields=None): + fw = super(NsxAdvancedPlugin, self).get_firewall( + context, id, fields) + if fields and rsi.ROUTER_ID not in fields: + return fw + + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + fw[rsi.ROUTER_ID] = service_router_binding['router_id'] + return fw + + def get_firewalls(self, context, filters=None, fields=None): + fws = super(NsxAdvancedPlugin, self).get_firewalls( + context, filters, fields) + if fields and rsi.ROUTER_ID not in fields: + return fws + service_router_bindings = self._get_resource_router_id_bindings( + context, firewall_db.Firewall, + resource_ids=[fw['id'] for fw in fws]) + mapping = dict([(binding['resource_id'], binding['router_id']) + for binding in service_router_bindings]) + for fw in fws: + fw[rsi.ROUTER_ID] = mapping[fw['id']] + return fws + + def update_firewall_rule(self, context, id, firewall_rule): + LOG.debug(_("update_firewall_rule() called")) + self._ensure_update_or_delete_firewall_rule(context, id) + fwr_pre = self.get_firewall_rule(context, id) + fwr = super(NsxAdvancedPlugin, self).update_firewall_rule( + context, id, firewall_rule) + if fwr_pre == fwr: + return fwr + + # check if this rule is associated with firewall + fw_list = self._get_firewall_list_from_firewall_rule(context, id) + if not fw_list: + return fwr + + for fw in fw_list: + # get router service insertion binding with firewall id + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, service_router_binding.router_id) + + #TODO(linb): do rollback on error + self.vcns_driver.update_firewall_rule(context, id, edge_id, fwr) + + return fwr + + def update_firewall_policy(self, context, id, firewall_policy): + LOG.debug(_("update_firewall_policy() called")) + self._ensure_firewall_policy_update_allowed(context, id) + firewall_rules_pre = self._make_firewall_rule_list_by_policy_id( + context, id) + fwp = super(NsxAdvancedPlugin, self).update_firewall_policy( + context, id, firewall_policy) + firewall_rules = self._make_firewall_rule_list_by_policy_id( + context, id) + if firewall_rules_pre == firewall_rules: + return fwp + + # check if this policy is associated with firewall + fw_list = self._get_firewall_list_from_firewall_policy(context, id) + if not fw_list: + return fwp + + for fw in fw_list: + # Get the router_service insertion binding with firewall id + # TODO(fank): optimized by using _get_resource_router_id_bindings + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + self._vcns_update_firewall( + context, fw, 
service_router_binding.router_id, + firewall_rule_list=firewall_rules) + return fwp + + def insert_rule(self, context, id, rule_info): + LOG.debug(_("insert_rule() called")) + self._ensure_firewall_policy_update_allowed(context, id) + fwp = super(NsxAdvancedPlugin, self).insert_rule( + context, id, rule_info) + fwr = super(NsxAdvancedPlugin, self).get_firewall_rule( + context, rule_info['firewall_rule_id']) + + # check if this policy is associated with firewall + fw_list = self._get_firewall_list_from_firewall_policy(context, id) + if not fw_list: + return fwp + for fw in fw_list: + # TODO(fank): optimized by using _get_resource_router_id_bindings + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, service_router_binding.router_id) + + if rule_info.get('insert_before') or rule_info.get('insert_after'): + #if insert_before or insert_after is set, we would call + #VCNS insert_rule API + #TODO(linb): do rollback on error + self.vcns_driver.insert_rule(context, rule_info, edge_id, fwr) + else: + #Else we would call bulk configuration on the firewall + self._vcns_update_firewall(context, fw, edge_id=edge_id) + return fwp + + def remove_rule(self, context, id, rule_info): + LOG.debug(_("remove_rule() called")) + self._ensure_firewall_policy_update_allowed(context, id) + fwp = super(NsxAdvancedPlugin, self).remove_rule( + context, id, rule_info) + fwr = super(NsxAdvancedPlugin, self).get_firewall_rule( + context, rule_info['firewall_rule_id']) + + # check if this policy is associated with firewall + fw_list = self._get_firewall_list_from_firewall_policy(context, id) + if not fw_list: + return fwp + for fw in fw_list: + # TODO(fank): optimized by using _get_resource_router_id_bindings + service_router_binding = self._get_resource_router_id_binding( + context, firewall_db.Firewall, resource_id=fw['id']) + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, service_router_binding.router_id) + #TODO(linb): do rollback on error + self.vcns_driver.delete_firewall_rule( + context, fwr['id'], edge_id) + return fwp + + # + # LBAAS service plugin implementation + # + def _get_edge_id_by_vip_id(self, context, vip_id): + try: + service_router_binding = self._get_resource_router_id_binding( + context, loadbalancer_db.Vip, resource_id=vip_id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to find the edge with " + "vip_id: %s"), vip_id) + return self._get_edge_id_by_vcns_edge_binding( + context, service_router_binding.router_id) + + def _get_all_vip_addrs_by_router_id( + self, context, router_id): + vip_bindings = self._get_resource_router_id_bindings( + context, loadbalancer_db.Vip, router_ids=[router_id]) + vip_addrs = [] + for vip_binding in vip_bindings: + vip = self.get_vip(context, vip_binding.resource_id) + vip_addrs.append(vip.get('address')) + return vip_addrs + + def _add_router_service_insertion_binding(self, context, resource_id, + router_id, + model): + res = { + 'id': resource_id, + 'router_id': router_id + } + self._process_create_resource_router_id(context, res, + model) + + def _resource_set_status(self, context, model, id, status, obj=None, + pool_id=None): + with context.session.begin(subtransactions=True): + try: + qry = context.session.query(model) + if issubclass(model, loadbalancer_db.PoolMonitorAssociation): + res = qry.filter_by(monitor_id=id, + pool_id=pool_id).one() + else: + res = 
qry.filter_by(id=id).one() + if status == service_constants.PENDING_UPDATE and ( + res.get('status') == service_constants.PENDING_DELETE): + msg = (_("Operation can't be performed, Since resource " + "%(model)s : %(id)s is in DELETEing status!") % + {'model': model, + 'id': id}) + LOG.error(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + else: + res.status = status + except sa_exc.NoResultFound: + msg = (_("Resource %(model)s : %(id)s not found!") % + {'model': model, + 'id': id}) + LOG.exception(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + if obj: + obj['status'] = status + + def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs): + pool = self.get_pool(context, pool_id) + edge_id = kwargs.get('edge_id') + if not edge_id: + edge_id = self._get_edge_id_by_vip_id( + context, pool['vip_id']) + #Check wheter the pool is already created on the router + #in case of future's M:N relation between Pool and Vip + + #Check associated HealthMonitors and then create them + for monitor_id in pool.get('health_monitors'): + hm = self.get_health_monitor(context, monitor_id) + try: + self.vcns_driver.create_health_monitor( + context, edge_id, hm) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create healthmonitor " + "associated with pool id: %s!") % pool_id) + for monitor_ide in pool.get('health_monitors'): + if monitor_ide == monitor_id: + break + self.vcns_driver.delete_health_monitor( + context, monitor_ide, edge_id) + #Create the pool on the edge + members = [ + super(NsxAdvancedPlugin, self).get_member( + context, member_id) + for member_id in pool.get('members') + ] + try: + self.vcns_driver.create_pool(context, edge_id, pool, members) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create pool on vshield edge")) + self.vcns_driver.delete_pool( + context, pool_id, edge_id) + for monitor_id in pool.get('health_monitors'): + self.vcns_driver.delete_health_monitor( + context, monitor_id, edge_id) + + def _vcns_update_pool(self, context, pool, **kwargs): + edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) + members = kwargs.get('members') + if not members: + members = [ + super(NsxAdvancedPlugin, self).get_member( + context, member_id) + for member_id in pool.get('members') + ] + self.vcns_driver.update_pool(context, edge_id, pool, members) + + def create_vip(self, context, vip): + LOG.debug(_("create_vip() called")) + router_id = vip['vip'].get(vcns_const.ROUTER_ID) + if not router_id: + msg = _("router_id is not provided!") + LOG.error(msg) + raise n_exc.BadRequest(resource='router', msg=msg) + + if not self._is_advanced_service_router(context, router_id): + msg = _("router_id: %s is not an advanced router!") % router_id + LOG.error(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + + #Check whether the vip port is an external port + subnet_id = vip['vip']['subnet_id'] + network_id = self.get_subnet(context, subnet_id)['network_id'] + ext_net = self._get_network(context, network_id) + if not ext_net.external: + msg = (_("Network '%s' is not a valid external " + "network") % network_id) + raise nsx_exc.NsxPluginException(err_msg=msg) + + v = super(NsxAdvancedPlugin, self).create_vip(context, vip) + #Get edge_id for the resource + router_binding = vcns_db.get_vcns_router_binding( + context.session, + router_id) + edge_id = router_binding.edge_id + #Add vip_router binding + self._add_router_service_insertion_binding(context, v['id'], + router_id, + 
loadbalancer_db.Vip) + #Create the vip port on vShield Edge + router = self._get_router(context, router_id) + self._update_interface(context, router, sync=True) + #Create the vip and associated pool/monitor on the corresponding edge + try: + self._vcns_create_pool_and_monitors( + context, v['pool_id'], edge_id=edge_id) + self.vcns_driver.create_vip(context, edge_id, v) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create vip!")) + self._delete_resource_router_id_binding( + context, v['id'], loadbalancer_db.Vip) + super(NsxAdvancedPlugin, self).delete_vip(context, v['id']) + self._resource_set_status(context, loadbalancer_db.Vip, + v['id'], service_constants.ACTIVE, v) + v[rsi.ROUTER_ID] = router_id + + return v + + def update_vip(self, context, id, vip): + edge_id = self._get_edge_id_by_vip_id(context, id) + old_vip = self.get_vip(context, id) + session_persistence_update = bool( + vip['vip'].get('session_persistence')) + vip['vip']['status'] = service_constants.PENDING_UPDATE + v = super(NsxAdvancedPlugin, self).update_vip(context, id, vip) + v[rsi.ROUTER_ID] = self._get_resource_router_id_binding( + context, loadbalancer_db.Vip, resource_id=id)['router_id'] + if old_vip['pool_id'] != v['pool_id']: + self.vcns_driver.delete_vip(context, id) + #Delete old pool/monitor on the edge + #TODO(linb): Factor out procedure for removing pool and health + #separate method + old_pool = self.get_pool(context, old_vip['pool_id']) + self.vcns_driver.delete_pool( + context, old_vip['pool_id'], edge_id) + for monitor_id in old_pool.get('health_monitors'): + self.vcns_driver.delete_health_monitor( + context, monitor_id, edge_id) + #Create new pool/monitor object on the edge + #TODO(linb): add exception handle if error + self._vcns_create_pool_and_monitors( + context, v['pool_id'], edge_id=edge_id) + self.vcns_driver.create_vip(context, edge_id, v) + return v + try: + self.vcns_driver.update_vip(context, v, session_persistence_update) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update vip with id: %s!"), id) + self._resource_set_status(context, loadbalancer_db.Vip, + id, service_constants.ERROR, v) + + self._resource_set_status(context, loadbalancer_db.Vip, + v['id'], service_constants.ACTIVE, v) + return v + + def delete_vip(self, context, id): + v = self.get_vip(context, id) + self._resource_set_status( + context, loadbalancer_db.Vip, + id, service_constants.PENDING_DELETE) + try: + self.vcns_driver.delete_vip(context, id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete vip with id: %s!"), id) + self._resource_set_status(context, loadbalancer_db.Vip, + id, service_constants.ERROR) + edge_id = self._get_edge_id_by_vip_id(context, id) + #Check associated HealthMonitors and then delete them + pool = self.get_pool(context, v['pool_id']) + self.vcns_driver.delete_pool(context, v['pool_id'], edge_id) + for monitor_id in pool.get('health_monitors'): + #TODO(linb): do exception handle if error + self.vcns_driver.delete_health_monitor( + context, monitor_id, edge_id) + + router_binding = self._get_resource_router_id_binding( + context, loadbalancer_db.Vip, resource_id=id) + router = self._get_router(context, router_binding.router_id) + self._delete_resource_router_id_binding( + context, id, loadbalancer_db.Vip) + super(NsxAdvancedPlugin, self).delete_vip(context, id) + self._update_interface(context, router, sync=True) + + def get_vip(self, context, id, 
fields=None): + vip = super(NsxAdvancedPlugin, self).get_vip(context, id, fields) + if fields and rsi.ROUTER_ID not in fields: + return vip + + service_router_binding = self._get_resource_router_id_binding( + context, loadbalancer_db.Vip, resource_id=vip['id']) + vip[rsi.ROUTER_ID] = service_router_binding['router_id'] + return vip + + def get_vips(self, context, filters=None, fields=None): + vips = super(NsxAdvancedPlugin, self).get_vips( + context, filters, fields) + if fields and rsi.ROUTER_ID not in fields: + return vips + service_router_bindings = self._get_resource_router_id_bindings( + context, loadbalancer_db.Vip, + resource_ids=[vip['id'] for vip in vips]) + mapping = dict([(binding['resource_id'], binding['router_id']) + for binding in service_router_bindings]) + for vip in vips: + vip[rsi.ROUTER_ID] = mapping[vip['id']] + return vips + + def update_pool(self, context, id, pool): + pool['pool']['status'] = service_constants.PENDING_UPDATE + p = super(NsxAdvancedPlugin, self).update_pool(context, id, pool) + #Check whether the pool is already associated with the vip + if not p.get('vip_id'): + self._resource_set_status(context, loadbalancer_db.Pool, + p['id'], service_constants.ACTIVE, p) + return p + try: + self._vcns_update_pool(context, p) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool with id: %s!"), id) + self._resource_set_status(context, loadbalancer_db.Pool, + p['id'], service_constants.ERROR, p) + self._resource_set_status(context, loadbalancer_db.Pool, + p['id'], service_constants.ACTIVE, p) + return p + + def create_member(self, context, member): + m = super(NsxAdvancedPlugin, self).create_member(context, member) + pool_id = m.get('pool_id') + pool = self.get_pool(context, pool_id) + if not pool.get('vip_id'): + self._resource_set_status(context, loadbalancer_db.Member, + m['id'], service_constants.ACTIVE, m) + return m + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, + service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool with the member")) + super(NsxAdvancedPlugin, self).delete_member(context, m['id']) + + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + self._resource_set_status(context, loadbalancer_db.Member, + m['id'], service_constants.ACTIVE, m) + return m + + def update_member(self, context, id, member): + member['member']['status'] = service_constants.PENDING_UPDATE + old_member = self.get_member(context, id) + m = super(NsxAdvancedPlugin, self).update_member( + context, id, member) + + if m['pool_id'] != old_member['pool_id']: + old_pool_id = old_member['pool_id'] + old_pool = self.get_pool(context, old_pool_id) + if old_pool.get('vip_id'): + self._resource_set_status( + context, loadbalancer_db.Pool, + old_pool_id, service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, old_pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update old pool " + "with the member")) + super(NsxAdvancedPlugin, self).delete_member( + context, m['id']) + self._resource_set_status( + context, loadbalancer_db.Pool, + old_pool_id, service_constants.ACTIVE) + + pool_id = m['pool_id'] + pool = self.get_pool(context, pool_id) + if not pool.get('vip_id'): + self._resource_set_status(context, loadbalancer_db.Member, + m['id'], service_constants.ACTIVE, m) + 
return m + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, + service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool with the member")) + super(NsxAdvancedPlugin, self).delete_member( + context, m['id']) + + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + self._resource_set_status(context, loadbalancer_db.Member, + m['id'], service_constants.ACTIVE, m) + return m + + def delete_member(self, context, id): + m = self.get_member(context, id) + super(NsxAdvancedPlugin, self).delete_member(context, id) + pool_id = m['pool_id'] + pool = self.get_pool(context, pool_id) + if not pool.get('vip_id'): + return + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool with the member")) + self._resource_set_status(context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + + def update_health_monitor(self, context, id, health_monitor): + old_hm = super(NsxAdvancedPlugin, self).get_health_monitor( + context, id) + hm = super(NsxAdvancedPlugin, self).update_health_monitor( + context, id, health_monitor) + for hm_pool in hm.get('pools'): + pool_id = hm_pool['pool_id'] + pool = self.get_pool(context, pool_id) + if pool.get('vip_id'): + edge_id = self._get_edge_id_by_vip_id( + context, pool['vip_id']) + try: + self.vcns_driver.update_health_monitor( + context, edge_id, old_hm, hm) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update monitor " + "with id: %s!"), id) + return hm + + def create_pool_health_monitor(self, context, + health_monitor, pool_id): + monitor_id = health_monitor['health_monitor']['id'] + pool = self.get_pool(context, pool_id) + monitors = pool.get('health_monitors') + if len(monitors) > 0: + msg = _("Vcns right now can only support " + "one monitor per pool") + LOG.error(msg) + raise nsx_exc.NsxPluginException(err_msg=msg) + #Check whether the pool is already associated with the vip + if not pool.get('vip_id'): + res = super(NsxAdvancedPlugin, + self).create_pool_health_monitor(context, + health_monitor, + pool_id) + return res + #Get the edge_id + edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) + res = super(NsxAdvancedPlugin, + self).create_pool_health_monitor(context, + health_monitor, + pool_id) + monitor = self.get_health_monitor(context, monitor_id) + #TODO(linb)Add Exception handle if error + self.vcns_driver.create_health_monitor(context, edge_id, monitor) + #Get updated pool + pool['health_monitors'].append(monitor['id']) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to associate monitor with pool!")) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.ERROR) + super(NsxAdvancedPlugin, self).delete_pool_health_monitor( + context, monitor_id, pool_id) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + self._resource_set_status( + context, loadbalancer_db.PoolMonitorAssociation, + monitor_id, service_constants.ACTIVE, res, + 
pool_id=pool_id) + return res + + def delete_pool_health_monitor(self, context, id, pool_id): + super(NsxAdvancedPlugin, self).delete_pool_health_monitor( + context, id, pool_id) + pool = self.get_pool(context, pool_id) + #Check whether the pool is already associated with the vip + if pool.get('vip_id'): + #Delete the monitor on vshield edge + edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.PENDING_UPDATE) + try: + self._vcns_update_pool(context, pool) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception( + _("Failed to update pool with pool_monitor!")) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.ERROR) + #TODO(linb): Add exception handle if error + self.vcns_driver.delete_health_monitor(context, id, edge_id) + self._resource_set_status( + context, loadbalancer_db.Pool, + pool_id, service_constants.ACTIVE) + + def _vcns_update_ipsec_config( + self, context, vpnservice_id, removed_ipsec_conn_id=None): + sites = [] + vpn_service = self._get_vpnservice(context, vpnservice_id) + edge_id = self._get_edge_id_by_vcns_edge_binding( + context, vpn_service.router_id) + if not vpn_service.router.gw_port: + msg = _("Failed to update ipsec vpn configuration on edge, since " + "the router: %s does not have a gateway yet!" + ) % vpn_service.router_id + LOG.error(msg) + raise exceptions.VcnsBadRequest(resource='router', msg=msg) + + external_ip = vpn_service.router.gw_port['fixed_ips'][0]['ip_address'] + subnet = self._make_subnet_dict(vpn_service.subnet) + for ipsec_site_conn in vpn_service.ipsec_site_connections: + if ipsec_site_conn.id != removed_ipsec_conn_id: + site = self._make_ipsec_site_connection_dict(ipsec_site_conn) + ikepolicy = self._make_ikepolicy_dict( + ipsec_site_conn.ikepolicy) + ipsecpolicy = self._make_ipsecpolicy_dict( + ipsec_site_conn.ipsecpolicy) + sites.append({'site': site, + 'ikepolicy': ikepolicy, + 'ipsecpolicy': ipsecpolicy, + 'subnet': subnet, + 'external_ip': external_ip}) + try: + self.vcns_driver.update_ipsec_config( + edge_id, sites, enabled=vpn_service.admin_state_up) + except exceptions.VcnsBadRequest: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Bad or unsupported Input request!")) + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + msg = (_("Failed to update ipsec VPN configuration " + "with vpnservice: %(vpnservice_id)s on vShield Edge: " + "%(edge_id)s") % {'vpnservice_id': vpnservice_id, + 'edge_id': edge_id}) + LOG.exception(msg) + + def create_vpnservice(self, context, vpnservice): + LOG.debug(_("create_vpnservice() called")) + router_id = vpnservice['vpnservice'].get('router_id') + if not self._is_advanced_service_router(context, router_id): + msg = _("router_id:%s is not an advanced router!") % router_id + LOG.warning(msg) + raise exceptions.VcnsBadRequest(resource='router', msg=msg) + + if self.get_vpnservices(context, filters={'router_id': [router_id]}): + msg = _("a vpnservice is already associated with the router: %s" + ) % router_id + LOG.warning(msg) + raise nsx_exc.ServiceOverQuota( + overs='vpnservice', err_msg=msg) + + service = super(NsxAdvancedPlugin, self).create_vpnservice( + context, vpnservice) + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ACTIVE, service) + return service + + def update_vpnservice(self, context, vpnservice_id, vpnservice): + 
vpnservice['vpnservice']['status'] = service_constants.PENDING_UPDATE + service = super(NsxAdvancedPlugin, self).update_vpnservice( + context, vpnservice_id, vpnservice) + # Only admin_state_up attribute is configurable on Edge. + if vpnservice['vpnservice'].get('admin_state_up') is None: + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ACTIVE, service) + return service + # Test whether there is one ipsec site connection attached to + # the vpnservice. If not, just return without updating ipsec + # config on edge side. + vpn_service_db = self._get_vpnservice(context, vpnservice_id) + if not vpn_service_db.ipsec_site_connections: + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ACTIVE, service) + return service + try: + self._vcns_update_ipsec_config(context, service['id']) + except Exception: + with excutils.save_and_reraise_exception(): + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ERROR, service) + self._resource_set_status( + context, vpn_db.VPNService, + service['id'], service_constants.ACTIVE, service) + return service + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + ipsec_site_conn = super( + NsxAdvancedPlugin, self).create_ipsec_site_connection( + context, ipsec_site_connection) + try: + self._vcns_update_ipsec_config( + context, ipsec_site_conn['vpnservice_id']) + except Exception: + with excutils.save_and_reraise_exception(): + super(NsxAdvancedPlugin, self).delete_ipsec_site_connection( + context, ipsec_site_conn['id']) + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, + ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn) + return ipsec_site_conn + + def update_ipsec_site_connection(self, context, ipsec_site_connection_id, + ipsec_site_connection): + ipsec_site_connection['ipsec_site_connection']['status'] = ( + service_constants.PENDING_UPDATE) + ipsec_site_conn = super( + NsxAdvancedPlugin, self).update_ipsec_site_connection( + context, ipsec_site_connection_id, ipsec_site_connection) + try: + self._vcns_update_ipsec_config( + context, ipsec_site_conn['vpnservice_id']) + except Exception: + with excutils.save_and_reraise_exception(): + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, ipsec_site_conn['id'], + service_constants.ERROR, ipsec_site_conn) + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, + ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn) + return ipsec_site_conn + + def delete_ipsec_site_connection(self, context, ipsec_site_conn_id): + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, + ipsec_site_conn_id, service_constants.PENDING_DELETE) + vpnservice_id = self.get_ipsec_site_connection( + context, ipsec_site_conn_id)['vpnservice_id'] + try: + self._vcns_update_ipsec_config( + context, vpnservice_id, ipsec_site_conn_id) + except Exception: + with excutils.save_and_reraise_exception(): + self._resource_set_status( + context, vpn_db.IPsecSiteConnection, ipsec_site_conn_id, + service_constants.ERROR) + super(NsxAdvancedPlugin, self).delete_ipsec_site_connection( + context, ipsec_site_conn_id) + + +class VcnsCallbacks(object): + """Edge callback implementation Callback functions for + asynchronous tasks. 
+ """ + def __init__(self, plugin): + self.plugin = plugin + + def edge_deploy_started(self, task): + """callback when deployment task started.""" + jobdata = task.userdata['jobdata'] + context = jobdata['context'] + edge_id = task.userdata.get('edge_id') + neutron_router_id = jobdata['neutron_router_id'] + name = task.userdata['router_name'] + if edge_id: + LOG.debug(_("Start deploying %(edge_id)s for router %(name)s"), { + 'edge_id': edge_id, + 'name': name}) + vcns_db.update_vcns_router_binding( + context.session, neutron_router_id, edge_id=edge_id) + else: + LOG.debug(_("Failed to deploy Edge for router %s"), name) + vcns_db.update_vcns_router_binding( + context.session, neutron_router_id, + status=service_constants.ERROR) + + def edge_deploy_result(self, task): + """callback when deployment task finished.""" + jobdata = task.userdata['jobdata'] + lrouter = jobdata['lrouter'] + context = jobdata['context'] + name = task.userdata['router_name'] + neutron_router_id = jobdata['neutron_router_id'] + router_db = None + try: + router_db = self.plugin._get_router( + context, neutron_router_id) + except l3.RouterNotFound: + # Router might have been deleted before deploy finished + LOG.exception(_("Router %s not found"), lrouter['uuid']) + + if task.status == tasks_const.TaskStatus.COMPLETED: + LOG.debug(_("Successfully deployed %(edge_id)s for " + "router %(name)s"), { + 'edge_id': task.userdata['edge_id'], + 'name': name}) + if (router_db and + router_db['status'] == service_constants.PENDING_CREATE): + router_db['status'] = service_constants.ACTIVE + + binding = vcns_db.get_vcns_router_binding( + context.session, neutron_router_id) + # only update status to active if its status is pending create + if binding['status'] == service_constants.PENDING_CREATE: + vcns_db.update_vcns_router_binding( + context.session, neutron_router_id, + status=service_constants.ACTIVE) + else: + LOG.debug(_("Failed to deploy Edge for router %s"), name) + if router_db: + router_db['status'] = service_constants.ERROR + vcns_db.update_vcns_router_binding( + context.session, neutron_router_id, + status=service_constants.ERROR) + + def edge_delete_result(self, task): + jobdata = task.userdata['jobdata'] + router_id = task.userdata['router_id'] + context = jobdata['context'] + if task.status == tasks_const.TaskStatus.COMPLETED: + vcns_db.delete_vcns_router_binding(context.session, + router_id) + + def interface_update_result(self, task): + LOG.debug(_("interface_update_result %d"), task.status) + + def snat_create_result(self, task): + LOG.debug(_("snat_create_result %d"), task.status) + + def snat_delete_result(self, task): + LOG.debug(_("snat_delete_result %d"), task.status) + + def dnat_create_result(self, task): + LOG.debug(_("dnat_create_result %d"), task.status) + + def dnat_delete_result(self, task): + LOG.debug(_("dnat_delete_result %d"), task.status) + + def routes_update_result(self, task): + LOG.debug(_("routes_update_result %d"), task.status) + + def nat_update_result(self, task): + LOG.debug(_("nat_update_result %d"), task.status) + + +def _process_base_create_lswitch_args(*args, **kwargs): + tags = utils.get_tags() + tags.append({"tag": args[1], + "scope": "quantum_net_id"}) + if args[2]: + tags.append({"tag": args[2], "scope": "os_tid"}) + switch_name = args[3] + tz_config = args[4] + if kwargs.get("shared", False) or len(args) >= 6: + tags.append({"tag": "true", "scope": "shared"}) + if kwargs.get("tags"): + tags.extend(kwargs["tags"]) + return switch_name, tz_config, tags diff --git 
a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/shell/commands.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/shell/commands.py new file mode 100644 index 00000000..bd6706ff --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/shell/commands.py @@ -0,0 +1,67 @@ +# Copyright 2014 VMware, Inc. +# +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutronclient.neutron import v2_0 as client + +LSN_PATH = '/lsns' + + +def print_report(write_func, report): + write_func(_("\nService type = %s\n") % report['report']['type']) + services = ','.join(report['report']['services']) + ports = ','.join(report['report']['ports']) + write_func(_("Service uuids = %s\n") % services) + write_func(_("Port uuids = %s\n\n") % ports) + + +class NetworkReport(client.NeutronCommand): + """Retrieve network migration report.""" + + def get_parser(self, prog_name): + parser = super(NetworkReport, self).get_parser(prog_name) + parser.add_argument('network', metavar='network', + help=_('ID or name of network to run report on')) + return parser + + def run(self, parsed_args): + net = parsed_args.network + net_id = client.find_resourceid_by_name_or_id(self.app.client, + 'network', net) + res = self.app.client.get("%s/%s" % (LSN_PATH, net_id)) + if res: + self.app.stdout.write(_('Migration report is:\n')) + print_report(self.app.stdout.write, res['lsn']) + + +class NetworkMigrate(client.NeutronCommand): + """Perform network migration.""" + + def get_parser(self, prog_name): + parser = super(NetworkMigrate, self).get_parser(prog_name) + parser.add_argument('network', metavar='network', + help=_('ID or name of network to migrate')) + return parser + + def run(self, parsed_args): + net = parsed_args.network + net_id = client.find_resourceid_by_name_or_id(self.app.client, + 'network', net) + body = {'network': net_id} + res = self.app.client.post(LSN_PATH, body={'lsn': body}) + if res: + self.app.stdout.write(_('Migration has been successful:\n')) + print_report(self.app.stdout.write, res['lsn']) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/__init__.py new file mode 100644 index 00000000..db5f20ea --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_appliance_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_appliance_driver.py
new file mode 100644
index 00000000..ce61b8f8
--- /dev/null
+++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_appliance_driver.py
@@ -0,0 +1,665 @@
+# Copyright 2013 VMware, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Kaiwei Fan, VMware, Inc.
+# @author: Bo Link, VMware, Inc.
+
+from neutron.openstack.common import excutils
+from neutron.openstack.common import jsonutils
+from neutron.openstack.common import log as logging
+from neutron.plugins.vmware.common import utils
+from neutron.plugins.vmware.vshield.common import (
+    constants as vcns_const)
+from neutron.plugins.vmware.vshield.common import constants as common_constants
+from neutron.plugins.vmware.vshield.common import exceptions
+from neutron.plugins.vmware.vshield.tasks import constants
+from neutron.plugins.vmware.vshield.tasks import tasks
+
+LOG = logging.getLogger(__name__)
+
+
+class EdgeApplianceDriver(object):
+    def __init__(self):
+        # store the last task per edge that has the latest config
+        self.updated_task = {
+            'nat': {},
+            'route': {},
+        }
+
+    def _assemble_edge(self, name, appliance_size="compact",
+                       deployment_container_id=None, datacenter_moid=None,
+                       enable_aesni=True, hypervisor_assist=False,
+                       enable_fips=False, remote_access=False):
+        edge = {
+            'name': name,
+            'fqdn': name,
+            'hypervisorAssist': hypervisor_assist,
+            'type': 'gatewayServices',
+            'enableAesni': enable_aesni,
+            'enableFips': enable_fips,
+            'cliSettings': {
+                'remoteAccess': remote_access
+            },
+            'appliances': {
+                'applianceSize': appliance_size
+            },
+            'vnics': {
+                'vnics': []
+            }
+        }
+        if deployment_container_id:
+            edge['appliances']['deploymentContainerId'] = (
+                deployment_container_id)
+        if datacenter_moid:
+            edge['datacenterMoid'] = datacenter_moid
+
+        return edge
+
+    def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
+        appliance = {}
+        if resource_pool_id:
+            appliance['resourcePoolId'] = resource_pool_id
+        if datastore_id:
+            appliance['datastoreId'] = datastore_id
+        return appliance
+
+    def _assemble_edge_vnic(self, name, index, portgroup_id,
+                            primary_address=None, subnet_mask=None,
+                            secondary=None,
+                            type="internal",
+                            enable_proxy_arp=False,
+                            enable_send_redirects=True,
+                            is_connected=True,
+                            mtu=1500):
+        vnic = {
+            'index': index,
+            'name': name,
+            'type': type,
+            'portgroupId': portgroup_id,
+            'mtu': mtu,
+            'enableProxyArp': enable_proxy_arp,
+            'enableSendRedirects': enable_send_redirects,
+            'isConnected': is_connected
+        }
+        if primary_address and subnet_mask:
+            address_group = {
+                'primaryAddress': primary_address,
+                'subnetMask': subnet_mask
+            }
+            if secondary:
+                address_group['secondaryAddresses'] = {
+                    'ipAddress': secondary,
+                    'type': 'IpAddressesDto'
+                }
+
+            vnic['addressGroups'] = {
+                'addressGroups': [address_group]
+            }
+
+        return vnic
+
+    def 
_edge_status_to_level(self, status): + if status == 'GREEN': + status_level = common_constants.RouterStatus.ROUTER_STATUS_ACTIVE + elif status in ('GREY', 'YELLOW'): + status_level = common_constants.RouterStatus.ROUTER_STATUS_DOWN + else: + status_level = common_constants.RouterStatus.ROUTER_STATUS_ERROR + return status_level + + def _enable_loadbalancer(self, edge): + if not edge.get('featureConfigs') or ( + not edge['featureConfigs'].get('features')): + edge['featureConfigs'] = {'features': []} + edge['featureConfigs']['features'].append( + {'featureType': 'loadbalancer_4.0', + 'enabled': True}) + + def get_edge_status(self, edge_id): + try: + response = self.vcns.get_edge_status(edge_id)[1] + status_level = self._edge_status_to_level( + response['edgeStatus']) + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to get edge status:\n%s"), + e.response) + status_level = common_constants.RouterStatus.ROUTER_STATUS_ERROR + try: + desc = jsonutils.loads(e.response) + if desc.get('errorCode') == ( + vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING): + status_level = ( + common_constants.RouterStatus.ROUTER_STATUS_DOWN) + except ValueError: + LOG.exception(e.response) + + return status_level + + def get_edges_statuses(self): + edges_status_level = {} + edges = self._get_edges() + for edge in edges['edgePage'].get('data', []): + edge_id = edge['id'] + status = edge['edgeStatus'] + edges_status_level[edge_id] = self._edge_status_to_level(status) + + return edges_status_level + + def _update_interface(self, task): + edge_id = task.userdata['edge_id'] + config = task.userdata['config'] + LOG.debug(_("VCNS: start updating vnic %s"), config) + try: + self.vcns.update_interface(edge_id, config) + except exceptions.VcnsApiException as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n" + "%(response)s"), { + 'config': config, + 'response': e.response}) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Failed to update vnic %d"), + config['index']) + + return constants.TaskStatus.COMPLETED + + def update_interface(self, router_id, edge_id, index, network, + address=None, netmask=None, secondary=None, + jobdata=None): + LOG.debug(_("VCNS: update vnic %(index)d: %(addr)s %(netmask)s"), { + 'index': index, 'addr': address, 'netmask': netmask}) + if index == vcns_const.EXTERNAL_VNIC_INDEX: + name = vcns_const.EXTERNAL_VNIC_NAME + intf_type = 'uplink' + elif index == vcns_const.INTERNAL_VNIC_INDEX: + name = vcns_const.INTERNAL_VNIC_NAME + intf_type = 'internal' + else: + msg = _("Vnic %d currently not supported") % index + raise exceptions.VcnsGeneralException(msg) + + config = self._assemble_edge_vnic( + name, index, network, address, netmask, secondary, type=intf_type) + + userdata = { + 'edge_id': edge_id, + 'config': config, + 'jobdata': jobdata + } + task_name = "update-interface-%s-%d" % (edge_id, index) + task = tasks.Task(task_name, router_id, + self._update_interface, userdata=userdata) + task.add_result_monitor(self.callbacks.interface_update_result) + self.task_manager.add(task) + return task + + def _deploy_edge(self, task): + userdata = task.userdata + name = userdata['router_name'] + LOG.debug(_("VCNS: start deploying edge %s"), name) + request = userdata['request'] + try: + header = self.vcns.deploy_edge(request)[0] + objuri = header['location'] + job_id = objuri[objuri.rfind("/") + 1:] + response = self.vcns.get_edge_id(job_id)[1] + edge_id = response['edgeId'] + 
LOG.debug(_("VCNS: deploying edge %s"), edge_id) + userdata['edge_id'] = edge_id + status = constants.TaskStatus.PENDING + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: deploy edge failed for router %s."), + name) + + return status + + def _status_edge(self, task): + edge_id = task.userdata['edge_id'] + try: + response = self.vcns.get_edge_deploy_status(edge_id)[1] + task.userdata['retries'] = 0 + system_status = response.get('systemStatus', None) + if system_status is None: + status = constants.TaskStatus.PENDING + elif system_status == 'good': + status = constants.TaskStatus.COMPLETED + else: + status = constants.TaskStatus.ERROR + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Edge %s status query failed."), edge_id) + except Exception: + retries = task.userdata.get('retries', 0) + 1 + if retries < 3: + task.userdata['retries'] = retries + msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. " + "Retry %(retries)d.") % { + 'edge_id': edge_id, + 'retries': retries} + LOG.exception(msg) + status = constants.TaskStatus.PENDING + else: + msg = _("VCNS: Unable to retrieve edge %s status. " + "Abort.") % edge_id + LOG.exception(msg) + status = constants.TaskStatus.ERROR + LOG.debug(_("VCNS: Edge %s status"), edge_id) + return status + + def _result_edge(self, task): + router_name = task.userdata['router_name'] + edge_id = task.userdata.get('edge_id') + if task.status != constants.TaskStatus.COMPLETED: + LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s " + "for %(name)s, status %(status)d"), { + 'edge_id': edge_id, + 'name': router_name, + 'status': task.status + }) + else: + LOG.debug(_("VCNS: Edge %(edge_id)s deployed for " + "router %(name)s"), { + 'edge_id': edge_id, 'name': router_name + }) + + def _delete_edge(self, task): + edge_id = task.userdata['edge_id'] + LOG.debug(_("VCNS: start destroying edge %s"), edge_id) + status = constants.TaskStatus.COMPLETED + if edge_id: + try: + self.vcns.delete_edge(edge_id) + except exceptions.ResourceNotFound: + pass + except exceptions.VcnsApiException as e: + msg = _("VCNS: Failed to delete %(edge_id)s:\n" + "%(response)s") % { + 'edge_id': edge_id, 'response': e.response} + LOG.exception(msg) + status = constants.TaskStatus.ERROR + except Exception: + LOG.exception(_("VCNS: Failed to delete %s"), edge_id) + status = constants.TaskStatus.ERROR + + return status + + def _get_edges(self): + try: + return self.vcns.get_edges()[1] + except exceptions.VcnsApiException as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response) + + def deploy_edge(self, router_id, name, internal_network, jobdata=None, + wait_for_exec=False, loadbalancer_enable=True): + task_name = 'deploying-%s' % name + edge_name = name + edge = self._assemble_edge( + edge_name, datacenter_moid=self.datacenter_moid, + deployment_container_id=self.deployment_container_id, + appliance_size='large', remote_access=True) + appliance = self._assemble_edge_appliance(self.resource_pool_id, + self.datastore_id) + if appliance: + edge['appliances']['appliances'] = [appliance] + + vnic_external = self._assemble_edge_vnic( + vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX, + self.external_network, type="uplink") + edge['vnics']['vnics'].append(vnic_external) + vnic_inside = self._assemble_edge_vnic( + vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX, + internal_network, + 
vcns_const.INTEGRATION_EDGE_IPADDRESS, + vcns_const.INTEGRATION_SUBNET_NETMASK, + type="internal") + edge['vnics']['vnics'].append(vnic_inside) + if loadbalancer_enable: + self._enable_loadbalancer(edge) + userdata = { + 'request': edge, + 'router_name': name, + 'jobdata': jobdata + } + task = tasks.Task(task_name, router_id, + self._deploy_edge, + status_callback=self._status_edge, + result_callback=self._result_edge, + userdata=userdata) + task.add_executed_monitor(self.callbacks.edge_deploy_started) + task.add_result_monitor(self.callbacks.edge_deploy_result) + self.task_manager.add(task) + + if wait_for_exec: + # wait until the deploy task is executed so edge_id is available + task.wait(constants.TaskState.EXECUTED) + + return task + + def delete_edge(self, router_id, edge_id, jobdata=None): + task_name = 'delete-%s' % edge_id + userdata = { + 'router_id': router_id, + 'edge_id': edge_id, + 'jobdata': jobdata + } + task = tasks.Task(task_name, router_id, self._delete_edge, + userdata=userdata) + task.add_result_monitor(self.callbacks.edge_delete_result) + self.task_manager.add(task) + return task + + def _assemble_nat_rule(self, action, original_address, + translated_address, + vnic_index=vcns_const.EXTERNAL_VNIC_INDEX, + enabled=True): + nat_rule = {} + nat_rule['action'] = action + nat_rule['vnic'] = vnic_index + nat_rule['originalAddress'] = original_address + nat_rule['translatedAddress'] = translated_address + nat_rule['enabled'] = enabled + return nat_rule + + def get_nat_config(self, edge_id): + try: + return self.vcns.get_nat_config(edge_id)[1] + except exceptions.VcnsApiException as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("VCNS: Failed to get nat config:\n%s"), + e.response) + + def _create_nat_rule(self, task): + # TODO(fank): use POST for optimization + # return rule_id for future reference + rule = task.userdata['rule'] + LOG.debug(_("VCNS: start creating nat rules: %s"), rule) + edge_id = task.userdata['edge_id'] + nat = self.get_nat_config(edge_id) + location = task.userdata['location'] + + del nat['version'] + + if location is None or location == vcns_const.APPEND: + nat['rules']['natRulesDtos'].append(rule) + else: + nat['rules']['natRulesDtos'].insert(location, rule) + + try: + self.vcns.update_nat_config(edge_id, nat) + status = constants.TaskStatus.COMPLETED + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to create snat rule:\n%s"), + e.response) + status = constants.TaskStatus.ERROR + + return status + + def create_snat_rule(self, router_id, edge_id, src, translated, + jobdata=None, location=None): + LOG.debug(_("VCNS: create snat rule %(src)s/%(translated)s"), { + 'src': src, 'translated': translated}) + snat_rule = self._assemble_nat_rule("snat", src, translated) + userdata = { + 'router_id': router_id, + 'edge_id': edge_id, + 'rule': snat_rule, + 'location': location, + 'jobdata': jobdata + } + task_name = "create-snat-%s-%s-%s" % (edge_id, src, translated) + task = tasks.Task(task_name, router_id, self._create_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.snat_create_result) + self.task_manager.add(task) + return task + + def _delete_nat_rule(self, task): + # TODO(fank): pass in rule_id for optimization + # handle routes update for optimization + edge_id = task.userdata['edge_id'] + address = task.userdata['address'] + addrtype = task.userdata['addrtype'] + LOG.debug(_("VCNS: start deleting %(type)s rules: %(addr)s"), { + 'type': addrtype, 'addr': address}) + nat = 
self.get_nat_config(edge_id) + del nat['version'] + status = constants.TaskStatus.COMPLETED + for nat_rule in nat['rules']['natRulesDtos']: + if nat_rule[addrtype] == address: + rule_id = nat_rule['ruleId'] + try: + self.vcns.delete_nat_rule(edge_id, rule_id) + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to delete snat rule:\n" + "%s"), e.response) + status = constants.TaskStatus.ERROR + + return status + + def delete_snat_rule(self, router_id, edge_id, src, jobdata=None): + LOG.debug(_("VCNS: delete snat rule %s"), src) + userdata = { + 'edge_id': edge_id, + 'address': src, + 'addrtype': 'originalAddress', + 'jobdata': jobdata + } + task_name = "delete-snat-%s-%s" % (edge_id, src) + task = tasks.Task(task_name, router_id, self._delete_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.snat_delete_result) + self.task_manager.add(task) + return task + + def create_dnat_rule(self, router_id, edge_id, dst, translated, + jobdata=None, location=None): + # TODO(fank): use POST for optimization + # return rule_id for future reference + LOG.debug(_("VCNS: create dnat rule %(dst)s/%(translated)s"), { + 'dst': dst, 'translated': translated}) + dnat_rule = self._assemble_nat_rule( + "dnat", dst, translated) + userdata = { + 'router_id': router_id, + 'edge_id': edge_id, + 'rule': dnat_rule, + 'location': location, + 'jobdata': jobdata + } + task_name = "create-dnat-%s-%s-%s" % (edge_id, dst, translated) + task = tasks.Task(task_name, router_id, self._create_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.dnat_create_result) + self.task_manager.add(task) + return task + + def delete_dnat_rule(self, router_id, edge_id, translated, + jobdata=None): + # TODO(fank): pass in rule_id for optimization + LOG.debug(_("VCNS: delete dnat rule %s"), translated) + userdata = { + 'edge_id': edge_id, + 'address': translated, + 'addrtype': 'translatedAddress', + 'jobdata': jobdata + } + task_name = "delete-dnat-%s-%s" % (edge_id, translated) + task = tasks.Task(task_name, router_id, self._delete_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.dnat_delete_result) + self.task_manager.add(task) + return task + + def _update_nat_rule(self, task): + # TODO(fank): use POST for optimization + # return rule_id for future reference + edge_id = task.userdata['edge_id'] + if task != self.updated_task['nat'][edge_id]: + # this task does not have the latest config, abort now + # for speedup + return constants.TaskStatus.ABORT + + rules = task.userdata['rules'] + LOG.debug(_("VCNS: start updating nat rules: %s"), rules) + + nat = { + 'featureType': 'nat', + 'rules': { + 'natRulesDtos': rules + } + } + + try: + self.vcns.update_nat_config(edge_id, nat) + status = constants.TaskStatus.COMPLETED + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to create snat rule:\n%s"), + e.response) + status = constants.TaskStatus.ERROR + + return status + + def update_nat_rules(self, router_id, edge_id, snats, dnats, + jobdata=None): + LOG.debug(_("VCNS: update nat rule\n" + "SNAT:%(snat)s\n" + "DNAT:%(dnat)s\n"), { + 'snat': snats, 'dnat': dnats}) + nat_rules = [] + + for dnat in dnats: + nat_rules.append(self._assemble_nat_rule( + 'dnat', dnat['dst'], dnat['translated'])) + nat_rules.append(self._assemble_nat_rule( + 'snat', dnat['translated'], dnat['dst'])) + + for snat in snats: + nat_rules.append(self._assemble_nat_rule( + 'snat', snat['src'], snat['translated'])) + + userdata = { + 'edge_id': edge_id, + 'rules': 
nat_rules, + 'jobdata': jobdata, + } + task_name = "update-nat-%s" % edge_id + task = tasks.Task(task_name, router_id, self._update_nat_rule, + userdata=userdata) + task.add_result_monitor(self.callbacks.nat_update_result) + self.updated_task['nat'][edge_id] = task + self.task_manager.add(task) + return task + + def _update_routes(self, task): + edge_id = task.userdata['edge_id'] + if (task != self.updated_task['route'][edge_id] and + task.userdata.get('skippable', True)): + # this task does not have the latest config, abort now + # for speedup + return constants.TaskStatus.ABORT + gateway = task.userdata['gateway'] + routes = task.userdata['routes'] + LOG.debug(_("VCNS: start updating routes for %s"), edge_id) + static_routes = [] + for route in routes: + static_routes.append({ + "description": "", + "vnic": vcns_const.INTERNAL_VNIC_INDEX, + "network": route['cidr'], + "nextHop": route['nexthop'] + }) + request = { + "staticRoutes": { + "staticRoutes": static_routes + } + } + if gateway: + request["defaultRoute"] = { + "description": "default-gateway", + "gatewayAddress": gateway, + "vnic": vcns_const.EXTERNAL_VNIC_INDEX + } + try: + self.vcns.update_routes(edge_id, request) + status = constants.TaskStatus.COMPLETED + except exceptions.VcnsApiException as e: + LOG.exception(_("VCNS: Failed to update routes:\n%s"), + e.response) + status = constants.TaskStatus.ERROR + + return status + + def update_routes(self, router_id, edge_id, gateway, routes, + skippable=True, jobdata=None): + if gateway: + gateway = gateway.split('/')[0] + + userdata = { + 'edge_id': edge_id, + 'gateway': gateway, + 'routes': routes, + 'skippable': skippable, + 'jobdata': jobdata + } + task_name = "update-routes-%s" % (edge_id) + task = tasks.Task(task_name, router_id, self._update_routes, + userdata=userdata) + task.add_result_monitor(self.callbacks.routes_update_result) + self.updated_task['route'][edge_id] = task + self.task_manager.add(task) + return task + + def create_lswitch(self, name, tz_config, tags=None, + port_isolation=False, replication_mode="service"): + lsconfig = { + 'display_name': utils.check_and_truncate(name), + "tags": tags or [], + "type": "LogicalSwitchConfig", + "_schema": "/ws.v1/schema/LogicalSwitchConfig", + "transport_zones": tz_config + } + if port_isolation is bool: + lsconfig["port_isolation_enabled"] = port_isolation + if replication_mode: + lsconfig["replication_mode"] = replication_mode + + response = self.vcns.create_lswitch(lsconfig)[1] + return response + + def delete_lswitch(self, lswitch_id): + self.vcns.delete_lswitch(lswitch_id) + + def get_loadbalancer_config(self, edge_id): + try: + header, response = self.vcns.get_loadbalancer_config( + edge_id) + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get service config")) + return response + + def enable_service_loadbalancer(self, edge_id): + config = self.get_loadbalancer_config( + edge_id) + if not config['enabled']: + config['enabled'] = True + try: + self.vcns.enable_service_loadbalancer(edge_id, config) + except exceptions.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to enable loadbalancer " + "service config")) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_firewall_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_firewall_driver.py new file mode 100644 index 00000000..b0520651 --- /dev/null +++ 
b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_firewall_driver.py @@ -0,0 +1,352 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Leon Cui, VMware + +from neutron.db import db_base_plugin_v2 +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.plugins.vmware.dbexts import vcns_db +from neutron.plugins.vmware.vshield.common import ( + exceptions as vcns_exc) + +LOG = logging.getLogger(__name__) + +VSE_FWAAS_ALLOW = "accept" +VSE_FWAAS_DENY = "deny" + + +class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2): + """Implementation of driver APIs for + Edge Firewall feature configuration + """ + def _convert_firewall_action(self, action): + if action == constants.FWAAS_ALLOW: + return VSE_FWAAS_ALLOW + elif action == constants.FWAAS_DENY: + return VSE_FWAAS_DENY + else: + msg = _("Invalid action value %s in a firewall rule") % action + raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) + + def _restore_firewall_action(self, action): + if action == VSE_FWAAS_ALLOW: + return constants.FWAAS_ALLOW + elif action == VSE_FWAAS_DENY: + return constants.FWAAS_DENY + else: + msg = (_("Invalid action value %s in " + "a vshield firewall rule") % action) + raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) + + def _get_port_range_from_min_max_ports(self, min_port, max_port): + if not min_port: + return None + if min_port == max_port: + return str(min_port) + else: + return '%d:%d' % (min_port, max_port) + + def _get_min_max_ports_from_range(self, port_range): + if not port_range: + return [None, None] + min_port, sep, max_port = port_range.partition(":") + if not max_port: + max_port = min_port + return [int(min_port), int(max_port)] + + def _convert_firewall_rule(self, context, rule, index=None): + vcns_rule = { + "name": rule['name'], + "description": rule['description'], + "action": self._convert_firewall_action(rule['action']), + "enabled": rule['enabled']} + if rule.get('source_ip_address'): + vcns_rule['source'] = { + "ipAddress": [rule['source_ip_address']] + } + if rule.get('destination_ip_address'): + vcns_rule['destination'] = { + "ipAddress": [rule['destination_ip_address']] + } + service = {} + if rule.get('source_port'): + min_port, max_port = self._get_min_max_ports_from_range( + rule['source_port']) + service['sourcePort'] = [i for i in range(min_port, max_port + 1)] + if rule.get('destination_port'): + min_port, max_port = self._get_min_max_ports_from_range( + rule['destination_port']) + service['port'] = [i for i in range(min_port, max_port + 1)] + if rule.get('protocol'): + service['protocol'] = rule['protocol'] + if service: + vcns_rule['application'] = { + 'service': [service] + } + if index: + vcns_rule['ruleTag'] = index + return vcns_rule + + def _restore_firewall_rule(self, context, edge_id, response): + rule = response + rule_binding = 
vcns_db.get_vcns_edge_firewallrule_binding_by_vseid( + context.session, edge_id, rule['ruleId']) + service = rule['application']['service'][0] + src_port_range = self._get_port_range_from_min_max_ports( + service['sourcePort'][0], service['sourcePort'][-1]) + dst_port_range = self._get_port_range_from_min_max_ports( + service['port'][0], service['port'][-1]) + return { + 'firewall_rule': { + 'name': rule['name'], + 'id': rule_binding['rule_id'], + 'description': rule['description'], + 'source_ip_address': rule['source']['ipAddress'][0], + 'destination_ip_address': rule['destination']['ipAddress'][0], + 'protocol': service['protocol'], + 'destination_port': dst_port_range, + 'source_port': src_port_range, + 'action': self._restore_firewall_action(rule['action']), + 'enabled': rule['enabled']}} + + def _convert_firewall(self, context, firewall): + #bulk configuration on firewall and rescheduling the rule binding + ruleTag = 1 + vcns_rules = [] + for rule in firewall['firewall_rule_list']: + vcns_rule = self._convert_firewall_rule(context, rule, ruleTag) + vcns_rules.append(vcns_rule) + ruleTag += 1 + return { + 'featureType': "firewall_4.0", + 'firewallRules': { + 'firewallRules': vcns_rules}} + + def _restore_firewall(self, context, edge_id, response): + res = {} + res['firewall_rule_list'] = [] + for rule in response['firewallRules']['firewallRules']: + rule_binding = ( + vcns_db.get_vcns_edge_firewallrule_binding_by_vseid( + context.session, edge_id, rule['ruleId'])) + if rule_binding is None: + continue + service = rule['application']['service'][0] + src_port_range = self._get_port_range_from_min_max_ports( + service['sourcePort'][0], service['sourcePort'][-1]) + dst_port_range = self._get_port_range_from_min_max_ports( + service['port'][0], service['port'][-1]) + item = { + 'firewall_rule': { + 'name': rule['name'], + 'id': rule_binding['rule_id'], + 'description': rule['description'], + 'source_ip_address': rule['source']['ipAddress'][0], + 'destination_ip_address': rule[ + 'destination']['ipAddress'][0], + 'protocol': service['protocol'], + 'destination_port': dst_port_range, + 'source_port': src_port_range, + 'action': self._restore_firewall_action(rule['action']), + 'enabled': rule['enabled']}} + res['firewall_rule_list'].append(item) + return res + + def _create_rule_id_mapping( + self, context, edge_id, firewall, vcns_fw): + for rule in vcns_fw['firewallRules']['firewallRules']: + index = rule['ruleTag'] - 1 + #TODO(linb):a simple filter of the retrived rules which may be + #created by other operations unintentionally + if index < len(firewall['firewall_rule_list']): + rule_vseid = rule['ruleId'] + rule_id = firewall['firewall_rule_list'][index]['id'] + map_info = { + 'rule_id': rule_id, + 'rule_vseid': rule_vseid, + 'edge_id': edge_id + } + vcns_db.add_vcns_edge_firewallrule_binding( + context.session, map_info) + + def _get_firewall(self, context, edge_id): + try: + return self.vcns.get_firewall(edge_id)[1] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get firewall with edge " + "id: %s"), edge_id) + + def _get_firewall_rule_next(self, context, edge_id, rule_vseid): + # Return the firewall rule below 'rule_vseid' + fw_cfg = self._get_firewall(context, edge_id) + for i in range(len(fw_cfg['firewallRules']['firewallRules'])): + rule_cur = fw_cfg['firewallRules']['firewallRules'][i] + if str(rule_cur['ruleId']) == rule_vseid: + if (i + 1) == len(fw_cfg['firewallRules']['firewallRules']): + return None + else: + return 
fw_cfg['firewallRules']['firewallRules'][i + 1] + + def get_firewall_rule(self, context, id, edge_id): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, id, edge_id) + if rule_map is None: + msg = _("No rule id:%s found in the edge_firewall_binding") % id + LOG.error(msg) + raise vcns_exc.VcnsNotFound( + resource='vcns_firewall_rule_bindings', msg=msg) + vcns_rule_id = rule_map.rule_vseid + try: + response = self.vcns.get_firewall_rule( + edge_id, vcns_rule_id)[1] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get firewall rule: %(rule_id)s " + "with edge_id: %(edge_id)s"), { + 'rule_id': id, + 'edge_id': edge_id}) + return self._restore_firewall_rule(context, edge_id, response) + + def get_firewall(self, context, edge_id): + response = self._get_firewall(context, edge_id) + return self._restore_firewall(context, edge_id, response) + + def update_firewall(self, context, edge_id, firewall): + fw_req = self._convert_firewall(context, firewall) + try: + self.vcns.update_firewall(edge_id, fw_req) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update firewall " + "with edge_id: %s"), edge_id) + fw_res = self._get_firewall(context, edge_id) + vcns_db.cleanup_vcns_edge_firewallrule_binding( + context.session, edge_id) + self._create_rule_id_mapping(context, edge_id, firewall, fw_res) + + def delete_firewall(self, context, edge_id): + try: + self.vcns.delete_firewall(edge_id) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete firewall " + "with edge_id:%s"), edge_id) + vcns_db.cleanup_vcns_edge_firewallrule_binding( + context.session, edge_id) + + def update_firewall_rule(self, context, id, edge_id, firewall_rule): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, id, edge_id) + vcns_rule_id = rule_map.rule_vseid + fwr_req = self._convert_firewall_rule(context, firewall_rule) + try: + self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update firewall rule: %(rule_id)s " + "with edge_id: %(edge_id)s"), + {'rule_id': id, + 'edge_id': edge_id}) + + def delete_firewall_rule(self, context, id, edge_id): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, id, edge_id) + vcns_rule_id = rule_map.rule_vseid + try: + self.vcns.delete_firewall_rule(edge_id, vcns_rule_id) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete firewall rule: %(rule_id)s " + "with edge_id: %(edge_id)s"), + {'rule_id': id, + 'edge_id': edge_id}) + vcns_db.delete_vcns_edge_firewallrule_binding( + context.session, id, edge_id) + + def _add_rule_above(self, context, ref_rule_id, edge_id, firewall_rule): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, ref_rule_id, edge_id) + ref_vcns_rule_id = rule_map.rule_vseid + fwr_req = self._convert_firewall_rule(context, firewall_rule) + try: + header = self.vcns.add_firewall_rule_above( + edge_id, ref_vcns_rule_id, fwr_req)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to add firewall rule above: " + "%(rule_id)s with edge_id: %(edge_id)s"), + {'rule_id': ref_vcns_rule_id, + 'edge_id': edge_id}) + + objuri = header['location'] + fwr_vseid = 
objuri[objuri.rfind("/") + 1:] + map_info = { + 'rule_id': firewall_rule['id'], + 'rule_vseid': fwr_vseid, + 'edge_id': edge_id} + vcns_db.add_vcns_edge_firewallrule_binding( + context.session, map_info) + + def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule): + rule_map = vcns_db.get_vcns_edge_firewallrule_binding( + context.session, ref_rule_id, edge_id) + ref_vcns_rule_id = rule_map.rule_vseid + fwr_vse_next = self._get_firewall_rule_next( + context, edge_id, ref_vcns_rule_id) + fwr_req = self._convert_firewall_rule(context, firewall_rule) + if fwr_vse_next: + ref_vcns_rule_id = fwr_vse_next['ruleId'] + try: + header = self.vcns.add_firewall_rule_above( + edge_id, int(ref_vcns_rule_id), fwr_req)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to add firewall rule above: " + "%(rule_id)s with edge_id: %(edge_id)s"), + {'rule_id': ref_vcns_rule_id, + 'edge_id': edge_id}) + else: + # append the rule at the bottom + try: + header = self.vcns.add_firewall_rule( + edge_id, fwr_req)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to append a firewall rule" + "with edge_id: %s"), edge_id) + + objuri = header['location'] + fwr_vseid = objuri[objuri.rfind("/") + 1:] + map_info = { + 'rule_id': firewall_rule['id'], + 'rule_vseid': fwr_vseid, + 'edge_id': edge_id + } + vcns_db.add_vcns_edge_firewallrule_binding( + context.session, map_info) + + def insert_rule(self, context, rule_info, edge_id, fwr): + if rule_info.get('insert_before'): + self._add_rule_above( + context, rule_info['insert_before'], edge_id, fwr) + elif rule_info.get('insert_after'): + self._add_rule_below( + context, rule_info['insert_after'], edge_id, fwr) + else: + msg = _("Can't execute insert rule operation " + "without reference rule_id") + raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py new file mode 100644 index 00000000..2e3efd56 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py @@ -0,0 +1,401 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
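The firewall rule conversion above turns a Neutron port range string into an explicit port list for the vShield "application" service element, using the two small range helpers defined near the top of the driver. A standalone sketch of the same round trip, with the helper logic copied from the driver rather than imported and the rule values made up for illustration:

    # Standalone copies of the EdgeFirewallDriver port-range helpers.
    def port_range_from_min_max(min_port, max_port):
        if not min_port:
            return None
        if min_port == max_port:
            return str(min_port)
        return '%d:%d' % (min_port, max_port)

    def min_max_from_range(port_range):
        if not port_range:
            return [None, None]
        min_port, _sep, max_port = port_range.partition(":")
        if not max_port:
            max_port = min_port
        return [int(min_port), int(max_port)]

    # A rule with destination_port "8000:8002" becomes a vShield service
    # carrying every port in the range.
    low, high = min_max_from_range("8000:8002")
    service = {'protocol': 'tcp', 'port': list(range(low, high + 1))}
    assert service['port'] == [8000, 8001, 8002]
    assert port_range_from_min_max(low, high) == "8000:8002"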
+# +# @author: Leon Cui, VMware + +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.dbexts import vcns_db +from neutron.plugins.vmware.vshield.common import ( + constants as vcns_const) +from neutron.plugins.vmware.vshield.common import ( + exceptions as vcns_exc) +from neutron.services.loadbalancer import constants as lb_constants + +LOG = logging.getLogger(__name__) + +BALANCE_MAP = { + lb_constants.LB_METHOD_ROUND_ROBIN: 'round-robin', + lb_constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn', + lb_constants.LB_METHOD_SOURCE_IP: 'source' +} +PROTOCOL_MAP = { + lb_constants.PROTOCOL_TCP: 'tcp', + lb_constants.PROTOCOL_HTTP: 'http', + lb_constants.PROTOCOL_HTTPS: 'tcp' +} +SESSION_PERSISTENCE_METHOD_MAP = { + lb_constants.SESSION_PERSISTENCE_SOURCE_IP: 'sourceip', + lb_constants.SESSION_PERSISTENCE_APP_COOKIE: 'cookie', + lb_constants.SESSION_PERSISTENCE_HTTP_COOKIE: 'cookie'} +SESSION_PERSISTENCE_COOKIE_MAP = { + lb_constants.SESSION_PERSISTENCE_APP_COOKIE: 'app', + lb_constants.SESSION_PERSISTENCE_HTTP_COOKIE: 'insert'} + + +class EdgeLbDriver(): + """Implementation of driver APIs for + Edge Loadbalancer feature configuration + """ + + def _convert_lb_vip(self, context, edge_id, vip, app_profileid): + pool_id = vip.get('pool_id') + poolid_map = vcns_db.get_vcns_edge_pool_binding( + context.session, pool_id, edge_id) + pool_vseid = poolid_map['pool_vseid'] + return { + 'name': vip.get( + 'name', '') + vip['id'][-vcns_const.SUFFIX_LENGTH:], + 'description': vip.get('description'), + 'ipAddress': vip.get('address'), + 'protocol': vip.get('protocol'), + 'port': vip.get('protocol_port'), + 'connectionLimit': max(0, vip.get('connection_limit')), + 'defaultPoolId': pool_vseid, + 'applicationProfileId': app_profileid + } + + def _restore_lb_vip(self, context, edge_id, vip_vse): + pool_binding = vcns_db.get_vcns_edge_pool_binding_by_vseid( + context.session, + edge_id, + vip_vse['defaultPoolId']) + + return { + 'name': vip_vse['name'][:-vcns_const.SUFFIX_LENGTH], + 'address': vip_vse['ipAddress'], + 'protocol': vip_vse['protocol'], + 'protocol_port': vip_vse['port'], + 'pool_id': pool_binding['pool_id'] + } + + def _convert_lb_pool(self, context, edge_id, pool, members): + vsepool = { + 'name': pool.get( + 'name', '') + pool['id'][-vcns_const.SUFFIX_LENGTH:], + 'description': pool.get('description'), + 'algorithm': BALANCE_MAP.get( + pool.get('lb_method'), + 'round-robin'), + 'transparent': True, + 'member': [], + 'monitorId': [] + } + for member in members: + vsepool['member'].append({ + 'ipAddress': member['address'], + 'weight': member['weight'], + 'port': member['protocol_port'] + }) + ##TODO(linb) right now, vse only accept at most one monitor per pool + monitors = pool.get('health_monitors') + if not monitors: + return vsepool + monitorid_map = vcns_db.get_vcns_edge_monitor_binding( + context.session, + monitors[0], + edge_id) + vsepool['monitorId'].append(monitorid_map['monitor_vseid']) + return vsepool + + def _restore_lb_pool(self, context, edge_id, pool_vse): + #TODO(linb): Get more usefule info + return { + 'name': pool_vse['name'][:-vcns_const.SUFFIX_LENGTH], + } + + def _convert_lb_monitor(self, context, monitor): + return { + 'type': PROTOCOL_MAP.get( + monitor.get('type'), 'http'), + 'interval': monitor.get('delay'), + 'timeout': monitor.get('timeout'), + 'maxRetries': monitor.get('max_retries'), + 'name': monitor.get('id') + } + + def _restore_lb_monitor(self, context, edge_id, monitor_vse): + return { + 
'delay': monitor_vse['interval'], + 'timeout': monitor_vse['timeout'], + 'max_retries': monitor_vse['maxRetries'], + 'id': monitor_vse['name'] + } + + def _convert_app_profile(self, name, sess_persist, protocol): + vcns_app_profile = { + 'insertXForwardedFor': False, + 'name': name, + 'serverSslEnabled': False, + 'sslPassthrough': False, + 'template': protocol, + } + # Since SSL Termination is not supported right now, so just use + # sslPassthrough mehtod if the protocol is HTTPS. + if protocol == lb_constants.PROTOCOL_HTTPS: + vcns_app_profile['sslPassthrough'] = True + + if sess_persist.get('type'): + # If protocol is not HTTP, only sourceip is supported + if (protocol != lb_constants.PROTOCOL_HTTP and + sess_persist['type'] != ( + lb_constants.SESSION_PERSISTENCE_SOURCE_IP)): + msg = (_("Invalid %(protocol)s persistence method: %(type)s") % + {'protocol': protocol, + 'type': sess_persist['type']}) + raise vcns_exc.VcnsBadRequest(resource='sess_persist', msg=msg) + persistence = { + 'method': SESSION_PERSISTENCE_METHOD_MAP.get( + sess_persist['type'])} + if sess_persist['type'] in SESSION_PERSISTENCE_COOKIE_MAP: + if sess_persist.get('cookie_name'): + persistence['cookieName'] = sess_persist['cookie_name'] + else: + persistence['cookieName'] = 'default_cookie_name' + persistence['cookieMode'] = SESSION_PERSISTENCE_COOKIE_MAP.get( + sess_persist['type']) + vcns_app_profile['persistence'] = persistence + return vcns_app_profile + + def create_vip(self, context, edge_id, vip): + app_profile = self._convert_app_profile( + vip['name'], (vip.get('session_persistence') or {}), + vip.get('protocol')) + try: + header, response = self.vcns.create_app_profile( + edge_id, app_profile) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create app profile on edge: %s"), + edge_id) + objuri = header['location'] + app_profileid = objuri[objuri.rfind("/") + 1:] + + vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid) + try: + header, response = self.vcns.create_vip( + edge_id, vip_new) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create vip on vshield edge: %s"), + edge_id) + self.vcns.delete_app_profile(edge_id, app_profileid) + objuri = header['location'] + vip_vseid = objuri[objuri.rfind("/") + 1:] + + # Add the vip mapping + map_info = { + "vip_id": vip['id'], + "vip_vseid": vip_vseid, + "edge_id": edge_id, + "app_profileid": app_profileid + } + vcns_db.add_vcns_edge_vip_binding(context.session, map_info) + + def _get_vip_binding(self, session, id): + vip_binding = vcns_db.get_vcns_edge_vip_binding(session, id) + if not vip_binding: + msg = (_("vip_binding not found with id: %(id)s " + "edge_id: %(edge_id)s") % { + 'id': id, + 'edge_id': vip_binding[vcns_const.EDGE_ID]}) + LOG.error(msg) + raise vcns_exc.VcnsNotFound( + resource='router_service_binding', msg=msg) + return vip_binding + + def get_vip(self, context, id): + vip_binding = vcns_db.get_vcns_edge_vip_binding(context.session, id) + edge_id = vip_binding[vcns_const.EDGE_ID] + vip_vseid = vip_binding['vip_vseid'] + try: + response = self.vcns.get_vip(edge_id, vip_vseid)[1] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get vip on edge")) + return self._restore_lb_vip(context, edge_id, response) + + def update_vip(self, context, vip, session_persistence_update=True): + vip_binding = self._get_vip_binding(context.session, vip['id']) + edge_id 
= vip_binding[vcns_const.EDGE_ID] + vip_vseid = vip_binding.get('vip_vseid') + if session_persistence_update: + app_profileid = vip_binding.get('app_profileid') + app_profile = self._convert_app_profile( + vip['name'], vip.get('session_persistence', {}), + vip.get('protocol')) + try: + self.vcns.update_app_profile( + edge_id, app_profileid, app_profile) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update app profile on " + "edge: %s") % edge_id) + + vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid) + try: + self.vcns.update_vip(edge_id, vip_vseid, vip_new) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update vip on edge: %s") % edge_id) + + def delete_vip(self, context, id): + vip_binding = self._get_vip_binding(context.session, id) + edge_id = vip_binding[vcns_const.EDGE_ID] + vip_vseid = vip_binding['vip_vseid'] + app_profileid = vip_binding['app_profileid'] + + try: + self.vcns.delete_vip(edge_id, vip_vseid) + except vcns_exc.ResourceNotFound: + LOG.exception(_("vip not found on edge: %s") % edge_id) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete vip on edge: %s") % edge_id) + + try: + self.vcns.delete_app_profile(edge_id, app_profileid) + except vcns_exc.ResourceNotFound: + LOG.exception(_("app profile not found on edge: %s") % edge_id) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete app profile on edge: %s") % + edge_id) + + vcns_db.delete_vcns_edge_vip_binding(context.session, id) + + def create_pool(self, context, edge_id, pool, members): + pool_new = self._convert_lb_pool(context, edge_id, pool, members) + try: + header = self.vcns.create_pool(edge_id, pool_new)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create pool")) + + objuri = header['location'] + pool_vseid = objuri[objuri.rfind("/") + 1:] + + # update the pool mapping table + map_info = { + "pool_id": pool['id'], + "pool_vseid": pool_vseid, + "edge_id": edge_id + } + vcns_db.add_vcns_edge_pool_binding(context.session, map_info) + + def get_pool(self, context, id, edge_id): + pool_binding = vcns_db.get_vcns_edge_pool_binding( + context.session, id, edge_id) + if not pool_binding: + msg = (_("pool_binding not found with id: %(id)s " + "edge_id: %(edge_id)s") % {'id': id, 'edge_id': edge_id}) + LOG.error(msg) + raise vcns_exc.VcnsNotFound( + resource='router_service_binding', msg=msg) + pool_vseid = pool_binding['pool_vseid'] + try: + response = self.vcns.get_pool(edge_id, pool_vseid)[1] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get pool on edge")) + return self._restore_lb_pool(context, edge_id, response) + + def update_pool(self, context, edge_id, pool, members): + pool_binding = vcns_db.get_vcns_edge_pool_binding( + context.session, pool['id'], edge_id) + pool_vseid = pool_binding['pool_vseid'] + pool_new = self._convert_lb_pool(context, edge_id, pool, members) + try: + self.vcns.update_pool(edge_id, pool_vseid, pool_new) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update pool")) + + def delete_pool(self, context, id, edge_id): + pool_binding = vcns_db.get_vcns_edge_pool_binding( + context.session, id, edge_id) + pool_vseid = 
pool_binding['pool_vseid'] + try: + self.vcns.delete_pool(edge_id, pool_vseid) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete pool")) + vcns_db.delete_vcns_edge_pool_binding( + context.session, id, edge_id) + + def create_health_monitor(self, context, edge_id, health_monitor): + monitor_new = self._convert_lb_monitor(context, health_monitor) + try: + header = self.vcns.create_health_monitor(edge_id, monitor_new)[0] + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to create monitor on edge: %s"), + edge_id) + + objuri = header['location'] + monitor_vseid = objuri[objuri.rfind("/") + 1:] + + # update the health_monitor mapping table + map_info = { + "monitor_id": health_monitor['id'], + "monitor_vseid": monitor_vseid, + "edge_id": edge_id + } + vcns_db.add_vcns_edge_monitor_binding(context.session, map_info) + + def get_health_monitor(self, context, id, edge_id): + monitor_binding = vcns_db.get_vcns_edge_monitor_binding( + context.session, id, edge_id) + if not monitor_binding: + msg = (_("monitor_binding not found with id: %(id)s " + "edge_id: %(edge_id)s") % {'id': id, 'edge_id': edge_id}) + LOG.error(msg) + raise vcns_exc.VcnsNotFound( + resource='router_service_binding', msg=msg) + monitor_vseid = monitor_binding['monitor_vseid'] + try: + response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1] + except vcns_exc.VcnsApiException as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to get monitor on edge: %s"), + e.response) + return self._restore_lb_monitor(context, edge_id, response) + + def update_health_monitor(self, context, edge_id, + old_health_monitor, health_monitor): + monitor_binding = vcns_db.get_vcns_edge_monitor_binding( + context.session, + old_health_monitor['id'], edge_id) + monitor_vseid = monitor_binding['monitor_vseid'] + monitor_new = self._convert_lb_monitor( + context, health_monitor) + try: + self.vcns.update_health_monitor( + edge_id, monitor_vseid, monitor_new) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to update monitor on edge: %s"), + edge_id) + + def delete_health_monitor(self, context, id, edge_id): + monitor_binding = vcns_db.get_vcns_edge_monitor_binding( + context.session, id, edge_id) + monitor_vseid = monitor_binding['monitor_vseid'] + try: + self.vcns.delete_health_monitor(edge_id, monitor_vseid) + except vcns_exc.VcnsApiException: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete monitor")) + vcns_db.delete_vcns_edge_monitor_binding( + context.session, id, edge_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/tasks/tasks.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/tasks/tasks.py new file mode 100644 index 00000000..7037c430 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/tasks/tasks.py @@ -0,0 +1,397 @@ +# Copyright 2013 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import uuid + +from eventlet import event +from eventlet import greenthread + +from neutron.common import exceptions +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.vmware.vshield.tasks import constants + +DEFAULT_INTERVAL = 1000 + +LOG = logging.getLogger(__name__) + + +def nop(task): + return constants.TaskStatus.COMPLETED + + +class TaskException(exceptions.NeutronException): + + def __init__(self, message=None, **kwargs): + if message is not None: + self.message = message + + super(TaskException, self).__init__(**kwargs) + + +class InvalidState(TaskException): + message = _("Invalid state %(state)d") + + +class TaskStateSkipped(TaskException): + message = _("State %(state)d skipped. Current state %(current)d") + + +class Task(): + def __init__(self, name, resource_id, execute_callback, + status_callback=nop, result_callback=nop, userdata=None): + self.name = name + self.resource_id = resource_id + self._execute_callback = execute_callback + self._status_callback = status_callback + self._result_callback = result_callback + self.userdata = userdata + self.id = None + self.status = None + + self._monitors = { + constants.TaskState.START: [], + constants.TaskState.EXECUTED: [], + constants.TaskState.RESULT: [] + } + self._states = [None, None, None, None] + self._state = constants.TaskState.NONE + + def _add_monitor(self, action, func): + self._monitors[action].append(func) + return self + + def _move_state(self, state): + self._state = state + if self._states[state] is not None: + e = self._states[state] + self._states[state] = None + e.send() + + for s in range(state): + if self._states[s] is not None: + e = self._states[s] + self._states[s] = None + e.send_exception( + TaskStateSkipped(state=s, current=self._state)) + + def _invoke_monitor(self, state): + for func in self._monitors[state]: + try: + func(self) + except Exception: + msg = _("Task %(task)s encountered exception in %(func)s " + "at state %(state)s") % { + 'task': str(self), + 'func': str(func), + 'state': state} + LOG.exception(msg) + + self._move_state(state) + + return self + + def _start(self): + return self._invoke_monitor(constants.TaskState.START) + + def _executed(self): + return self._invoke_monitor(constants.TaskState.EXECUTED) + + def _update_status(self, status): + if self.status == status: + return self + + self.status = status + + def _finished(self): + return self._invoke_monitor(constants.TaskState.RESULT) + + def add_start_monitor(self, func): + return self._add_monitor(constants.TaskState.START, func) + + def add_executed_monitor(self, func): + return self._add_monitor(constants.TaskState.EXECUTED, func) + + def add_result_monitor(self, func): + return self._add_monitor(constants.TaskState.RESULT, func) + + def wait(self, state): + if (state < constants.TaskState.START or + state > constants.TaskState.RESULT or + state == constants.TaskState.STATUS): + raise InvalidState(state=state) + + if state <= self._state: + # we already passed this current state, so no wait + return + + e = event.Event() + self._states[state] = e + e.wait() + + def __repr__(self): + return "Task-%s-%s-%s" % ( + self.name, self.resource_id, self.id) + + +class TaskManager(): + + _instance = None + _default_interval = DEFAULT_INTERVAL + + def __init__(self, interval=None): + self._interval = interval or TaskManager._default_interval 
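The Task class above separates the three callbacks a caller provides: the execute callback issues the asynchronous request, the status callback is polled until it stops returning PENDING, and the result callback (or a result monitor) runs once the task reaches a terminal status. A minimal usage sketch, assuming the patched tree is importable in an eventlet environment; the callback names and identifiers below are illustrative only:

    from neutron.plugins.vmware.vshield.tasks import constants
    from neutron.plugins.vmware.vshield.tasks import tasks

    def my_execute(task):
        # issue the asynchronous backend call here
        return constants.TaskStatus.PENDING

    def my_status(task):
        # poll the backend; COMPLETED or ERROR ends the task
        return constants.TaskStatus.COMPLETED

    def my_result(task):
        # runs once, after the task reaches a terminal status
        pass

    task = tasks.Task("example-task", "router-1", my_execute,
                      status_callback=my_status,
                      userdata={'edge_id': 'edge-1'})
    task.add_result_monitor(my_result)

    # The manager serializes tasks per resource_id and polls pending ones.
    manager = tasks.TaskManager()
    manager.start()
    manager.add(task)
    task.wait(constants.TaskState.RESULT)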
+ + # A queue to pass tasks from other threads + self._tasks_queue = collections.deque() + + # A dict to store resource -> resource's tasks + self._tasks = {} + + # Current task being executed in main thread + self._main_thread_exec_task = None + + # New request event + self._req = event.Event() + + # TaskHandler stopped event + self._stopped = False + + # Periodic function trigger + self._monitor = None + self._monitor_busy = False + + # Thread handling the task request + self._thread = None + + def _execute(self, task): + """Execute task.""" + msg = _("Start task %s") % str(task) + LOG.debug(msg) + task._start() + try: + status = task._execute_callback(task) + except Exception: + msg = _("Task %(task)s encountered exception in %(cb)s") % { + 'task': str(task), + 'cb': str(task._execute_callback)} + LOG.exception(msg) + status = constants.TaskStatus.ERROR + + LOG.debug(_("Task %(task)s return %(status)s"), { + 'task': str(task), + 'status': status}) + + task._update_status(status) + task._executed() + + return status + + def _result(self, task): + """Notify task execution result.""" + try: + task._result_callback(task) + except Exception: + msg = _("Task %(task)s encountered exception in %(cb)s") % { + 'task': str(task), + 'cb': str(task._result_callback)} + LOG.exception(msg) + + LOG.debug(_("Task %(task)s return %(status)s"), + {'task': str(task), 'status': task.status}) + + task._finished() + + def _check_pending_tasks(self): + """Check all pending tasks status.""" + for resource_id in self._tasks.keys(): + if self._stopped: + # Task manager is stopped, return now + return + + tasks = self._tasks[resource_id] + # only the first task is executed and pending + task = tasks[0] + try: + status = task._status_callback(task) + except Exception: + msg = _("Task %(task)s encountered exception in %(cb)s") % { + 'task': str(task), + 'cb': str(task._status_callback)} + LOG.exception(msg) + status = constants.TaskStatus.ERROR + task._update_status(status) + if status != constants.TaskStatus.PENDING: + self._dequeue(task, True) + + def _enqueue(self, task): + if task.resource_id in self._tasks: + # append to existing resource queue for ordered processing + self._tasks[task.resource_id].append(task) + else: + # put the task to a new resource queue + tasks = collections.deque() + tasks.append(task) + self._tasks[task.resource_id] = tasks + + def _dequeue(self, task, run_next): + self._result(task) + tasks = self._tasks[task.resource_id] + tasks.remove(task) + if not tasks: + # no more tasks for this resource + del self._tasks[task.resource_id] + return + + if run_next: + # process next task for this resource + while tasks: + task = tasks[0] + status = self._execute(task) + if status == constants.TaskStatus.PENDING: + break + self._dequeue(task, False) + + def _abort(self): + """Abort all tasks.""" + # put all tasks haven't been received by main thread to queue + # so the following abort handling can cover them + for t in self._tasks_queue: + self._enqueue(t) + self._tasks_queue.clear() + + for resource_id in self._tasks.keys(): + tasks = list(self._tasks[resource_id]) + for task in tasks: + task._update_status(constants.TaskStatus.ABORT) + self._dequeue(task, False) + + def _get_task(self): + """Get task request.""" + while True: + for t in self._tasks_queue: + return self._tasks_queue.popleft() + self._req.wait() + self._req.reset() + + def run(self): + while True: + try: + if self._stopped: + # Gracefully terminate this thread if the _stopped + # attribute was set to true + LOG.info(_("Stopping 
TaskManager")) + break + + # get a task from queue, or timeout for periodic status check + task = self._get_task() + if task.resource_id in self._tasks: + # this resource already has some tasks under processing, + # append the task to same queue for ordered processing + self._enqueue(task) + continue + + try: + self._main_thread_exec_task = task + self._execute(task) + finally: + self._main_thread_exec_task = None + if task.status is None: + # The thread is killed during _execute(). To guarantee + # the task been aborted correctly, put it to the queue. + self._enqueue(task) + elif task.status != constants.TaskStatus.PENDING: + self._result(task) + else: + self._enqueue(task) + except Exception: + LOG.exception(_("TaskManager terminating because " + "of an exception")) + break + + def add(self, task): + task.id = uuid.uuid1() + self._tasks_queue.append(task) + if not self._req.ready(): + self._req.send() + return task.id + + def stop(self): + if self._thread is None: + return + self._stopped = True + self._thread.kill() + self._thread = None + # Stop looping call and abort running tasks + self._monitor.stop() + if self._monitor_busy: + self._monitor.wait() + self._abort() + LOG.info(_("TaskManager terminated")) + + def has_pending_task(self): + if self._tasks_queue or self._tasks or self._main_thread_exec_task: + return True + else: + return False + + def show_pending_tasks(self): + for task in self._tasks_queue: + LOG.info(str(task)) + for resource, tasks in self._tasks.iteritems(): + for task in tasks: + LOG.info(str(task)) + if self._main_thread_exec_task: + LOG.info(str(self._main_thread_exec_task)) + + def count(self): + count = 0 + for resource_id, tasks in self._tasks.iteritems(): + count += len(tasks) + return count + + def start(self, interval=None): + def _inner(): + self.run() + + def _loopingcall_callback(): + self._monitor_busy = True + try: + self._check_pending_tasks() + except Exception: + LOG.exception(_("Exception in _check_pending_tasks")) + self._monitor_busy = False + + if self._thread is not None: + return self + + if interval is None or interval == 0: + interval = self._interval + + self._stopped = False + self._thread = greenthread.spawn(_inner) + self._monitor = loopingcall.FixedIntervalLoopingCall( + _loopingcall_callback) + self._monitor.start(interval / 1000.0, + interval / 1000.0) + # To allow the created thread start running + greenthread.sleep(0) + + return self + + @classmethod + def set_default_interval(cls, interval): + cls._default_interval = interval diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/vcns.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/vcns.py new file mode 100644 index 00000000..7183fd76 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/vcns.py @@ -0,0 +1,302 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
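The status-check interval handled by TaskManager.start() above is expressed in milliseconds and divided by 1000 before being handed to FixedIntervalLoopingCall, so a value of 2000 yields a 2-second poll. A small illustrative snippet (the interval value is made up):

    from neutron.plugins.vmware.vshield.tasks import tasks

    # All managers created after this call default to a 2000 ms poll.
    tasks.TaskManager.set_default_interval(2000)

    manager = tasks.TaskManager()   # uses the 2000 ms default
    manager.start()                 # spawns the worker greenthread
    assert manager.has_pending_task() is False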
+# +# @author: linb, VMware + +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.vshield.common import VcnsApiClient + +LOG = logging.getLogger(__name__) + +HTTP_GET = "GET" +HTTP_POST = "POST" +HTTP_DELETE = "DELETE" +HTTP_PUT = "PUT" +URI_PREFIX = "/api/4.0/edges" + +#FwaaS constants +FIREWALL_SERVICE = "firewall/config" +FIREWALL_RULE_RESOURCE = "rules" + +#LbaaS Constants +LOADBALANCER_SERVICE = "loadbalancer/config" +VIP_RESOURCE = "virtualservers" +POOL_RESOURCE = "pools" +MONITOR_RESOURCE = "monitors" +APP_PROFILE_RESOURCE = "applicationprofiles" + +# IPsec VPNaaS Constants +IPSEC_VPN_SERVICE = 'ipsec/config' + + +class Vcns(object): + + def __init__(self, address, user, password): + self.address = address + self.user = user + self.password = password + self.jsonapi_client = VcnsApiClient.VcnsApiHelper(address, user, + password, 'json') + + def do_request(self, method, uri, params=None, format='json', **kwargs): + LOG.debug(_("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')"), { + 'method': method, + 'uri': uri, + 'body': jsonutils.dumps(params)}) + if format == 'json': + header, content = self.jsonapi_client.request(method, uri, params) + else: + header, content = self.xmlapi_client.request(method, uri, params) + LOG.debug(_("Header: '%s'"), header) + LOG.debug(_("Content: '%s'"), content) + if content == '': + return header, {} + if kwargs.get('decode', True): + content = jsonutils.loads(content) + return header, content + + def deploy_edge(self, request): + uri = URI_PREFIX + "?async=true" + return self.do_request(HTTP_POST, uri, request, decode=False) + + def get_edge_id(self, job_id): + uri = URI_PREFIX + "/jobs/%s" % job_id + return self.do_request(HTTP_GET, uri, decode=True) + + def get_edge_deploy_status(self, edge_id): + uri = URI_PREFIX + "/%s/status?getlatest=false" % edge_id + return self.do_request(HTTP_GET, uri, decode="True") + + def delete_edge(self, edge_id): + uri = "%s/%s" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_DELETE, uri) + + def update_interface(self, edge_id, vnic): + uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index']) + return self.do_request(HTTP_PUT, uri, vnic, decode=True) + + def get_nat_config(self, edge_id): + uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_GET, uri, decode=True) + + def update_nat_config(self, edge_id, nat): + uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_PUT, uri, nat, decode=True) + + def delete_nat_rule(self, edge_id, rule_id): + uri = "%s/%s/nat/config/rules/%s" % (URI_PREFIX, edge_id, rule_id) + return self.do_request(HTTP_DELETE, uri, decode=True) + + def get_edge_status(self, edge_id): + uri = "%s/%s/status?getlatest=false" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_GET, uri, decode=True) + + def get_edges(self): + uri = URI_PREFIX + return self.do_request(HTTP_GET, uri, decode=True) + + def update_routes(self, edge_id, routes): + uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id) + return self.do_request(HTTP_PUT, uri, routes) + + def create_lswitch(self, lsconfig): + uri = "/api/ws.v1/lswitch" + return self.do_request(HTTP_POST, uri, lsconfig, decode=True) + + def delete_lswitch(self, lswitch_id): + uri = "/api/ws.v1/lswitch/%s" % lswitch_id + return self.do_request(HTTP_DELETE, uri) + + def get_loadbalancer_config(self, edge_id): + uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) + return self.do_request(HTTP_GET, uri, 
decode=True) + + def enable_service_loadbalancer(self, edge_id, config): + uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) + return self.do_request(HTTP_PUT, uri, config) + + def update_firewall(self, edge_id, fw_req): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE) + return self.do_request(HTTP_PUT, uri, fw_req) + + def delete_firewall(self, edge_id): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, None) + return self.do_request(HTTP_DELETE, uri) + + def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE, + vcns_rule_id) + return self.do_request(HTTP_PUT, uri, fwr_req) + + def delete_firewall_rule(self, edge_id, vcns_rule_id): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE, + vcns_rule_id) + return self.do_request(HTTP_DELETE, uri) + + def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE) + uri += "?aboveRuleId=" + ref_vcns_rule_id + return self.do_request(HTTP_POST, uri, fwr_req) + + def add_firewall_rule(self, edge_id, fwr_req): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE) + return self.do_request(HTTP_POST, uri, fwr_req) + + def get_firewall(self, edge_id): + uri = self._build_uri_path(edge_id, FIREWALL_SERVICE) + return self.do_request(HTTP_GET, uri, decode=True) + + def get_firewall_rule(self, edge_id, vcns_rule_id): + uri = self._build_uri_path( + edge_id, FIREWALL_SERVICE, + FIREWALL_RULE_RESOURCE, + vcns_rule_id) + return self.do_request(HTTP_GET, uri, decode=True) + + # + #Edge LBAAS call helper + # + def create_vip(self, edge_id, vip_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE) + return self.do_request(HTTP_POST, uri, vip_new) + + def get_vip(self, edge_id, vip_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE, vip_vseid) + return self.do_request(HTTP_GET, uri, decode=True) + + def update_vip(self, edge_id, vip_vseid, vip_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE, vip_vseid) + return self.do_request(HTTP_PUT, uri, vip_new) + + def delete_vip(self, edge_id, vip_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE, vip_vseid) + return self.do_request(HTTP_DELETE, uri) + + def create_pool(self, edge_id, pool_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + POOL_RESOURCE) + return self.do_request(HTTP_POST, uri, pool_new) + + def get_pool(self, edge_id, pool_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + POOL_RESOURCE, pool_vseid) + return self.do_request(HTTP_GET, uri, decode=True) + + def update_pool(self, edge_id, pool_vseid, pool_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + POOL_RESOURCE, pool_vseid) + return self.do_request(HTTP_PUT, uri, pool_new) + + def delete_pool(self, edge_id, pool_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + POOL_RESOURCE, pool_vseid) + return self.do_request(HTTP_DELETE, uri) + + def create_health_monitor(self, edge_id, monitor_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + MONITOR_RESOURCE) + return self.do_request(HTTP_POST, uri, monitor_new) + + def get_health_monitor(self, edge_id, monitor_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + MONITOR_RESOURCE, 
monitor_vseid) + return self.do_request(HTTP_GET, uri, decode=True) + + def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + MONITOR_RESOURCE, + monitor_vseid) + return self.do_request(HTTP_PUT, uri, monitor_new) + + def delete_health_monitor(self, edge_id, monitor_vseid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + MONITOR_RESOURCE, + monitor_vseid) + return self.do_request(HTTP_DELETE, uri) + + def create_app_profile(self, edge_id, app_profile): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + APP_PROFILE_RESOURCE) + return self.do_request(HTTP_POST, uri, app_profile) + + def update_app_profile(self, edge_id, app_profileid, app_profile): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + APP_PROFILE_RESOURCE, app_profileid) + return self.do_request(HTTP_PUT, uri, app_profile) + + def delete_app_profile(self, edge_id, app_profileid): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + APP_PROFILE_RESOURCE, + app_profileid) + return self.do_request(HTTP_DELETE, uri) + + def update_ipsec_config(self, edge_id, ipsec_config): + uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) + return self.do_request(HTTP_PUT, uri, ipsec_config) + + def delete_ipsec_config(self, edge_id): + uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) + return self.do_request(HTTP_DELETE, uri) + + def get_ipsec_config(self, edge_id): + uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) + return self.do_request(HTTP_GET, uri) + + def _build_uri_path(self, edge_id, + service, + resource=None, + resource_id=None, + parent_resource_id=None, + fields=None, + relations=None, + filters=None, + types=None, + is_attachment=False): + uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service) + if resource: + res_path = resource + (resource_id and "/%s" % resource_id or '') + uri_path = "%s/%s" % (uri_prefix, res_path) + else: + uri_path = uri_prefix + return uri_path diff --git a/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/vcns_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/vcns_driver.py new file mode 100644 index 00000000..2a6cbfb5 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/plugins/vmware/vshield/vcns_driver.py @@ -0,0 +1,51 @@ +# Copyright 2013 VMware, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
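For orientation, the URI shapes produced by _build_uri_path() are worth seeing with concrete values. The constants below are assumptions for illustration (URI_PREFIX, FIREWALL_SERVICE and FIREWALL_RULE_RESOURCE are defined elsewhere in vcns.py and are not shown in this hunk); the helper itself only ever appends the resource and, optionally, the resource id:

# Illustrative sketch only; the constant values are assumed, not taken
# from this patch.
URI_PREFIX = "/api/4.0/edges"
FIREWALL_SERVICE = "firewall/config"
FIREWALL_RULE_RESOURCE = "rules"


def build_uri_path(edge_id, service, resource=None, resource_id=None):
    # Mirrors Vcns._build_uri_path(): prefix/edge/service[/resource[/id]].
    uri = "%s/%s/%s" % (URI_PREFIX, edge_id, service)
    if resource:
        uri += "/%s" % resource
        if resource_id:
            uri += "/%s" % resource_id
    return uri

# update_firewall_rule('edge-1', '132', body) would then PUT to:
#   /api/4.0/edges/edge-1/firewall/config/rules/132
print(build_uri_path("edge-1", FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE, "132"))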
+# +# @author: linb, VMware + +from oslo.config import cfg + +from neutron.openstack.common import log as logging +from neutron.plugins.vmware.common import config # noqa +from neutron.plugins.vmware.vshield import edge_appliance_driver +from neutron.plugins.vmware.vshield import edge_firewall_driver +from neutron.plugins.vmware.vshield import edge_ipsecvpn_driver +from neutron.plugins.vmware.vshield import edge_loadbalancer_driver +from neutron.plugins.vmware.vshield.tasks import tasks +from neutron.plugins.vmware.vshield import vcns + +LOG = logging.getLogger(__name__) + + +class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver, + edge_firewall_driver.EdgeFirewallDriver, + edge_loadbalancer_driver.EdgeLbDriver, + edge_ipsecvpn_driver.EdgeIPsecVpnDriver): + + def __init__(self, callbacks): + super(VcnsDriver, self).__init__() + + self.callbacks = callbacks + self.vcns_uri = cfg.CONF.vcns.manager_uri + self.vcns_user = cfg.CONF.vcns.user + self.vcns_passwd = cfg.CONF.vcns.password + self.datacenter_moid = cfg.CONF.vcns.datacenter_moid + self.deployment_container_id = cfg.CONF.vcns.deployment_container_id + self.resource_pool_id = cfg.CONF.vcns.resource_pool_id + self.datastore_id = cfg.CONF.vcns.datastore_id + self.external_network = cfg.CONF.vcns.external_network + interval = cfg.CONF.vcns.task_status_check_interval + self.task_manager = tasks.TaskManager(interval) + self.task_manager.start() + self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/policy.py b/icehouse-patches/neutron/dvr-patch/neutron/policy.py new file mode 100644 index 00000000..dda8290e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/policy.py @@ -0,0 +1,414 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Policy engine for neutron. Largely copied from nova. 
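Everything the driver needs comes from the [vcns] option group registered in neutron.plugins.vmware.common.config, so standing one up outside the plugin is mostly a matter of overriding those options first. A rough sketch, with placeholder values and a no-op callbacks object (note that the constructor also starts the background TaskManager loop):

from oslo.config import cfg

from neutron.plugins.vmware.vshield import vcns_driver

# Placeholder values; manager_uri/user/password feed the Vcns REST client,
# the remaining [vcns] options are only used when deploying edge appliances.
for name, value in [('manager_uri', 'https://vsm.example.org'),
                    ('user', 'admin'),
                    ('password', 'secret'),
                    ('task_status_check_interval', 2000)]:
    cfg.CONF.set_override(name, value, group='vcns')


class NoopCallbacks(object):
    """Stand-in for the callbacks object the service plugin passes in."""


driver = vcns_driver.VcnsDriver(NoopCallbacks())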
+""" +import itertools +import re + +from oslo.config import cfg + +from neutron.api.v2 import attributes +from neutron.common import exceptions +import neutron.common.utils as utils +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import policy + + +LOG = logging.getLogger(__name__) +_POLICY_PATH = None +_POLICY_CACHE = {} +ADMIN_CTX_POLICY = 'context_is_admin' +# Maps deprecated 'extension' policies to new-style policies +DEPRECATED_POLICY_MAP = { + 'extension:provider_network': + ['network:provider:network_type', + 'network:provider:physical_network', + 'network:provider:segmentation_id'], + 'extension:router': + ['network:router:external'], + 'extension:port_binding': + ['port:binding:vif_type', 'port:binding:vif_details', + 'port:binding:profile', 'port:binding:host_id'] +} +DEPRECATED_ACTION_MAP = { + 'view': ['get'], + 'set': ['create', 'update'] +} + +cfg.CONF.import_opt('policy_file', 'neutron.common.config') + + +def reset(): + global _POLICY_PATH + global _POLICY_CACHE + _POLICY_PATH = None + _POLICY_CACHE = {} + policy.reset() + + +def init(): + global _POLICY_PATH + global _POLICY_CACHE + if not _POLICY_PATH: + _POLICY_PATH = utils.find_config_file({}, cfg.CONF.policy_file) + if not _POLICY_PATH: + raise exceptions.PolicyFileNotFound(path=cfg.CONF.policy_file) + # pass _set_brain to read_cached_file so that the policy brain + # is reset only if the file has changed + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, + reload_func=_set_rules) + + +def get_resource_and_action(action): + """Extract resource and action (write, read) from api operation.""" + data = action.split(':', 1)[0].split('_', 1) + return ("%ss" % data[-1], data[0] != 'get') + + +def _set_rules(data): + default_rule = 'default' + LOG.debug(_("Loading policies from file: %s"), _POLICY_PATH) + # Ensure backward compatibility with folsom/grizzly convention + # for extension rules + policies = policy.Rules.load_json(data, default_rule) + for pol in policies.keys(): + if any([pol.startswith(depr_pol) for depr_pol in + DEPRECATED_POLICY_MAP.keys()]): + LOG.warn(_("Found deprecated policy rule:%s. Please consider " + "upgrading your policy configuration file"), pol) + pol_name, action = pol.rsplit(':', 1) + try: + new_actions = DEPRECATED_ACTION_MAP[action] + new_policies = DEPRECATED_POLICY_MAP[pol_name] + # bind new actions and policies together + for actual_policy in ['_'.join(item) for item in + itertools.product(new_actions, + new_policies)]: + if actual_policy not in policies: + # New policy, same rule + LOG.info(_("Inserting policy:%(new_policy)s in place " + "of deprecated policy:%(old_policy)s"), + {'new_policy': actual_policy, + 'old_policy': pol}) + policies[actual_policy] = policies[pol] + # Remove old-style policy + del policies[pol] + except KeyError: + LOG.error(_("Backward compatibility unavailable for " + "deprecated policy %s. 
The policy will " + "not be enforced"), pol) + policy.set_rules(policies) + + +def _is_attribute_explicitly_set(attribute_name, resource, target): + """Verify that an attribute is present and has a non-default value.""" + return ('default' in resource[attribute_name] and + attribute_name in target and + target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and + target[attribute_name] != resource[attribute_name]['default']) + + +def _build_subattr_match_rule(attr_name, attr, action, target): + """Create the rule to match for sub-attribute policy checks.""" + # TODO(salv-orlando): Instead of relying on validator info, introduce + # typing for API attributes + # Expect a dict as type descriptor + validate = attr['validate'] + key = filter(lambda k: k.startswith('type:dict'), validate.keys()) + if not key: + LOG.warn(_("Unable to find data type descriptor for attribute %s"), + attr_name) + return + data = validate[key[0]] + if not isinstance(data, dict): + LOG.debug(_("Attribute type descriptor is not a dict. Unable to " + "generate any sub-attr policy rule for %s."), + attr_name) + return + sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' % + (action, attr_name, + sub_attr_name)) for + sub_attr_name in data if sub_attr_name in + target[attr_name]] + return policy.AndCheck(sub_attr_rules) + + +def _build_match_rule(action, target): + """Create the rule to match for a given action. + + The policy rule to be matched is built in the following way: + 1) add entries for matching permission on objects + 2) add an entry for the specific action (e.g.: create_network) + 3) add an entry for attributes of a resource for which the action + is being executed (e.g.: create_network:shared) + 4) add an entry for sub-attributes of a resource for which the + action is being executed + (e.g.: create_router:external_gateway_info:network_id) + """ + match_rule = policy.RuleCheck('rule', action) + resource, is_write = get_resource_and_action(action) + # Attribute-based checks shall not be enforced on GETs + if is_write: + # assigning to variable with short name for improving readability + res_map = attributes.RESOURCE_ATTRIBUTE_MAP + if resource in res_map: + for attribute_name in res_map[resource]: + if _is_attribute_explicitly_set(attribute_name, + res_map[resource], + target): + attribute = res_map[resource][attribute_name] + if 'enforce_policy' in attribute: + attr_rule = policy.RuleCheck('rule', '%s:%s' % + (action, attribute_name)) + # Build match entries for sub-attributes, if present + validate = attribute.get('validate') + if (validate and any([k.startswith('type:dict') and v + for (k, v) in + validate.iteritems()])): + attr_rule = policy.AndCheck( + [attr_rule, _build_subattr_match_rule( + attribute_name, attribute, + action, target)]) + match_rule = policy.AndCheck([match_rule, attr_rule]) + return match_rule + + +# This check is registered as 'tenant_id' so that it can override +# GenericCheck which was used for validating parent resource ownership. +# This will prevent us from having to handling backward compatibility +# for policy.json +# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks +@policy.register('tenant_id') +class OwnerCheck(policy.Check): + """Resource ownership check. + + This check verifies the owner of the current resource, or of another + resource referenced by the one under analysis. + In the former case it falls back to a regular GenericCheck, whereas + in the latter case it leverages the plugin to load the referenced + resource and perform the check. 
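Tying _build_match_rule() together: for a write action the result is an AndCheck of the per-action rule plus one '<action>:<attribute>' rule for every attribute that is explicitly set and flagged enforce_policy. The sketch below assumes (as in stock Icehouse, though not shown in this patch) that 'shared' carries enforce_policy in RESOURCE_ATTRIBUTE_MAP:

from neutron import policy

# Hypothetical request body as a plugin would pass it to the policy engine.
target = {'name': 'net1', 'shared': True}
match_rule = policy._build_match_rule('create_network', target)

# Conceptually equivalent to:
#   AndCheck([RuleCheck('rule', 'create_network'),
#             RuleCheck('rule', 'create_network:shared')])
# 'name' contributes nothing because it is not an enforce_policy attribute,
# and GET actions skip attribute rules entirely.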
+ """ + def __init__(self, kind, match): + # Process the match + try: + self.target_field = re.findall('^\%\((.*)\)s$', + match)[0] + except IndexError: + err_reason = (_("Unable to identify a target field from:%s." + "match should be in the form %%()s") % + match) + LOG.exception(err_reason) + raise exceptions.PolicyInitError( + policy="%s:%s" % (kind, match), + reason=err_reason) + super(OwnerCheck, self).__init__(kind, match) + + def __call__(self, target, creds): + if self.target_field not in target: + # policy needs a plugin check + # target field is in the form resource:field + # however if they're not separated by a colon, use an underscore + # as a separator for backward compatibility + + def do_split(separator): + parent_res, parent_field = self.target_field.split( + separator, 1) + return parent_res, parent_field + + for separator in (':', '_'): + try: + parent_res, parent_field = do_split(separator) + break + except ValueError: + LOG.debug(_("Unable to find ':' as separator in %s."), + self.target_field) + else: + # If we are here split failed with both separators + err_reason = (_("Unable to find resource name in %s") % + self.target_field) + LOG.exception(err_reason) + raise exceptions.PolicyCheckError( + policy="%s:%s" % (self.kind, self.match), + reason=err_reason) + parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get( + "%ss" % parent_res, None) + if not parent_foreign_key: + err_reason = (_("Unable to verify match:%(match)s as the " + "parent resource: %(res)s was not found") % + {'match': self.match, 'res': parent_res}) + LOG.exception(err_reason) + raise exceptions.PolicyCheckError( + policy="%s:%s" % (self.kind, self.match), + reason=err_reason) + # NOTE(salv-orlando): This check currently assumes the parent + # resource is handled by the core plugin. It might be worth + # having a way to map resources to plugins so to make this + # check more general + # FIXME(ihrachys): if import is put in global, circular + # import failure occurs + from neutron import manager + f = getattr(manager.NeutronManager.get_instance().plugin, + 'get_%s' % parent_res) + # f *must* exist, if not found it is better to let neutron + # explode. 
Check will be performed with admin context + context = importutils.import_module('neutron.context') + try: + data = f(context.get_admin_context(), + target[parent_foreign_key], + fields=[parent_field]) + target[self.target_field] = data[parent_field] + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Policy check error while calling %s!'), f) + match = self.match % target + if self.kind in creds: + return match == unicode(creds[self.kind]) + return False + + +@policy.register('field') +class FieldCheck(policy.Check): + def __init__(self, kind, match): + # Process the match + resource, field_value = match.split(':', 1) + field, value = field_value.split('=', 1) + + super(FieldCheck, self).__init__(kind, '%s:%s:%s' % + (resource, field, value)) + + # Value might need conversion - we need help from the attribute map + try: + attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field] + conv_func = attr['convert_to'] + except KeyError: + conv_func = lambda x: x + + self.field = field + self.value = conv_func(value) + + def __call__(self, target_dict, cred_dict): + target_value = target_dict.get(self.field) + # target_value might be a boolean, explicitly compare with None + if target_value is None: + LOG.debug(_("Unable to find requested field: %(field)s in " + "target: %(target_dict)s"), + {'field': self.field, + 'target_dict': target_dict}) + return False + return target_value == self.value + + +def _prepare_check(context, action, target): + """Prepare rule, target, and credentials for the policy engine.""" + # Compare with None to distinguish case in which target is {} + if target is None: + target = {} + match_rule = _build_match_rule(action, target) + credentials = context.to_dict() + return match_rule, target, credentials + + +def check(context, action, target, plugin=None, might_not_exist=False): + """Verifies that the action is valid on the target in this context. + + :param context: neutron context + :param action: string representing the action to be checked + this should be colon separated for clarity. + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + :param plugin: currently unused and deprecated. + Kept for backward compatibility. + :param might_not_exist: If True the policy check is skipped (and the + function returns True) if the specified policy does not exist. + Defaults to false. + + :return: Returns True if access is permitted else False. + """ + if might_not_exist and not (policy._rules and action in policy._rules): + return True + return policy.check(*(_prepare_check(context, action, target))) + + +def enforce(context, action, target, plugin=None): + """Verifies that the action is valid on the target in this context. + + :param context: neutron context + :param action: string representing the action to be checked + this should be colon separated for clarity. + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + :param plugin: currently unused and deprecated. + Kept for backward compatibility. + + :raises neutron.exceptions.PolicyNotAuthorized: if verification fails. 
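FieldCheck covers rules of the 'field:<resource>:<field>=<value>' form; the raw value is passed through the attribute's convert_to helper (assumed here to be convert_to_boolean for 'shared') so that string and boolean representations compare correctly. A brief sketch:

from neutron import policy as neutron_policy

# Equivalent of the policy.json fragment
#   "shared": "field:networks:shared=True"
check = neutron_policy.FieldCheck('field', 'networks:shared=True')

check({'shared': True}, {})    # -> True
check({'shared': False}, {})   # -> False
check({'name': 'net1'}, {})    # -> False (field missing from the target)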
+ """ + + rule, target, credentials = _prepare_check(context, action, target) + result = policy.check(rule, target, credentials, action=action) + if not result: + LOG.debug(_("Failed policy check for '%s'"), action) + raise exceptions.PolicyNotAuthorized(action=action) + return result + + +def check_is_admin(context): + """Verify context has admin rights according to policy settings.""" + init() + # the target is user-self + credentials = context.to_dict() + target = credentials + # Backward compatibility: if ADMIN_CTX_POLICY is not + # found, default to validating role:admin + admin_policy = (ADMIN_CTX_POLICY in policy._rules + and ADMIN_CTX_POLICY or 'role:admin') + return policy.check(admin_policy, target, credentials) + + +def _extract_roles(rule, roles): + if isinstance(rule, policy.RoleCheck): + roles.append(rule.match.lower()) + elif isinstance(rule, policy.RuleCheck): + _extract_roles(policy._rules[rule.match], roles) + elif hasattr(rule, 'rules'): + for rule in rule.rules: + _extract_roles(rule, roles) + + +def get_admin_roles(): + """Return a list of roles which are granted admin rights according + to policy settings. + """ + # NOTE(salvatore-orlando): This function provides a solution for + # populating implicit contexts with the appropriate roles so that + # they correctly pass policy checks, and will become superseded + # once all explicit policy checks are removed from db logic and + # plugin modules. For backward compatibility it returns the literal + # admin if ADMIN_CTX_POLICY is not defined + init() + if not policy._rules or ADMIN_CTX_POLICY not in policy._rules: + return ['admin'] + try: + admin_ctx_rule = policy._rules[ADMIN_CTX_POLICY] + except (KeyError, TypeError): + return + roles = [] + _extract_roles(admin_ctx_rule, roles) + return roles diff --git a/icehouse-patches/neutron/dvr-patch/neutron/quota.py b/icehouse-patches/neutron/dvr-patch/neutron/quota.py new file mode 100644 index 00000000..3fc07a32 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/quota.py @@ -0,0 +1,332 @@ +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Quotas for instances, volumes, and floating ips.""" + +import sys + +from oslo.config import cfg +import webob + +from neutron.common import exceptions +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) +QUOTA_DB_MODULE = 'neutron.db.quota_db' +QUOTA_DB_DRIVER = 'neutron.db.quota_db.DbQuotaDriver' +QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver' + +quota_opts = [ + cfg.ListOpt('quota_items', + default=['network', 'subnet', 'port'], + help=_('Resource name(s) that are supported in quota ' + 'features')), + cfg.IntOpt('default_quota', + default=-1, + help=_('Default number of resource allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_network', + default=10, + help=_('Number of networks allowed per tenant.' 
+ 'A negative value means unlimited.')), + cfg.IntOpt('quota_subnet', + default=10, + help=_('Number of subnets allowed per tenant, ' + 'A negative value means unlimited.')), + cfg.IntOpt('quota_port', + default=50, + help=_('Number of ports allowed per tenant. ' + 'A negative value means unlimited.')), + cfg.StrOpt('quota_driver', + default=QUOTA_DB_DRIVER, + help=_('Default driver to use for quota checks')), +] +# Register the configuration options +cfg.CONF.register_opts(quota_opts, 'QUOTAS') + + +class ConfDriver(object): + """Configuration driver. + + Driver to perform necessary checks to enforce quotas and obtain + quota information. The default driver utilizes the default values + in neutron.conf. + """ + + def _get_quotas(self, context, resources, keys): + """Get quotas. + + A helper method which retrieves the quotas for the specific + resources identified by keys, and which apply to the current + context. + + :param context: The request context, for access checks. + :param resources: A dictionary of the registered resources. + :param keys: A list of the desired quotas to retrieve. + """ + + # Filter resources + desired = set(keys) + sub_resources = dict((k, v) for k, v in resources.items() + if k in desired) + + # Make sure we accounted for all of them... + if len(keys) != len(sub_resources): + unknown = desired - set(sub_resources.keys()) + raise exceptions.QuotaResourceUnknown(unknown=sorted(unknown)) + quotas = {} + for resource in sub_resources.values(): + quotas[resource.name] = resource.default + return quotas + + def limit_check(self, context, tenant_id, + resources, values): + """Check simple quota limits. + + For limits--those quotas for which there is no usage + synchronization function--this method checks that a set of + proposed values are permitted by the limit restriction. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it is not a simple limit + resource. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns + nothing. + + :param context: The request context, for access checks. + :param tennant_id: The tenant_id to check quota. + :param resources: A dictionary of the registered resources. + :param values: A dictionary of the values to check against the + quota. 
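Because ConfDriver tracks no usage, a limit check reduces to comparing the proposed values against the per-resource defaults from the [QUOTAS] section. A rough sketch of that behaviour, with hand-built resource objects standing in for the ones the engine registers at import time:

from neutron.common import exceptions
from neutron import quota

resources = {
    'network': quota.CountableResource('network', None, 'quota_network'),
    'port': quota.CountableResource('port', None, 'quota_port'),
}
driver = quota.ConfDriver()

# With the defaults above (10 networks, 50 ports) this passes silently.
driver.limit_check(None, 'tenant-a', resources, {'network': 3, 'port': 20})

# Asking for 11 networks raises OverQuota naming the offending resources.
try:
    driver.limit_check(None, 'tenant-a', resources, {'network': 11})
except exceptions.OverQuota:
    pass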
+ """ + + # Ensure no value is less than zero + unders = [key for key, val in values.items() if val < 0] + if unders: + raise exceptions.InvalidQuotaValue(unders=sorted(unders)) + + # Get the applicable quotas + quotas = self._get_quotas(context, resources, values.keys()) + + # Check the quotas and construct a list of the resources that + # would be put over limit by the desired values + overs = [key for key, val in values.items() + if quotas[key] >= 0 and quotas[key] < val] + if overs: + raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas, + usages={}) + + @staticmethod + def get_tenant_quotas(context, resources, tenant_id): + quotas = {} + sub_resources = dict((k, v) for k, v in resources.items()) + for resource in sub_resources.values(): + quotas[resource.name] = resource.default + return quotas + + @staticmethod + def get_all_quotas(context, resources): + return [] + + @staticmethod + def delete_tenant_quota(context, tenant_id): + msg = _('Access to this resource was denied.') + raise webob.exc.HTTPForbidden(msg) + + @staticmethod + def update_quota_limit(context, tenant_id, resource, limit): + msg = _('Access to this resource was denied.') + raise webob.exc.HTTPForbidden(msg) + + +class BaseResource(object): + """Describe a single resource for quota checking.""" + + def __init__(self, name, flag): + """Initializes a resource. + + :param name: The name of the resource, i.e., "instances". + :param flag: The name of the flag or configuration option + """ + + self.name = name + self.flag = flag + + @property + def default(self): + """Return the default value of the quota.""" + return getattr(cfg.CONF.QUOTAS, + self.flag, + cfg.CONF.QUOTAS.default_quota) + + +class CountableResource(BaseResource): + """Describe a resource where the counts are determined by a function.""" + + def __init__(self, name, count, flag=None): + """Initializes a CountableResource. + + Countable resources are those resources which directly + correspond to objects in the database, i.e., netowk, subnet, + etc.,. A CountableResource must be constructed with a counting + function, which will be called to determine the current counts + of the resource. + + The counting function will be passed the context, along with + the extra positional and keyword arguments that are passed to + Quota.count(). It should return an integer specifying the + count. + + :param name: The name of the resource, i.e., "instances". + :param count: A callable which returns the count of the + resource. The arguments passed are as described + above. + :param flag: The name of the flag or configuration option + which specifies the default value of the quota + for this resource. + """ + + super(CountableResource, self).__init__(name, flag=flag) + self.count = count + + +class QuotaEngine(object): + """Represent the set of recognized quotas.""" + + def __init__(self, quota_driver_class=None): + """Initialize a Quota object.""" + + self._resources = {} + self._driver = None + self._driver_class = quota_driver_class + + def get_driver(self): + if self._driver is None: + _driver_class = (self._driver_class or + cfg.CONF.QUOTAS.quota_driver) + if (_driver_class == QUOTA_DB_DRIVER and + QUOTA_DB_MODULE not in sys.modules): + # If quotas table is not loaded, force config quota driver. 
+ _driver_class = QUOTA_CONF_DRIVER + LOG.info(_("ConfDriver is used as quota_driver because the " + "loaded plugin does not support 'quotas' table.")) + if isinstance(_driver_class, basestring): + _driver_class = importutils.import_object(_driver_class) + self._driver = _driver_class + LOG.info(_('Loaded quota_driver: %s.'), _driver_class) + return self._driver + + def __contains__(self, resource): + return resource in self._resources + + def register_resource(self, resource): + """Register a resource.""" + if resource.name in self._resources: + LOG.warn(_('%s is already registered.'), resource.name) + return + self._resources[resource.name] = resource + + def register_resource_by_name(self, resourcename): + """Register a resource by name.""" + resource = CountableResource(resourcename, _count_resource, + 'quota_' + resourcename) + self.register_resource(resource) + + def register_resources(self, resources): + """Register a list of resources.""" + + for resource in resources: + self.register_resource(resource) + + def count(self, context, resource, *args, **kwargs): + """Count a resource. + + For countable resources, invokes the count() function and + returns its result. Arguments following the context and + resource are passed directly to the count function declared by + the resource. + + :param context: The request context, for access checks. + :param resource: The name of the resource, as a string. + """ + + # Get the resource + res = self._resources.get(resource) + if not res or not hasattr(res, 'count'): + raise exceptions.QuotaResourceUnknown(unknown=[resource]) + + return res.count(context, *args, **kwargs) + + def limit_check(self, context, tenant_id, **values): + """Check simple quota limits. + + For limits--those quotas for which there is no usage + synchronization function--this method checks that a set of + proposed values are permitted by the limit restriction. The + values to check are given as keyword arguments, where the key + identifies the specific quota limit to check, and the value is + the proposed value. + + This method will raise a QuotaResourceUnknown exception if a + given resource is unknown or if it is not a simple limit + resource. + + If any of the proposed values is over the defined quota, an + OverQuota exception will be raised with the sorted list of the + resources which are too high. Otherwise, the method returns + nothing. + + :param context: The request context, for access checks. + """ + + return self.get_driver().limit_check(context, tenant_id, + self._resources, values) + + @property + def resources(self): + return self._resources + + +QUOTAS = QuotaEngine() + + +def _count_resource(context, plugin, resources, tenant_id): + count_getter_name = "get_%s_count" % resources + + # Some plugins support a count method for particular resources, + # using a DB's optimized counting features. We try to use that one + # if present. 
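At the API layer it is the module-level QUOTAS engine that gets consulted: resources beyond the configured quota_items are registered explicitly, the current count is obtained through count(), and the proposed total goes through limit_check(). A hedged sketch of that flow ('router' is illustrative; in real Neutron the L3 extension performs this registration):

from neutron import quota

# network/subnet/port are registered from config at import time; anything
# else needs an explicit registration such as this one.
quota.QUOTAS.register_resource_by_name('router')


def check_router_quota(context, plugin, tenant_id):
    # _count_resource tries plugin.get_routers_count() first and falls back
    # to len(plugin.get_routers(...)) for plugins without a count method.
    used = quota.QUOTAS.count(context, 'router', plugin, 'routers', tenant_id)
    quota.QUOTAS.limit_check(context, tenant_id, router=used + 1)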
Otherwise just use regular getter to retrieve all objects + # and count in python, allowing older plugins to still be supported + try: + obj_count_getter = getattr(plugin, count_getter_name) + return obj_count_getter(context, filters={'tenant_id': [tenant_id]}) + except (NotImplementedError, AttributeError): + obj_getter = getattr(plugin, "get_%s" % resources) + obj_list = obj_getter(context, filters={'tenant_id': [tenant_id]}) + return len(obj_list) if obj_list else 0 + + +def register_resources_from_config(): + resources = [] + for resource_item in cfg.CONF.QUOTAS.quota_items: + resources.append(CountableResource(resource_item, _count_resource, + 'quota_' + resource_item)) + QUOTAS.register_resources(resources) + + +register_resources_from_config() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/scheduler/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/scheduler/__init__.py new file mode 100644 index 00000000..e8293255 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/scheduler/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/scheduler/dhcp_agent_scheduler.py b/icehouse-patches/neutron/dvr-patch/neutron/scheduler/dhcp_agent_scheduler.py new file mode 100644 index 00000000..f29b823c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/scheduler/dhcp_agent_scheduler.py @@ -0,0 +1,133 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random + +from oslo.config import cfg +from oslo.db import exception as db_exc +from sqlalchemy import sql + +from neutron.common import constants +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class ChanceScheduler(object): + """Allocate a DHCP agent for a network in a random way. + More sophisticated scheduler (similar to filter scheduler in nova?) + can be introduced later. 
+ """ + + def _schedule_bind_network(self, context, agents, network_id): + for agent in agents: + context.session.begin(subtransactions=True) + try: + binding = agentschedulers_db.NetworkDhcpAgentBinding() + binding.dhcp_agent = agent + binding.network_id = network_id + context.session.add(binding) + # try to actually write the changes and catch integrity + # DBDuplicateEntry + context.session.commit() + except db_exc.DBDuplicateEntry: + # it's totally ok, someone just did our job! + context.session.rollback() + LOG.info(_('Agent %s already present'), agent) + LOG.debug(_('Network %(network_id)s is scheduled to be ' + 'hosted by DHCP agent %(agent_id)s'), + {'network_id': network_id, + 'agent_id': agent}) + + def schedule(self, plugin, context, network): + """Schedule the network to active DHCP agent(s). + + A list of scheduled agents is returned. + """ + agents_per_network = cfg.CONF.dhcp_agents_per_network + + #TODO(gongysh) don't schedule the networks with only + # subnets whose enable_dhcp is false + with context.session.begin(subtransactions=True): + dhcp_agents = plugin.get_dhcp_agents_hosting_networks( + context, [network['id']], active=True) + if len(dhcp_agents) >= agents_per_network: + LOG.debug(_('Network %s is hosted already'), + network['id']) + return + n_agents = agents_per_network - len(dhcp_agents) + enabled_dhcp_agents = plugin.get_agents_db( + context, filters={ + 'agent_type': [constants.AGENT_TYPE_DHCP], + 'admin_state_up': [True]}) + if not enabled_dhcp_agents: + LOG.warn(_('No more DHCP agents')) + return + active_dhcp_agents = [ + agent for agent in set(enabled_dhcp_agents) + if not agents_db.AgentDbMixin.is_agent_down( + agent['heartbeat_timestamp']) + and agent not in dhcp_agents + ] + if not active_dhcp_agents: + LOG.warn(_('No more DHCP agents')) + return + n_agents = min(len(active_dhcp_agents), n_agents) + chosen_agents = random.sample(active_dhcp_agents, n_agents) + self._schedule_bind_network(context, chosen_agents, network['id']) + return chosen_agents + + def auto_schedule_networks(self, plugin, context, host): + """Schedule non-hosted networks to the DHCP agent on + the specified host. 
+ """ + agents_per_network = cfg.CONF.dhcp_agents_per_network + # a list of (agent, net_ids) tuples + bindings_to_add = [] + with context.session.begin(subtransactions=True): + fields = ['network_id', 'enable_dhcp'] + subnets = plugin.get_subnets(context, fields=fields) + net_ids = set(s['network_id'] for s in subnets + if s['enable_dhcp']) + if not net_ids: + LOG.debug(_('No non-hosted networks')) + return False + query = context.session.query(agents_db.Agent) + query = query.filter(agents_db.Agent.agent_type == + constants.AGENT_TYPE_DHCP, + agents_db.Agent.host == host, + agents_db.Agent.admin_state_up == sql.true()) + dhcp_agents = query.all() + for dhcp_agent in dhcp_agents: + if agents_db.AgentDbMixin.is_agent_down( + dhcp_agent.heartbeat_timestamp): + LOG.warn(_('DHCP agent %s is not active'), dhcp_agent.id) + continue + for net_id in net_ids: + agents = plugin.get_dhcp_agents_hosting_networks( + context, [net_id], active=True) + if len(agents) >= agents_per_network: + continue + if any(dhcp_agent.id == agent.id for agent in agents): + continue + bindings_to_add.append((dhcp_agent, net_id)) + # do it outside transaction so particular scheduling results don't + # make other to fail + for agent, net_id in bindings_to_add: + self._schedule_bind_network(context, [agent], net_id) + return True diff --git a/icehouse-patches/neutron/dvr-patch/neutron/scheduler/l3_agent_scheduler.py b/icehouse-patches/neutron/dvr-patch/neutron/scheduler/l3_agent_scheduler.py new file mode 100644 index 00000000..9da17228 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/scheduler/l3_agent_scheduler.py @@ -0,0 +1,235 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import random + +import six +from sqlalchemy.orm import exc +from sqlalchemy import sql + +from neutron.common import constants +from neutron.db import agents_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_db +from neutron.db import l3_dvrscheduler_db +from neutron.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class L3Scheduler(object): + + @abc.abstractmethod + def schedule(self, plugin, context, router_id, + candidates=None, hints=None): + """Schedule the router to an active L3 agent. + + Schedule the router only if it is not already scheduled. + """ + pass + + def auto_schedule_routers(self, plugin, context, host, router_ids): + """Schedule non-hosted routers to L3 Agent running on host. + + If router_ids is given, each router in router_ids is scheduled + if it is not scheduled yet. Otherwise all unscheduled routers + are scheduled. + Don't schedule the routers which are hosted already + by active l3 agents. 
+ """ + with context.session.begin(subtransactions=True): + # query if we have valid l3 agent on the host + query = context.session.query(agents_db.Agent) + query = query.filter(agents_db.Agent.agent_type == + constants.AGENT_TYPE_L3, + agents_db.Agent.host == host, + agents_db.Agent.admin_state_up == sql.true()) + try: + l3_agent = query.one() + except (exc.MultipleResultsFound, exc.NoResultFound): + LOG.debug(_('No enabled L3 agent on host %s'), + host) + return False + if agents_db.AgentDbMixin.is_agent_down( + l3_agent.heartbeat_timestamp): + LOG.warn(_('L3 agent %s is not active'), l3_agent.id) + # check if each of the specified routers is hosted + if router_ids: + routers = plugin.get_routers( + context, filters={'id': router_ids}) + unscheduled_routers = [] + for router in routers: + l3_agents = plugin.get_l3_agents_hosting_routers( + context, [router['id']], admin_state_up=True) + if l3_agents and not router.get('distributed', False): + LOG.debug(_('Router %(router_id)s has already been' + ' hosted by L3 agent %(agent_id)s'), + {'router_id': router['id'], + 'agent_id': l3_agents[0]['id']}) + else: + unscheduled_routers.append(router) + if not unscheduled_routers: + # all (specified) routers are already scheduled + return False + else: + # get all routers that are not hosted + #TODO(gongysh) consider the disabled agent's router + stmt = ~sql.exists().where( + l3_db.Router.id == + l3_agentschedulers_db.RouterL3AgentBinding.router_id) + unscheduled_router_ids = [router_id_[0] for router_id_ in + context.session.query( + l3_db.Router.id).filter(stmt)] + if not unscheduled_router_ids: + LOG.debug(_('No non-hosted routers')) + return False + unscheduled_routers = plugin.get_routers( + context, filters={'id': unscheduled_router_ids}) + + # check if the configuration of l3 agent is compatible + # with the router + to_removed_ids = [] + for router in unscheduled_routers: + candidates = plugin.get_l3_agent_candidates(context, + router, + [l3_agent]) + if not candidates: + to_removed_ids.append(router['id']) + unscheduled_router_ids = set(r['id'] for r in unscheduled_routers) + router_ids = unscheduled_router_ids - set(to_removed_ids) + if not router_ids: + LOG.warn(_('No routers compatible with L3 agent configuration' + ' on host %s'), host) + return False + + for router_id in router_ids: + router_dict = plugin.get_router(context, router_id) + if router_dict.get('distributed', False): + query = (context.session. + query(l3_agentschedulers_db.RouterL3AgentBinding)) + query = query.filter( + l3_agentschedulers_db.RouterL3AgentBinding.router_id + == router_id, + l3_agentschedulers_db.RouterL3AgentBinding.l3_agent_id + == (l3_agent.id)) + if query.count(): + continue + self.bind_router(context, router_id, l3_agent) + return True + + def get_candidates(self, plugin, context, sync_router, subnet_id): + """Return L3 agents where a router could be scheduled.""" + with context.session.begin(subtransactions=True): + # allow one router is hosted by just + # one enabled l3 agent hosting since active is just a + # timing problem. 
Non-active l3 agent can return to + # active any time + l3_agents = plugin.get_l3_agents_hosting_routers( + context, [sync_router['id']], admin_state_up=True) + old_l3agentset = set(l3_agents) + if l3_agents and not sync_router.get('distributed', False): + LOG.debug(_('Router %(router_id)s has already been hosted' + ' by L3 agent %(agent_id)s'), + {'router_id': sync_router['id'], + 'agent_id': l3_agents[0]['id']}) + return + + active_l3_agents = plugin.get_l3_agents(context, active=True) + if not active_l3_agents: + LOG.warn(_('No active L3 agents')) + return + new_l3agents = plugin.get_l3_agent_candidates(context, + sync_router, + active_l3_agents, + subnet_id) + if sync_router.get('distributed', False): + new_l3agentset = set(new_l3agents) + candidates = list(new_l3agentset - old_l3agentset) + else: + candidates = new_l3agents + if not candidates: + LOG.warn(_('No L3 agents can host the router %s'), + sync_router['id']) + return + + return candidates + + def bind_router(self, context, router_id, chosen_agent): + """Bind the router to the l3 agent which has been chosen.""" + with context.session.begin(subtransactions=True): + binding = l3_agentschedulers_db.RouterL3AgentBinding() + binding.l3_agent = chosen_agent + binding.router_id = router_id + context.session.add(binding) + LOG.debug(_('Router %(router_id)s is scheduled to ' + 'L3 agent %(agent_id)s'), + {'router_id': router_id, + 'agent_id': chosen_agent.id}) + + +class ChanceScheduler(L3Scheduler): + """Randomly allocate an L3 agent for a router.""" + + def schedule(self, plugin, context, router_id, + candidates=None, hints=None): + with context.session.begin(subtransactions=True): + sync_router = plugin.get_router(context, router_id) + subnet_id = hints.get('subnet_id', None) if hints else None + candidates = candidates or self.get_candidates( + plugin, context, sync_router, subnet_id) + if (hints and + 'gw_exists' in hints and + sync_router.get('distributed', False)): + l3dvrsch = l3_dvrscheduler_db.L3_DVRsch_db_mixin() + l3dvrsch.schedule_snat_router(plugin, + context, + router_id, + hints['gw_exists']) + if not candidates: + return + if sync_router.get('distributed', False): + for chosen_agent in candidates: + self.bind_router(context, router_id, chosen_agent) + else: + chosen_agent = random.choice(candidates) + self.bind_router(context, router_id, chosen_agent) + return chosen_agent + + +class LeastRoutersScheduler(L3Scheduler): + """Allocate to an L3 agent with the least number of routers bound.""" + + def schedule(self, plugin, context, router_id, + candidates=None, hints=None): + with context.session.begin(subtransactions=True): + sync_router = plugin.get_router(context, router_id) + subnet_id = hints.get('subnet_id', None) if hints else None + candidates = candidates or self.get_candidates( + plugin, context, sync_router, subnet_id) + if not candidates: + return + if sync_router.get('distributed', False): + for chosen_agent in candidates: + self.bind_router(context, router_id, chosen_agent) + else: + candidate_ids = [candidate['id'] for candidate in candidates] + chosen_agent = plugin.get_l3_agent_with_min_routers( + context, candidate_ids) + self.bind_router(context, router_id, chosen_agent) + + return chosen_agent diff --git a/icehouse-patches/neutron/dvr-patch/neutron/server/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/server/__init__.py new file mode 100644 index 00000000..eb34ad85 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/server/__init__.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +# 
Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# If ../neutron/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... + +import sys + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.common import config +from neutron import service + +from neutron.openstack.common import gettextutils +from neutron.openstack.common import log as logging +gettextutils.install('neutron', lazy=True) + +LOG = logging.getLogger(__name__) + + +def main(): + # the configuration will be read into the cfg.CONF global data structure + config.init(sys.argv[1:]) + if not cfg.CONF.config_file: + sys.exit(_("ERROR: Unable to find configuration file via the default" + " search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and" + " the '--config-file' option!")) + try: + pool = eventlet.GreenPool() + + neutron_api = service.serve_wsgi(service.NeutronApiService) + api_thread = pool.spawn(neutron_api.wait) + + try: + neutron_rpc = service.serve_rpc() + except NotImplementedError: + LOG.info(_("RPC was already started in parent process by plugin.")) + else: + rpc_thread = pool.spawn(neutron_rpc.wait) + + # api and rpc should die together. When one dies, kill the other. + rpc_thread.link(lambda gt: api_thread.kill()) + api_thread.link(lambda gt: rpc_thread.kill()) + + pool.waitall() + except KeyboardInterrupt: + pass + except RuntimeError as e: + sys.exit(_("ERROR: %s") % e) + + +if __name__ == "__main__": + main() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/service.py b/icehouse-patches/neutron/dvr-patch/neutron/service.py new file mode 100644 index 00000000..cf357d16 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/service.py @@ -0,0 +1,299 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import inspect +import logging as std_logging +import os +import random + +from oslo.config import cfg +from oslo.messaging import server as rpc_server + +from neutron.common import config +from neutron.common import rpc as n_rpc +from neutron import context +from neutron.db import api as session +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import service as common_service +from neutron import wsgi + + +service_opts = [ + cfg.IntOpt('periodic_interval', + default=40, + help=_('Seconds between running periodic tasks')), + cfg.IntOpt('api_workers', + default=0, + help=_('Number of separate worker processes for service')), + cfg.IntOpt('rpc_workers', + default=0, + help=_('Number of RPC worker processes for service')), + cfg.IntOpt('periodic_fuzzy_delay', + default=5, + help=_('Range of seconds to randomly delay when starting the ' + 'periodic task scheduler to reduce stampeding. ' + '(Disable by setting to 0)')), +] +CONF = cfg.CONF +CONF.register_opts(service_opts) + +LOG = logging.getLogger(__name__) + + +class WsgiService(object): + """Base class for WSGI based services. + + For each api you define, you must also define these flags: + :_listen: The address on which to listen + :_listen_port: The port on which to listen + + """ + + def __init__(self, app_name): + self.app_name = app_name + self.wsgi_app = None + + def start(self): + self.wsgi_app = _run_wsgi(self.app_name) + + def wait(self): + self.wsgi_app.wait() + + +class NeutronApiService(WsgiService): + """Class for neutron-api service.""" + + @classmethod + def create(cls, app_name='neutron'): + + # Setup logging early, supplying both the CLI options and the + # configuration mapping from the config file + # We only update the conf dict for the verbose and debug + # flags. Everything else must be set up in the conf file... + # Log the options used when starting if we're in debug mode... + + config.setup_logging(cfg.CONF) + # Dump the initial option values + cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + service = cls(app_name) + return service + + +def serve_wsgi(cls): + + try: + service = cls.create() + service.start() + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Unrecoverable error: please check log ' + 'for details.')) + + return service + + +class RpcWorker(object): + """Wraps a worker to be handled by ProcessLauncher""" + def __init__(self, plugin): + self._plugin = plugin + self._servers = [] + + def start(self): + # We may have just forked from parent process. A quick disposal of the + # existing sql connections avoids producing errors later when they are + # discovered to be broken. + session.get_engine().pool.dispose() + self._servers = self._plugin.start_rpc_listeners() + + def wait(self): + for server in self._servers: + if isinstance(server, rpc_server.MessageHandlingServer): + server.wait() + + def stop(self): + for server in self._servers: + if isinstance(server, rpc_server.MessageHandlingServer): + server.kill() + self._servers = [] + + +def serve_rpc(): + plugin = manager.NeutronManager.get_plugin() + + # If 0 < rpc_workers then start_rpc_listeners would be called in a + # subprocess and we cannot simply catch the NotImplementedError. It is + # simpler to check this up front by testing whether the plugin supports + # multiple RPC workers. 
+ if not plugin.rpc_workers_supported(): + LOG.debug(_("Active plugin doesn't implement start_rpc_listeners")) + if 0 < cfg.CONF.rpc_workers: + msg = _("'rpc_workers = %d' ignored because start_rpc_listeners " + "is not implemented.") + LOG.error(msg, cfg.CONF.rpc_workers) + raise NotImplementedError + + try: + rpc = RpcWorker(plugin) + + if cfg.CONF.rpc_workers < 1: + rpc.start() + return rpc + else: + launcher = common_service.ProcessLauncher(wait_interval=1.0) + launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers) + return launcher + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Unrecoverable error: please check log ' + 'for details.')) + + +def _run_wsgi(app_name): + app = config.load_paste_app(app_name) + if not app: + LOG.error(_('No known API applications configured.')) + return + server = wsgi.Server("Neutron") + server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host, + workers=cfg.CONF.api_workers) + # Dump all option values here after all options are parsed + cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + LOG.info(_("Neutron service started, listening on %(host)s:%(port)s"), + {'host': cfg.CONF.bind_host, + 'port': cfg.CONF.bind_port}) + return server + + +class Service(n_rpc.Service): + """Service object for binaries running on hosts. + + A service takes a manager and enables rpc by listening to queues based + on topic. It also periodically runs tasks on the manager. + """ + + def __init__(self, host, binary, topic, manager, report_interval=None, + periodic_interval=None, periodic_fuzzy_delay=None, + *args, **kwargs): + + self.binary = binary + self.manager_class_name = manager + manager_class = importutils.import_class(self.manager_class_name) + self.manager = manager_class(host=host, *args, **kwargs) + self.report_interval = report_interval + self.periodic_interval = periodic_interval + self.periodic_fuzzy_delay = periodic_fuzzy_delay + self.saved_args, self.saved_kwargs = args, kwargs + self.timers = [] + super(Service, self).__init__(host, topic, manager=self.manager) + + def start(self): + self.manager.init_host() + super(Service, self).start() + if self.report_interval: + pulse = loopingcall.FixedIntervalLoopingCall(self.report_state) + pulse.start(interval=self.report_interval, + initial_delay=self.report_interval) + self.timers.append(pulse) + + if self.periodic_interval: + if self.periodic_fuzzy_delay: + initial_delay = random.randint(0, self.periodic_fuzzy_delay) + else: + initial_delay = None + + periodic = loopingcall.FixedIntervalLoopingCall( + self.periodic_tasks) + periodic.start(interval=self.periodic_interval, + initial_delay=initial_delay) + self.timers.append(periodic) + self.manager.after_start() + + def __getattr__(self, key): + manager = self.__dict__.get('manager', None) + return getattr(manager, key) + + @classmethod + def create(cls, host=None, binary=None, topic=None, manager=None, + report_interval=None, periodic_interval=None, + periodic_fuzzy_delay=None): + """Instantiates class and passes back application object. 
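Worker behaviour is controlled entirely by the two *_workers options: with rpc_workers < 1 the RpcWorker runs inside the parent process, otherwise a ProcessLauncher forks that many children, and api_workers does the same for the WSGI server. A hedged sketch of driving this from code rather than neutron.conf (it assumes core_plugin and the rest of the configuration are already in place):

from oslo.config import cfg

from neutron.common import config
from neutron import service

config.init([])                      # parse defaults; normally sys.argv[1:]

# Illustrative overrides: four forked RPC workers, WSGI served in-process.
cfg.CONF.set_override('rpc_workers', 4)
cfg.CONF.set_override('api_workers', 0)

rpc = service.serve_rpc()            # ProcessLauncher, since rpc_workers >= 1
api = service.serve_wsgi(service.NeutronApiService)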
+ + :param host: defaults to CONF.host + :param binary: defaults to basename of executable + :param topic: defaults to bin_name - 'nova-' part + :param manager: defaults to CONF._manager + :param report_interval: defaults to CONF.report_interval + :param periodic_interval: defaults to CONF.periodic_interval + :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay + + """ + if not host: + host = CONF.host + if not binary: + binary = os.path.basename(inspect.stack()[-1][1]) + if not topic: + topic = binary.rpartition('neutron-')[2] + topic = topic.replace("-", "_") + if not manager: + manager = CONF.get('%s_manager' % topic, None) + if report_interval is None: + report_interval = CONF.report_interval + if periodic_interval is None: + periodic_interval = CONF.periodic_interval + if periodic_fuzzy_delay is None: + periodic_fuzzy_delay = CONF.periodic_fuzzy_delay + service_obj = cls(host, binary, topic, manager, + report_interval=report_interval, + periodic_interval=periodic_interval, + periodic_fuzzy_delay=periodic_fuzzy_delay) + + return service_obj + + def kill(self): + """Destroy the service object.""" + self.stop() + + def stop(self): + super(Service, self).stop() + for x in self.timers: + try: + x.stop() + except Exception: + LOG.exception(_("Exception occurs when timer stops")) + pass + self.timers = [] + + def wait(self): + super(Service, self).wait() + for x in self.timers: + try: + x.wait() + except Exception: + LOG.exception(_("Exception occurs when waiting for timer")) + pass + + def periodic_tasks(self, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + ctxt = context.get_admin_context() + self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) + + def report_state(self): + """Update the state of this service.""" + # Todo(gongysh) report state to neutron server + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/__init__.py new file mode 100644 index 00000000..b706747c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/__init__.py new file mode 100644 index 00000000..a6a8955d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
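Most of the Service.create() defaults come straight from configuration; the only derived one is the topic, which is carved out of the binary name and then used to look up the manager option. For example (binary name hypothetical):

binary = 'neutron-dhcp-agent'
topic = binary.rpartition('neutron-')[2]   # -> 'dhcp-agent'
topic = topic.replace("-", "_")            # -> 'dhcp_agent'
# ...so the manager class would then be read from CONF.dhcp_agent_manager.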
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/__init__.py new file mode 100644 index 00000000..a6a8955d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/firewall_agent_api.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/firewall_agent_api.py new file mode 100644 index 00000000..9dcc44ae --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/firewall_agent_api.py @@ -0,0 +1,83 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc. +# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc. 
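+#
+# Example [fwaas] agent configuration for the options registered below
+# (illustrative only; the driver path shown is the iptables driver added
+# elsewhere in this patch, and any FwaaS driver class path may be used):
+#
+#     [fwaas]
+#     enabled = True
+#     driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver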
+ +from oslo.config import cfg + +from neutron.common import rpc as n_rpc +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +FWaaSOpts = [ + cfg.StrOpt( + 'driver', + default='', + help=_("Name of the FWaaS Driver")), + cfg.BoolOpt( + 'enabled', + default=False, + help=_("Enable FWaaS")), +] +cfg.CONF.register_opts(FWaaSOpts, 'fwaas') + + +class FWaaSPluginApiMixin(n_rpc.RpcProxy): + """Agent side of the FWaaS agent to FWaaS Plugin RPC API.""" + + RPC_API_VERSION = '1.0' + + def __init__(self, topic, host): + super(FWaaSPluginApiMixin, + self).__init__(topic=topic, + default_version=self.RPC_API_VERSION) + self.host = host + + def set_firewall_status(self, context, firewall_id, status): + """Make a RPC to set the status of a firewall.""" + return self.call(context, + self.make_msg('set_firewall_status', host=self.host, + firewall_id=firewall_id, status=status), + topic=self.topic) + + def firewall_deleted(self, context, firewall_id): + """Make a RPC to indicate that the firewall resources are deleted.""" + return self.call(context, + self.make_msg('firewall_deleted', host=self.host, + firewall_id=firewall_id), + topic=self.topic) + + +class FWaaSAgentRpcCallbackMixin(object): + """Mixin for FWaaS agent Implementations.""" + + def __init__(self, host): + + super(FWaaSAgentRpcCallbackMixin, self).__init__(host) + + def create_firewall(self, context, firewall, host): + """Handle RPC cast from plugin to create a firewall.""" + pass + + def update_firewall(self, context, firewall, host): + """Handle RPC cast from plugin to update a firewall.""" + pass + + def delete_firewall(self, context, firewall, host): + """Handle RPC cast from plugin to delete a firewall.""" + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/l3reference/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/l3reference/__init__.py new file mode 100644 index 00000000..a6a8955d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/l3reference/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py new file mode 100644 index 00000000..fbe8c132 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py @@ -0,0 +1,293 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. +# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc. +# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc. + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import ip_lib +from neutron.common import topics +from neutron import context +from neutron.extensions import firewall as fw_ext +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.firewall.agents import firewall_agent_api as api + +LOG = logging.getLogger(__name__) + + +class FWaaSL3PluginApi(api.FWaaSPluginApiMixin): + """Agent side of the FWaaS agent to FWaaS Plugin RPC API.""" + + def __init__(self, topic, host): + super(FWaaSL3PluginApi, self).__init__(topic, host) + + def get_firewalls_for_tenant(self, context, **kwargs): + """Get the Firewalls with rules from the Plugin to send to driver.""" + LOG.debug(_("Retrieve Firewall with rules from Plugin")) + + return self.call(context, + self.make_msg('get_firewalls_for_tenant', + host=self.host), + topic=self.topic) + + def get_tenants_with_firewalls(self, context, **kwargs): + """Get all Tenants that have Firewalls configured from plugin.""" + LOG.debug(_("Retrieve Tenants with Firewalls configured from Plugin")) + + return self.call(context, + self.make_msg('get_tenants_with_firewalls', + host=self.host), + topic=self.topic) + + +class FWaaSL3AgentRpcCallback(api.FWaaSAgentRpcCallbackMixin): + """FWaaS Agent support to be used by Neutron L3 agent.""" + + def __init__(self, conf): + LOG.debug(_("Initializing firewall agent")) + self.conf = conf + fwaas_driver_class_path = cfg.CONF.fwaas.driver + self.fwaas_enabled = cfg.CONF.fwaas.enabled + if self.fwaas_enabled: + try: + self.fwaas_driver = importutils.import_object( + fwaas_driver_class_path) + LOG.debug(_("FWaaS Driver Loaded: '%s'"), + fwaas_driver_class_path) + except ImportError: + msg = _('Error importing FWaaS device driver: %s') + raise ImportError(msg % fwaas_driver_class_path) + self.services_sync = False + self.root_helper = config.get_root_helper(conf) + # setup RPC to msg fwaas plugin + self.fwplugin_rpc = FWaaSL3PluginApi(topics.FIREWALL_PLUGIN, + conf.host) + super(FWaaSL3AgentRpcCallback, self).__init__(host=conf.host) + + def _get_router_info_list_for_tenant(self, routers, tenant_id): + """Returns the list of router info objects on which to apply the fw.""" + root_ip = ip_lib.IPWrapper(self.root_helper) + # Get the routers for the tenant + router_ids = [ + router['id'] + for router in routers + if router['tenant_id'] == tenant_id] + local_ns_list = root_ip.get_namespaces( + self.root_helper) if self.conf.use_namespaces else [] + + router_info_list = [] + # Pick up namespaces for Tenant Routers + for rid in router_ids: + # for routers without an interface - get_routers returns + # the router - but this is not yet populated in router_info + if rid not in self.router_info: + continue + if self.router_info[rid].use_namespaces: + 
router_ns = self.router_info[rid].ns_name + if router_ns in local_ns_list: + router_info_list.append(self.router_info[rid]) + else: + router_info_list.append(self.router_info[rid]) + return router_info_list + + def _invoke_driver_for_plugin_api(self, context, fw, func_name): + """Invoke driver method for plugin API and provide status back.""" + LOG.debug(_("%(func_name)s from agent for fw: %(fwid)s"), + {'func_name': func_name, 'fwid': fw['id']}) + try: + routers = self.plugin_rpc.get_routers(context) + router_info_list = self._get_router_info_list_for_tenant( + routers, + fw['tenant_id']) + if not router_info_list: + LOG.debug(_('No Routers on tenant: %s'), fw['tenant_id']) + # fw was created before any routers were added, and if a + # delete is sent then we need to ack so that plugin can + # cleanup. + if func_name == 'delete_firewall': + self.fwplugin_rpc.firewall_deleted(context, fw['id']) + return + LOG.debug(_("Apply fw on Router List: '%s'"), + [ri.router['id'] for ri in router_info_list]) + # call into the driver + try: + self.fwaas_driver.__getattribute__(func_name)( + router_info_list, + fw) + if fw['admin_state_up']: + status = constants.ACTIVE + else: + status = constants.DOWN + except fw_ext.FirewallInternalDriverError: + LOG.error(_("Firewall Driver Error for %(func_name)s " + "for fw: %(fwid)s"), + {'func_name': func_name, 'fwid': fw['id']}) + status = constants.ERROR + # delete needs different handling + if func_name == 'delete_firewall': + if status in [constants.ACTIVE, constants.DOWN]: + self.fwplugin_rpc.firewall_deleted(context, fw['id']) + else: + self.fwplugin_rpc.set_firewall_status( + context, + fw['id'], + status) + except Exception: + LOG.exception( + _("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"), + {'func_name': func_name, 'fwid': fw['id']}) + self.services_sync = True + return + + def _invoke_driver_for_sync_from_plugin(self, ctx, router_info_list, fw): + """Invoke the delete driver method for status of PENDING_DELETE and + update method for all other status to (re)apply on driver which is + Idempotent. + """ + if fw['status'] == constants.PENDING_DELETE: + try: + self.fwaas_driver.delete_firewall(router_info_list, fw) + self.fwplugin_rpc.firewall_deleted( + ctx, + fw['id']) + except fw_ext.FirewallInternalDriverError: + LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s " + "for fw: %(fwid)s"), + {'fwmsg': fw['status'], 'fwid': fw['id']}) + self.fwplugin_rpc.set_firewall_status( + ctx, + fw['id'], + constants.ERROR) + else: + # PENDING_UPDATE, PENDING_CREATE, ... + try: + self.fwaas_driver.update_firewall(router_info_list, fw) + if fw['admin_state_up']: + status = constants.ACTIVE + else: + status = constants.DOWN + except fw_ext.FirewallInternalDriverError: + LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s " + "for fw: %(fwid)s"), + {'fwmsg': fw['status'], 'fwid': fw['id']}) + status = constants.ERROR + + self.fwplugin_rpc.set_firewall_status( + ctx, + fw['id'], + status) + + def _process_router_add(self, ri): + """On router add, get fw with rules from plugin and update driver.""" + LOG.debug(_("Process router add, router_id: '%s'"), ri.router['id']) + routers = [] + routers.append(ri.router) + router_info_list = self._get_router_info_list_for_tenant( + routers, + ri.router['tenant_id']) + if router_info_list: + # Get the firewall with rules + # for the tenant the router is on. 
+ ctx = context.Context('', ri.router['tenant_id']) + fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx) + LOG.debug(_("Process router add, fw_list: '%s'"), + [fw['id'] for fw in fw_list]) + for fw in fw_list: + self._invoke_driver_for_sync_from_plugin( + ctx, + router_info_list, + fw) + + def process_router_add(self, ri): + """On router add, get fw with rules from plugin and update driver.""" + # avoid msg to plugin when fwaas is not configured + if not self.fwaas_enabled: + return + try: + self._process_router_add(ri) + except Exception: + LOG.exception( + _("FWaaS RPC info call failed for '%s'."), + ri.router['id']) + self.services_sync = True + + def process_services_sync(self, ctx): + """On RPC issues sync with plugin and apply the sync data.""" + # avoid msg to plugin when fwaas is not configured + if not self.fwaas_enabled: + return + try: + # get all routers + routers = self.plugin_rpc.get_routers(ctx) + # get the list of tenants with firewalls configured + # from the plugin + tenant_ids = self.fwplugin_rpc.get_tenants_with_firewalls(ctx) + LOG.debug(_("Tenants with Firewalls: '%s'"), tenant_ids) + for tenant_id in tenant_ids: + ctx = context.Context('', tenant_id) + fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx) + if fw_list: + # if fw present on tenant + router_info_list = self._get_router_info_list_for_tenant( + routers, + tenant_id) + if router_info_list: + LOG.debug(_("Router List: '%s'"), + [ri.router['id'] for ri in router_info_list]) + LOG.debug(_("fw_list: '%s'"), + [fw['id'] for fw in fw_list]) + # apply sync data on fw for this tenant + for fw in fw_list: + # fw, routers present on this host for tenant + # install + LOG.debug(_("Apply fw on Router List: '%s'"), + [ri.router['id'] + for ri in router_info_list]) + # no need to apply sync data for ACTIVE fw + if fw['status'] != constants.ACTIVE: + self._invoke_driver_for_sync_from_plugin( + ctx, + router_info_list, + fw) + self.services_sync = False + except Exception: + LOG.exception(_("Failed fwaas process services sync")) + self.services_sync = True + + def create_firewall(self, context, firewall, host): + """Handle Rpc from plugin to create a firewall.""" + return self._invoke_driver_for_plugin_api( + context, + firewall, + 'create_firewall') + + def update_firewall(self, context, firewall, host): + """Handle Rpc from plugin to update a firewall.""" + return self._invoke_driver_for_plugin_api( + context, + firewall, + 'update_firewall') + + def delete_firewall(self, context, firewall, host): + """Handle Rpc from plugin to delete a firewall.""" + return self._invoke_driver_for_plugin_api( + context, + firewall, + 'delete_firewall') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/__init__.py new file mode 100644 index 00000000..a6a8955d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_api.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_api.py new file mode 100644 index 00000000..7da2002b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_api.py @@ -0,0 +1,145 @@ +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Gary Duan, gduan@varmour.com, vArmour Networks + +import base64 + +import httplib2 +from oslo.config import cfg + +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.services.firewall.agents.varmour import varmour_utils as va_utils + +OPTS = [ + cfg.StrOpt('director', default='localhost', + help=_("vArmour director ip")), + cfg.StrOpt('director_port', default='443', + help=_("vArmour director port")), + cfg.StrOpt('username', default='varmour', + help=_("vArmour director username")), + cfg.StrOpt('password', default='varmour', secret=True, + help=_("vArmour director password")), ] + +cfg.CONF.register_opts(OPTS, "vArmour") + +LOG = logging.getLogger(__name__) + +REST_URL_PREFIX = '/api/v1.0' + + +class vArmourAPIException(Exception): + message = _("An unknown exception.") + + def __init__(self, **kwargs): + try: + self.err = self.message % kwargs + + except Exception: + self.err = self.message + + def __str__(self): + return self.err + + +class AuthenticationFailure(vArmourAPIException): + message = _("Invalid login credential.") + + +class vArmourRestAPI(object): + + def __init__(self): + LOG.debug(_('vArmourRestAPI: started')) + self.user = cfg.CONF.vArmour.username + self.passwd = cfg.CONF.vArmour.password + self.server = cfg.CONF.vArmour.director + self.port = cfg.CONF.vArmour.director_port + self.timeout = 3 + self.key = '' + + def auth(self): + headers = {} + enc = base64.b64encode(self.user + ':' + self.passwd) + headers['Authorization'] = 'Basic ' + enc + resp = self.rest_api('POST', va_utils.REST_URL_AUTH, None, headers) + if resp and resp['status'] == 200: + self.key = resp['body']['auth'] + return True + else: + raise AuthenticationFailure() + + def commit(self): + self.rest_api('POST', va_utils.REST_URL_COMMIT) + + def rest_api(self, method, url, body=None, headers=None): + url = REST_URL_PREFIX + url + if body: + body_data = json.dumps(body) + else: + body_data = '' + if not headers: + headers = {} + enc = base64.b64encode('%s:%s' % (self.user, self.key)) + headers['Authorization'] = 'Basic ' + enc + + LOG.debug(_("vArmourRestAPI: %(server)s %(port)s"), + {'server': self.server, 'port': self.port}) + + try: + action = "https://" + self.server + ":" + self.port + url + + LOG.debug(_("vArmourRestAPI Sending: " + "%(method)s %(action)s %(headers)s %(body_data)s"), + {'method': method, 'action': action, + 'headers': 
headers, 'body_data': body_data}) + + h = httplib2.Http(timeout=3, + disable_ssl_certificate_validation=True) + resp, resp_str = h.request(action, method, + body=body_data, + headers=headers) + + LOG.debug(_("vArmourRestAPI Response: %(status)s %(resp_str)s"), + {'status': resp.status, 'resp_str': resp_str}) + + if resp.status == 200: + return {'status': resp.status, + 'reason': resp.reason, + 'body': json.loads(resp_str)} + except Exception: + LOG.error(_('vArmourRestAPI: Could not establish HTTP connection')) + + def del_cfg_objs(self, url, prefix): + resp = self.rest_api('GET', url) + if resp and resp['status'] == 200: + olist = resp['body']['response'] + if not olist: + return + + for o in olist: + if o.startswith(prefix): + self.rest_api('DELETE', url + '/"name:%s"' % o) + self.commit() + + def count_cfg_objs(self, url, prefix): + count = 0 + resp = self.rest_api('GET', url) + if resp and resp['status'] == 200: + for o in resp['body']['response']: + if o.startswith(prefix): + count += 1 + + return count diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_router.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_router.py new file mode 100644 index 00000000..e496f119 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_router.py @@ -0,0 +1,349 @@ +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Gary Duan, vArmour Networks Inc. 
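+# Note (illustrative): this agent reuses the stock L3 agent loop but, instead
+# of programming Linux namespaces, it pushes zones, interface addresses and
+# SNAT/DNAT rules to a vArmour director over REST (varmour_api.vArmourRestAPI).
+# Director connection settings come from the [vArmour] section; the defaults
+# registered in varmour_api are shown here, actual values are deployment
+# specific:
+#
+#     [vArmour]
+#     director = localhost
+#     director_port = 443
+#     username = varmour
+#     password = varmour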
+# + +import sys + +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent import l3_agent +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.common import config as common_config +from neutron.common import constants as l3_constants +from neutron.common import topics +from neutron.openstack.common import log as logging +from neutron.openstack.common import service +from neutron import service as neutron_service +from neutron.services.firewall.agents.l3reference import firewall_l3_agent +from neutron.services.firewall.agents.varmour import varmour_api +from neutron.services.firewall.agents.varmour import varmour_utils as va_utils + + +LOG = logging.getLogger(__name__) + + +class vArmourL3NATAgent(l3_agent.L3NATAgent, + firewall_l3_agent.FWaaSL3AgentRpcCallback): + def __init__(self, host, conf=None): + LOG.debug(_('vArmourL3NATAgent: __init__')) + self.rest = varmour_api.vArmourRestAPI() + super(vArmourL3NATAgent, self).__init__(host, conf) + + def _destroy_router_namespaces(self, only_router_id=None): + return + + def _destroy_router_namespace(self, namespace): + return + + def _create_router_namespace(self, ri): + return + + def _router_added(self, router_id, router): + LOG.debug(_("_router_added: %s"), router_id) + ri = l3_agent.RouterInfo(router_id, self.root_helper, + self.conf.use_namespaces, router) + self.router_info[router_id] = ri + super(vArmourL3NATAgent, self).process_router_add(ri) + + def _router_removed(self, router_id): + LOG.debug(_("_router_removed: %s"), router_id) + + ri = self.router_info[router_id] + if ri: + ri.router['gw_port'] = None + ri.router[l3_constants.INTERFACE_KEY] = [] + ri.router[l3_constants.FLOATINGIP_KEY] = [] + self.process_router(ri) + + name = va_utils.get_snat_rule_name(ri) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name) + + name = va_utils.get_dnat_rule_name(ri) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name) + + name = va_utils.get_trusted_zone_name(ri) + self._va_unset_zone_interfaces(name, True) + + name = va_utils.get_untrusted_zone_name(ri) + self._va_unset_zone_interfaces(name, True) + + del self.router_info[router_id] + + def _spawn_metadata_proxy(self, router_id, ns_name): + return + + def _destroy_metadata_proxy(self, router_id, ns_name): + return + + def _set_subnet_info(self, port): + ips = port['fixed_ips'] + if not ips: + raise Exception(_("Router port %s has no IP address") % port['id']) + return + if len(ips) > 1: + LOG.warn(_("Ignoring multiple IPs on router port %s"), port['id']) + prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen + port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) + + def _va_unset_zone_interfaces(self, zone_name, remove_zone=False): + # return True if zone exists; otherwise, return False + LOG.debug(_("_va_unset_zone_interfaces: %s"), zone_name) + resp = self.rest.rest_api('GET', va_utils.REST_URL_CONF_ZONE) + if resp and resp['status'] == 200: + zlist = resp['body']['response'] + for zn in zlist: + if zn == zone_name: + commit = False + + if 'interface' in zlist[zn]: + for intf in zlist[zn]['interface']: + self.rest.rest_api('DELETE', + va_utils.REST_URL_CONF + + va_utils.REST_ZONE_NAME % zn + + va_utils.REST_INTF_NAME % intf) + commit = True + if remove_zone: + self.rest.rest_api('DELETE', + va_utils.REST_URL_CONF + + va_utils.REST_ZONE_NAME % zn) + commit = True 
+ + if commit: + self.rest.commit() + + return True + + return False + + def _va_pif_2_lif(self, pif): + return pif + '.0' + + def _va_set_interface_ip(self, pif, cidr): + LOG.debug(_("_va_set_interface_ip: %(pif)s %(cidr)s"), + {'pif': pif, 'cidr': cidr}) + + lif = self._va_pif_2_lif(pif) + obj = va_utils.REST_INTF_NAME % pif + va_utils.REST_LOGIC_NAME % lif + body = { + 'name': lif, + 'family': 'ipv4', + 'address': cidr + } + self.rest.rest_api('PUT', va_utils.REST_URL_CONF + obj, body) + + def _va_get_port_name(self, port_list, name): + if name: + for p in port_list: + if p['VM name'] == name: + return p['name'] + + def _va_config_trusted_zone(self, ri, plist): + zone = va_utils.get_trusted_zone_name(ri) + LOG.debug(_("_va_config_trusted_zone: %s"), zone) + + body = { + 'name': zone, + 'type': 'L3', + 'interface': [] + } + + if not self._va_unset_zone_interfaces(zone): + # if zone doesn't exist, create it + self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + # add new internal ports to trusted zone + for p in ri.internal_ports: + if p['admin_state_up']: + dev = self.get_internal_device_name(p['id']) + pif = self._va_get_port_name(plist, dev) + if pif: + lif = self._va_pif_2_lif(pif) + if lif not in body['interface']: + body['interface'].append(lif) + + self._va_set_interface_ip(pif, p['ip_cidr']) + + if body['interface']: + self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + def _va_config_untrusted_zone(self, ri, plist): + zone = va_utils.get_untrusted_zone_name(ri) + LOG.debug(_("_va_config_untrusted_zone: %s"), zone) + + body = { + 'name': zone, + 'type': 'L3', + 'interface': [] + } + + if not self._va_unset_zone_interfaces(zone): + # if zone doesn't exist, create it + self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + # add new gateway ports to untrusted zone + if ri.ex_gw_port: + LOG.debug(_("_va_config_untrusted_zone: gw=%r"), ri.ex_gw_port) + dev = self.get_external_device_name(ri.ex_gw_port['id']) + pif = self._va_get_port_name(plist, dev) + if pif: + lif = self._va_pif_2_lif(pif) + + self._va_set_interface_ip(pif, ri.ex_gw_port['ip_cidr']) + + body['interface'].append(lif) + self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + def _va_config_router_snat_rules(self, ri, plist): + LOG.debug(_('_va_config_router_snat_rules: %s'), ri.router['id']) + + prefix = va_utils.get_snat_rule_name(ri) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix) + + if not ri.enable_snat: + return + + for idx, p in enumerate(ri.internal_ports): + if p['admin_state_up']: + dev = self.get_internal_device_name(p['id']) + pif = self._va_get_port_name(plist, dev) + if pif: + net = netaddr.IPNetwork(p['ip_cidr']) + body = { + 'name': '%s_%d' % (prefix, idx), + 'ingress-context-type': 'interface', + 'ingress-index': self._va_pif_2_lif(pif), + 'source-address': [ + [str(netaddr.IPAddress(net.first + 2)), + str(netaddr.IPAddress(net.last - 1))] + ], + 'flag': 'interface translate-source' + } + self.rest.rest_api('POST', + va_utils.REST_URL_CONF_NAT_RULE, + body) + + if ri.internal_ports: + self.rest.commit() + + def _va_config_floating_ips(self, ri): + LOG.debug(_('_va_config_floating_ips: %s'), ri.router['id']) + + prefix = va_utils.get_dnat_rule_name(ri) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix) + + # add new dnat rules + for idx, fip in enumerate(ri.floating_ips): + body = { + 'name': '%s_%d' % (prefix, idx), + 'ingress-context-type': 
'zone', + 'ingress-index': va_utils.get_untrusted_zone_name(ri), + 'destination-address': [[fip['floating_ip_address'], + fip['floating_ip_address']]], + 'static': [fip['fixed_ip_address'], fip['fixed_ip_address']], + 'flag': 'translate-destination' + } + self.rest.rest_api('POST', va_utils.REST_URL_CONF_NAT_RULE, body) + + if ri.floating_ips: + self.rest.commit() + + def process_router(self, ri): + LOG.debug(_("process_router: %s"), ri.router['id']) + super(vArmourL3NATAgent, self).process_router(ri) + + self.rest.auth() + + # read internal port name and configuration port name map + resp = self.rest.rest_api('GET', va_utils.REST_URL_INTF_MAP) + if resp and resp['status'] == 200: + try: + plist = resp['body']['response'] + except ValueError: + LOG.warn(_("Unable to parse interface mapping.")) + return + else: + LOG.warn(_("Unable to read interface mapping.")) + return + + if ri.ex_gw_port: + self._set_subnet_info(ri.ex_gw_port) + self._va_config_trusted_zone(ri, plist) + self._va_config_untrusted_zone(ri, plist) + self._va_config_router_snat_rules(ri, plist) + self._va_config_floating_ips(ri) + + def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, + interface_name, action): + return + + def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address): + return + + def external_gateway_added(self, ri, ex_gw_port, + interface_name, internal_cidrs): + LOG.debug(_("external_gateway_added: %s"), ri.router['id']) + + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=l3_agent.EXTERNAL_DEV_PREFIX) + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ri.ns_name) + + def _update_routing_table(self, ri, operation, route): + return + + +class vArmourL3NATAgentWithStateReport(vArmourL3NATAgent, + l3_agent.L3NATAgentWithStateReport): + pass + + +def main(): + conf = cfg.CONF + conf.register_opts(vArmourL3NATAgent.OPTS) + config.register_interface_driver_opts_helper(conf) + config.register_use_namespaces_opts_helper(conf) + config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + conf.register_opts(interface.OPTS) + conf.register_opts(external_process.OPTS) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-l3-agent', + topic=topics.L3_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager='neutron.services.firewall.agents.varmour.varmour_router.' + 'vArmourL3NATAgentWithStateReport') + service.launch(server).wait() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_utils.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_utils.py new file mode 100644 index 00000000..d69733de --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/agents/varmour/varmour_utils.py @@ -0,0 +1,72 @@ +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Gary Duan, gduan@varmour.com, vArmour Networks + +ROUTER_OBJ_PREFIX = 'r-' +OBJ_PREFIX_LEN = 8 +TRUST_ZONE = '_z_trust' +UNTRUST_ZONE = '_z_untrust' +SNAT_RULE = '_snat' +DNAT_RULE = '_dnat' +ROUTER_POLICY = '_p' + +REST_URL_CONF = '/config' +REST_URL_AUTH = '/auth' +REST_URL_COMMIT = '/commit' +REST_URL_INTF_MAP = '/operation/interface/mapping' + +REST_URL_CONF_NAT_RULE = REST_URL_CONF + '/nat/rule' +REST_URL_CONF_ZONE = REST_URL_CONF + '/zone' +REST_URL_CONF_POLICY = REST_URL_CONF + '/policy' +REST_URL_CONF_ADDR = REST_URL_CONF + '/address' +REST_URL_CONF_SERVICE = REST_URL_CONF + '/service' + +REST_ZONE_NAME = '/zone/"name:%s"' +REST_INTF_NAME = '/interface/"name:%s"' +REST_LOGIC_NAME = '/logical/"name:%s"' +REST_SERVICE_NAME = '/service/"name:%s"/rule' + + +def get_router_object_prefix(ri): + return ROUTER_OBJ_PREFIX + ri.router['id'][:OBJ_PREFIX_LEN] + + +def get_firewall_object_prefix(ri, fw): + return get_router_object_prefix(ri) + '-' + fw['id'][:OBJ_PREFIX_LEN] + + +def get_trusted_zone_name(ri): + return get_router_object_prefix(ri) + TRUST_ZONE + + +def get_untrusted_zone_name(ri): + return get_router_object_prefix(ri) + UNTRUST_ZONE + + +def get_snat_rule_name(ri): + return get_router_object_prefix(ri) + SNAT_RULE + + +def get_dnat_rule_name(ri): + return get_router_object_prefix(ri) + DNAT_RULE + + +def get_router_policy_name(ri): + return get_router_object_prefix(ri) + ROUTER_POLICY + + +def get_firewall_policy_name(ri, fw, rule): + return get_firewall_object_prefix(ri, fw) + rule['id'][:OBJ_PREFIX_LEN] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/__init__.py new file mode 100644 index 00000000..a6a8955d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/fwaas_base.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/fwaas_base.py new file mode 100644 index 00000000..e57c7eaa --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/fwaas_base.py @@ -0,0 +1,98 @@ +# Copyright 2013 Dell Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc.
+
+import abc
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class FwaasDriverBase(object):
+    """Firewall as a Service Driver base class.
+
+    Using the FwaasDriver class, an instance of an L3 perimeter Firewall
+    can be created. The firewall co-exists with the L3 agent.
+
+    One instance is created for each tenant. One firewall policy
+    is associated with each tenant (in the Havana release).
+
+    The Firewall can be visualized as having two zones (in the Havana
+    release), trusted and untrusted.
+
+    All the 'internal' interfaces of the Neutron router are treated as
+    trusted. The interface connected to the 'external network' is treated
+    as untrusted.
+
+    The policy is applied on traffic ingressing/egressing interfaces on
+    the trusted zone. This implies that the policy will be applied to traffic
+    passing from
+        - trusted to untrusted zones
+        - untrusted to trusted zones
+        - trusted to trusted zones
+
+    Policy WILL NOT be applied for traffic from untrusted to untrusted zones.
+    This is not a problem in the Havana release as there is only one interface
+    connected to the external network.
+
+    Since the policy is applied on the internal interfaces, the traffic
+    will not be NATed to the floating IP. For incoming traffic, the
+    traffic will get NATed to the internal IP address before it hits
+    the firewall rules. So, while writing the rules, care should be
+    taken if using rules based on the floating IP.
+
+    Firewall rule addition/deletion/insertion/update are done by the
+    management console. When the policy is sent to the driver, the complete
+    policy is sent and the whole policy has to be applied atomically. The
+    firewall rules will not get updated individually. This is to avoid
+    problems related to out-of-order notifications or inconsistent behaviour
+    caused by partial application of rules.
+    """
+
+    @abc.abstractmethod
+    def create_firewall(self, apply_list, firewall):
+        """Create the Firewall with default (drop all) policy.
+
+        The default policy will be applied on all the interfaces of the
+        trusted zone.
+        """
+        pass
+
+    @abc.abstractmethod
+    def delete_firewall(self, apply_list, firewall):
+        """Delete firewall.
+
+        Removes all policies created by this instance and frees up
+        all the resources.
+        """
+        pass
+
+    @abc.abstractmethod
+    def update_firewall(self, apply_list, firewall):
+        """Apply the policy on all trusted interfaces.
+
+        Remove the previous policy and apply the new policy on all trusted
+        interfaces.
+        """
+        pass
+
+    @abc.abstractmethod
+    def apply_default_policy(self, apply_list, firewall):
+        """Apply the default policy on all trusted interfaces.
+
+        Remove the current policy and apply the default policy on all trusted
+        interfaces.
+        """
+        pass
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/linux/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/linux/__init__.py
new file mode 100644
index 00000000..a6a8955d
--- /dev/null
+++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/linux/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2013 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/linux/iptables_fwaas.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/linux/iptables_fwaas.py
new file mode 100644
index 00000000..8dc3fd9f
--- /dev/null
+++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/linux/iptables_fwaas.py
@@ -0,0 +1,273 @@
+# Copyright 2013 Dell Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc.
+
+from neutron.agent.linux import iptables_manager
+from neutron.extensions import firewall as fw_ext
+from neutron.openstack.common import log as logging
+from neutron.services.firewall.drivers import fwaas_base
+
+LOG = logging.getLogger(__name__)
+FWAAS_DRIVER_NAME = 'Fwaas iptables driver'
+FWAAS_DEFAULT_CHAIN = 'fwaas-default-policy'
+INGRESS_DIRECTION = 'ingress'
+EGRESS_DIRECTION = 'egress'
+CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i',
+                     EGRESS_DIRECTION: 'o'}
+
+""" Firewall rules are applied on the internal interfaces of the Neutron
+    router. Packets ingressing the tenant's network leave the router through
+    its internal interfaces, so iptables sees them in the output direction
+    on those interfaces.
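+    Hence, in the chains built below, the ingress chain is matched with
+    '-o qr-+' and the egress chain with '-i qr-+' (see IPTABLES_DIR and
+    _enable_policy_chain); 'qr-' is the prefix of the router's internal
+    devices.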
+""" +IPTABLES_DIR = {INGRESS_DIRECTION: '-o', + EGRESS_DIRECTION: '-i'} +IPV4 = 'ipv4' +IPV6 = 'ipv6' +IP_VER_TAG = {IPV4: 'v4', + IPV6: 'v6'} + + +class IptablesFwaasDriver(fwaas_base.FwaasDriverBase): + """IPTables driver for Firewall As A Service.""" + + def __init__(self): + LOG.debug(_("Initializing fwaas iptables driver")) + + def create_firewall(self, apply_list, firewall): + LOG.debug(_('Creating firewall %(fw_id)s for tenant %(tid)s)'), + {'fw_id': firewall['id'], 'tid': firewall['tenant_id']}) + try: + if firewall['admin_state_up']: + self._setup_firewall(apply_list, firewall) + else: + self.apply_default_policy(apply_list, firewall) + except (LookupError, RuntimeError): + # catch known library exceptions and raise Fwaas generic exception + LOG.exception(_("Failed to create firewall: %s"), firewall['id']) + raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME) + + def delete_firewall(self, apply_list, firewall): + LOG.debug(_('Deleting firewall %(fw_id)s for tenant %(tid)s)'), + {'fw_id': firewall['id'], 'tid': firewall['tenant_id']}) + fwid = firewall['id'] + try: + for router_info in apply_list: + ipt_mgr = router_info.iptables_manager + self._remove_chains(fwid, ipt_mgr) + self._remove_default_chains(ipt_mgr) + # apply the changes immediately (no defer in firewall path) + ipt_mgr.defer_apply_off() + except (LookupError, RuntimeError): + # catch known library exceptions and raise Fwaas generic exception + LOG.exception(_("Failed to delete firewall: %s"), fwid) + raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME) + + def update_firewall(self, apply_list, firewall): + LOG.debug(_('Updating firewall %(fw_id)s for tenant %(tid)s)'), + {'fw_id': firewall['id'], 'tid': firewall['tenant_id']}) + try: + if firewall['admin_state_up']: + self._setup_firewall(apply_list, firewall) + else: + self.apply_default_policy(apply_list, firewall) + except (LookupError, RuntimeError): + # catch known library exceptions and raise Fwaas generic exception + LOG.exception(_("Failed to update firewall: %s"), firewall['id']) + raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME) + + def apply_default_policy(self, apply_list, firewall): + LOG.debug(_('Applying firewall %(fw_id)s for tenant %(tid)s)'), + {'fw_id': firewall['id'], 'tid': firewall['tenant_id']}) + fwid = firewall['id'] + try: + for router_info in apply_list: + ipt_mgr = router_info.iptables_manager + + # the following only updates local memory; no hole in FW + self._remove_chains(fwid, ipt_mgr) + self._remove_default_chains(ipt_mgr) + + # create default 'DROP ALL' policy chain + self._add_default_policy_chain_v4v6(ipt_mgr) + self._enable_policy_chain(fwid, ipt_mgr) + + # apply the changes immediately (no defer in firewall path) + ipt_mgr.defer_apply_off() + except (LookupError, RuntimeError): + # catch known library exceptions and raise Fwaas generic exception + LOG.exception(_("Failed to apply default policy on firewall: %s"), + fwid) + raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME) + + def _setup_firewall(self, apply_list, firewall): + fwid = firewall['id'] + for router_info in apply_list: + ipt_mgr = router_info.iptables_manager + + # the following only updates local memory; no hole in FW + self._remove_chains(fwid, ipt_mgr) + self._remove_default_chains(ipt_mgr) + + # create default 'DROP ALL' policy chain + self._add_default_policy_chain_v4v6(ipt_mgr) + #create chain based on configured policy + self._setup_chains(firewall, ipt_mgr) + + # apply the changes immediately (no 
defer in firewall path) + ipt_mgr.defer_apply_off() + + def _get_chain_name(self, fwid, ver, direction): + return '%s%s%s' % (CHAIN_NAME_PREFIX[direction], + IP_VER_TAG[ver], + fwid) + + def _setup_chains(self, firewall, ipt_mgr): + """Create Fwaas chain using the rules in the policy + """ + fw_rules_list = firewall['firewall_rule_list'] + fwid = firewall['id'] + + #default rules for invalid packets and established sessions + invalid_rule = self._drop_invalid_packets_rule() + est_rule = self._allow_established_rule() + + for ver in [IPV4, IPV6]: + if ver == IPV4: + table = ipt_mgr.ipv4['filter'] + else: + table = ipt_mgr.ipv6['filter'] + ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION) + ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION) + for name in [ichain_name, ochain_name]: + table.add_chain(name) + table.add_rule(name, invalid_rule) + table.add_rule(name, est_rule) + + for rule in fw_rules_list: + if not rule['enabled']: + continue + iptbl_rule = self._convert_fwaas_to_iptables_rule(rule) + if rule['ip_version'] == 4: + ver = IPV4 + table = ipt_mgr.ipv4['filter'] + else: + ver = IPV6 + table = ipt_mgr.ipv6['filter'] + ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION) + ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION) + table.add_rule(ichain_name, iptbl_rule) + table.add_rule(ochain_name, iptbl_rule) + self._enable_policy_chain(fwid, ipt_mgr) + + def _remove_default_chains(self, nsid): + """Remove fwaas default policy chain.""" + self._remove_chain_by_name(IPV4, FWAAS_DEFAULT_CHAIN, nsid) + self._remove_chain_by_name(IPV6, FWAAS_DEFAULT_CHAIN, nsid) + + def _remove_chains(self, fwid, ipt_mgr): + """Remove fwaas policy chain.""" + for ver in [IPV4, IPV6]: + for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]: + chain_name = self._get_chain_name(fwid, ver, direction) + self._remove_chain_by_name(ver, chain_name, ipt_mgr) + + def _add_default_policy_chain_v4v6(self, ipt_mgr): + ipt_mgr.ipv4['filter'].add_chain(FWAAS_DEFAULT_CHAIN) + ipt_mgr.ipv4['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP') + ipt_mgr.ipv6['filter'].add_chain(FWAAS_DEFAULT_CHAIN) + ipt_mgr.ipv6['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP') + + def _remove_chain_by_name(self, ver, chain_name, ipt_mgr): + if ver == IPV4: + ipt_mgr.ipv4['filter'].ensure_remove_chain(chain_name) + else: + ipt_mgr.ipv6['filter'].ensure_remove_chain(chain_name) + + def _add_rules_to_chain(self, ipt_mgr, ver, chain_name, rules): + if ver == IPV4: + table = ipt_mgr.ipv4['filter'] + else: + table = ipt_mgr.ipv6['filter'] + for rule in rules: + table.add_rule(chain_name, rule) + + def _enable_policy_chain(self, fwid, ipt_mgr): + bname = iptables_manager.binary_name + + for (ver, tbl) in [(IPV4, ipt_mgr.ipv4['filter']), + (IPV6, ipt_mgr.ipv6['filter'])]: + for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]: + chain_name = self._get_chain_name(fwid, ver, direction) + chain_name = iptables_manager.get_chain_name(chain_name) + if chain_name in tbl.chains: + jump_rule = ['%s qr-+ -j %s-%s' % (IPTABLES_DIR[direction], + bname, chain_name)] + self._add_rules_to_chain(ipt_mgr, ver, 'FORWARD', + jump_rule) + + #jump to DROP_ALL policy + chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN) + jump_rule = ['-o qr-+ -j %s-%s' % (bname, chain_name)] + self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule) + self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule) + + #jump to DROP_ALL policy + chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN) + 
jump_rule = ['-i qr-+ -j %s-%s' % (bname, chain_name)] + self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule) + self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule) + + def _convert_fwaas_to_iptables_rule(self, rule): + action = rule.get('action') == 'allow' and 'ACCEPT' or 'DROP' + args = [self._protocol_arg(rule.get('protocol')), + self._port_arg('dport', + rule.get('protocol'), + rule.get('destination_port')), + self._port_arg('sport', + rule.get('protocol'), + rule.get('source_port')), + self._ip_prefix_arg('s', rule.get('source_ip_address')), + self._ip_prefix_arg('d', rule.get('destination_ip_address')), + self._action_arg(action)] + + iptables_rule = ' '.join(args) + return iptables_rule + + def _drop_invalid_packets_rule(self): + return '-m state --state INVALID -j DROP' + + def _allow_established_rule(self): + return '-m state --state ESTABLISHED,RELATED -j ACCEPT' + + def _action_arg(self, action): + if action: + return '-j %s' % action + return '' + + def _protocol_arg(self, protocol): + if protocol: + return '-p %s' % protocol + return '' + + def _port_arg(self, direction, protocol, port): + if not (protocol in ['udp', 'tcp'] and port): + return '' + return '--%s %s' % (direction, port) + + def _ip_prefix_arg(self, direction, ip_prefix): + if ip_prefix: + return '-%s %s' % (direction, ip_prefix) + return '' diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/varmour/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/varmour/__init__.py new file mode 100644 index 00000000..a6a8955d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/varmour/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/varmour/varmour_fwaas.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/varmour/varmour_fwaas.py new file mode 100644 index 00000000..57ca02bf --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/drivers/varmour/varmour_fwaas.py @@ -0,0 +1,205 @@ +# Copyright 2013 vArmour Networks Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Gary Duan, gduan@varmour.com, vArmour Networks + +from neutron.openstack.common import log as logging +from neutron.services.firewall.agents.varmour import varmour_api +from neutron.services.firewall.agents.varmour import varmour_utils as va_utils +from neutron.services.firewall.drivers import fwaas_base + +LOG = logging.getLogger(__name__) + + +class vArmourFwaasDriver(fwaas_base.FwaasDriverBase): + def __init__(self): + LOG.debug(_("Initializing fwaas vArmour driver")) + + self.rest = varmour_api.vArmourRestAPI() + + def create_firewall(self, apply_list, firewall): + LOG.debug(_('create_firewall (%s)'), firewall['id']) + + return self.update_firewall(apply_list, firewall) + + def update_firewall(self, apply_list, firewall): + LOG.debug(_("update_firewall (%s)"), firewall['id']) + + if firewall['admin_state_up']: + return self._update_firewall(apply_list, firewall) + else: + return self.apply_default_policy(apply_list, firewall) + + def delete_firewall(self, apply_list, firewall): + LOG.debug(_("delete_firewall (%s)"), firewall['id']) + + return self.apply_default_policy(apply_list, firewall) + + def apply_default_policy(self, apply_list, firewall): + LOG.debug(_("apply_default_policy (%s)"), firewall['id']) + + self.rest.auth() + + for ri in apply_list: + self._clear_policy(ri, firewall) + + return True + + def _update_firewall(self, apply_list, firewall): + LOG.debug(_("Updating firewall (%s)"), firewall['id']) + + self.rest.auth() + + for ri in apply_list: + self._clear_policy(ri, firewall) + self._setup_policy(ri, firewall) + + return True + + def _setup_policy(self, ri, fw): + # create zones no matter if they exist. Interfaces are added by router + body = { + 'type': 'L3', + 'interface': [] + } + + body['name'] = va_utils.get_trusted_zone_name(ri) + self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body) + body['name'] = va_utils.get_untrusted_zone_name(ri) + self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body) + self.rest.commit() + + servs = dict() + addrs = dict() + for rule in fw['firewall_rule_list']: + if not rule['enabled']: + continue + + if rule['ip_version'] == 4: + service = self._make_service(ri, fw, rule, servs) + s_addr = self._make_address(ri, fw, rule, addrs, True) + d_addr = self._make_address(ri, fw, rule, addrs, False) + + policy = va_utils.get_firewall_policy_name(ri, fw, rule) + z0 = va_utils.get_trusted_zone_name(ri) + z1 = va_utils.get_untrusted_zone_name(ri) + body = self._make_policy(policy + '_0', rule, + z0, z0, s_addr, d_addr, service) + self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body) + body = self._make_policy(policy + '_1', rule, + z0, z1, s_addr, d_addr, service) + self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body) + body = self._make_policy(policy + '_2', rule, + z1, z0, s_addr, d_addr, service) + self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body) + + self.rest.commit() + else: + LOG.warn(_("Unsupported IP version rule.")) + + def _clear_policy(self, ri, fw): + prefix = va_utils.get_firewall_object_prefix(ri, fw) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_POLICY, prefix) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_ADDR, prefix) + self.rest.del_cfg_objs(va_utils.REST_URL_CONF_SERVICE, prefix) + + def _make_service(self, ri, fw, rule, servs): + prefix = va_utils.get_firewall_object_prefix(ri, fw) + + if rule.get('protocol'): + key = rule.get('protocol') + if rule.get('source_port'): + key += '-' + rule.get('source_port') + if rule.get('destination_port'): + key += '-' + 
rule.get('destination_port') + else: + return + + if key in servs: + name = '%s_%d' % (prefix, servs[key]) + else: + # create new service object with index + idx = len(servs) + servs[key] = idx + name = '%s_%d' % (prefix, idx) + + body = {'name': name} + self.rest.rest_api('POST', + va_utils.REST_URL_CONF_SERVICE, + body) + body = self._make_service_rule(rule) + self.rest.rest_api('POST', + va_utils.REST_URL_CONF + + va_utils.REST_SERVICE_NAME % name, + body) + self.rest.commit() + + return name + + def _make_service_rule(self, rule): + body = { + 'name': '1', + 'protocol': rule.get('protocol') + } + if 'source_port' in rule: + body['source-start'] = rule['source_port'] + body['source-end'] = rule['source_port'] + if 'destination_port' in rule: + body['dest-start'] = rule['destination_port'] + body['dest-end'] = rule['destination_port'] + + return body + + def _make_address(self, ri, fw, rule, addrs, is_src): + prefix = va_utils.get_firewall_object_prefix(ri, fw) + + if is_src: + key = rule.get('source_ip_address') + else: + key = rule.get('destination_ip_address') + + if not key: + return + + if key in addrs: + name = '%s_%d' % (prefix, addrs[key]) + else: + # create new address object with idx + idx = len(addrs) + addrs[key] = idx + name = '%s_%d' % (prefix, idx) + + body = { + 'name': name, + 'type': 'ipv4', + 'ipv4': key + } + self.rest.rest_api('POST', va_utils.REST_URL_CONF_ADDR, body) + self.rest.commit() + + return name + + def _make_policy(self, name, rule, zone0, zone1, s_addr, d_addr, service): + body = { + 'name': name, + 'action': 'permit' if rule.get('action') == 'allow' else 'deny', + 'from': zone0, + 'to': zone1, + 'match-source-address': [s_addr or 'Any'], + 'match-dest-address': [d_addr or 'Any'], + 'match-service': [service or 'Any'] + } + + return body diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/fwaas_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/fwaas_plugin.py new file mode 100644 index 00000000..27a54393 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/firewall/fwaas_plugin.py @@ -0,0 +1,297 @@ +# Copyright 2013 Big Switch Networks, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. 
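+#
+# Note (illustrative) on the RPC flow implemented below: FirewallPlugin
+# consumes on topics.FIREWALL_PLUGIN, where FirewallCallbacks handles
+# set_firewall_status / firewall_deleted / get_firewalls_for_tenant calls
+# coming from the agents, while FirewallAgentApi fanout-casts
+# create/update/delete_firewall to the L3 agents on topics.L3_AGENT. The
+# plugin is typically enabled via service_plugins in neutron.conf, e.g.
+# (assumed deployment setting):
+#
+#     service_plugins = neutron.services.firewall.fwaas_plugin.FirewallPlugin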
+ +from oslo.config import cfg + +from neutron.common import exceptions as n_exception +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron import context as neutron_context +from neutron.db import api as qdbapi +from neutron.db.firewall import firewall_db +from neutron.extensions import firewall as fw_ext +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as const + + +LOG = logging.getLogger(__name__) + + +class FirewallCallbacks(n_rpc.RpcCallback): + RPC_API_VERSION = '1.0' + + def __init__(self, plugin): + super(FirewallCallbacks, self).__init__() + self.plugin = plugin + + def set_firewall_status(self, context, firewall_id, status, **kwargs): + """Agent uses this to set a firewall's status.""" + LOG.debug(_("set_firewall_status() called")) + with context.session.begin(subtransactions=True): + fw_db = self.plugin._get_firewall(context, firewall_id) + # ignore changing status if firewall expects to be deleted + # That case means that while some pending operation has been + # performed on the backend, neutron server received delete request + # and changed firewall status to const.PENDING_DELETE + if fw_db.status == const.PENDING_DELETE: + LOG.debug(_("Firewall %(fw_id)s in PENDING_DELETE state, " + "not changing to %(status)s"), + {'fw_id': firewall_id, 'status': status}) + return False + #TODO(xuhanp): Remove INACTIVE status and use DOWN to + # be consistent with other network resources + if status in (const.ACTIVE, const.INACTIVE, const.DOWN): + fw_db.status = status + return True + else: + fw_db.status = const.ERROR + return False + + def firewall_deleted(self, context, firewall_id, **kwargs): + """Agent uses this to indicate firewall is deleted.""" + LOG.debug(_("firewall_deleted() called")) + with context.session.begin(subtransactions=True): + fw_db = self.plugin._get_firewall(context, firewall_id) + # allow to delete firewalls in ERROR state + if fw_db.status in (const.PENDING_DELETE, const.ERROR): + self.plugin.delete_db_firewall_object(context, firewall_id) + return True + else: + LOG.warn(_('Firewall %(fw)s unexpectedly deleted by agent, ' + 'status was %(status)s'), + {'fw': firewall_id, 'status': fw_db.status}) + fw_db.status = const.ERROR + return False + + def get_firewalls_for_tenant(self, context, **kwargs): + """Agent uses this to get all firewalls and rules for a tenant.""" + LOG.debug(_("get_firewalls_for_tenant() called")) + fw_list = [ + self.plugin._make_firewall_dict_with_rules(context, fw['id']) + for fw in self.plugin.get_firewalls(context) + ] + return fw_list + + def get_firewalls_for_tenant_without_rules(self, context, **kwargs): + """Agent uses this to get all firewalls for a tenant.""" + LOG.debug(_("get_firewalls_for_tenant_without_rules() called")) + fw_list = [fw for fw in self.plugin.get_firewalls(context)] + return fw_list + + def get_tenants_with_firewalls(self, context, **kwargs): + """Agent uses this to get all tenants that have firewalls.""" + LOG.debug(_("get_tenants_with_firewalls() called")) + ctx = neutron_context.get_admin_context() + fw_list = self.plugin.get_firewalls(ctx) + fw_tenant_list = list(set(fw['tenant_id'] for fw in fw_list)) + return fw_tenant_list + + +class FirewallAgentApi(n_rpc.RpcProxy): + """Plugin side of plugin to agent RPC API.""" + + API_VERSION = '1.0' + + def __init__(self, topic, host): + super(FirewallAgentApi, self).__init__(topic, self.API_VERSION) + self.host = host + + def create_firewall(self, context, firewall): + return 
self.fanout_cast( + context, + self.make_msg('create_firewall', firewall=firewall, + host=self.host), + topic=self.topic + ) + + def update_firewall(self, context, firewall): + return self.fanout_cast( + context, + self.make_msg('update_firewall', firewall=firewall, + host=self.host), + topic=self.topic + ) + + def delete_firewall(self, context, firewall): + return self.fanout_cast( + context, + self.make_msg('delete_firewall', firewall=firewall, + host=self.host), + topic=self.topic + ) + + +class FirewallCountExceeded(n_exception.Conflict): + + """Reference implementation specific exception for firewall count. + + Only one firewall is supported per tenant. When a second + firewall is tried to be created, this exception will be raised. + """ + message = _("Exceeded allowed count of firewalls for tenant " + "%(tenant_id)s. Only one firewall is supported per tenant.") + + +class FirewallPlugin(firewall_db.Firewall_db_mixin): + + """Implementation of the Neutron Firewall Service Plugin. + + This class manages the workflow of FWaaS request/response. + Most DB related works are implemented in class + firewall_db.Firewall_db_mixin. + """ + supported_extension_aliases = ["fwaas"] + + def __init__(self): + """Do the initialization for the firewall service plugin here.""" + qdbapi.register_models() + + self.endpoints = [FirewallCallbacks(self)] + + self.conn = n_rpc.create_connection(new=True) + self.conn.create_consumer( + topics.FIREWALL_PLUGIN, self.endpoints, fanout=False) + self.conn.consume_in_threads() + + self.agent_rpc = FirewallAgentApi( + topics.L3_AGENT, + cfg.CONF.host + ) + + def _make_firewall_dict_with_rules(self, context, firewall_id): + firewall = self.get_firewall(context, firewall_id) + fw_policy_id = firewall['firewall_policy_id'] + if fw_policy_id: + fw_policy = self.get_firewall_policy(context, fw_policy_id) + fw_rules_list = [self.get_firewall_rule( + context, rule_id) for rule_id in fw_policy['firewall_rules']] + firewall['firewall_rule_list'] = fw_rules_list + else: + firewall['firewall_rule_list'] = [] + # FIXME(Sumit): If the size of the firewall object we are creating + # here exceeds the largest message size supported by rabbit/qpid + # then we will have a problem. 
+ return firewall + + def _rpc_update_firewall(self, context, firewall_id): + status_update = {"firewall": {"status": const.PENDING_UPDATE}} + fw = super(FirewallPlugin, self).update_firewall(context, firewall_id, + status_update) + if fw: + fw_with_rules = ( + self._make_firewall_dict_with_rules(context, + firewall_id)) + self.agent_rpc.update_firewall(context, fw_with_rules) + + def _rpc_update_firewall_policy(self, context, firewall_policy_id): + firewall_policy = self.get_firewall_policy(context, firewall_policy_id) + if firewall_policy: + for firewall_id in firewall_policy['firewall_list']: + self._rpc_update_firewall(context, firewall_id) + + def _ensure_update_firewall(self, context, firewall_id): + fwall = self.get_firewall(context, firewall_id) + if fwall['status'] in [const.PENDING_CREATE, + const.PENDING_UPDATE, + const.PENDING_DELETE]: + raise fw_ext.FirewallInPendingState(firewall_id=firewall_id, + pending_state=fwall['status']) + + def _ensure_update_firewall_policy(self, context, firewall_policy_id): + firewall_policy = self.get_firewall_policy(context, firewall_policy_id) + if firewall_policy and 'firewall_list' in firewall_policy: + for firewall_id in firewall_policy['firewall_list']: + self._ensure_update_firewall(context, firewall_id) + + def _ensure_update_firewall_rule(self, context, firewall_rule_id): + fw_rule = self.get_firewall_rule(context, firewall_rule_id) + if 'firewall_policy_id' in fw_rule and fw_rule['firewall_policy_id']: + self._ensure_update_firewall_policy(context, + fw_rule['firewall_policy_id']) + + def create_firewall(self, context, firewall): + LOG.debug(_("create_firewall() called")) + tenant_id = self._get_tenant_id_for_create(context, + firewall['firewall']) + fw_count = self.get_firewalls_count(context, + filters={'tenant_id': [tenant_id]}) + if fw_count: + raise FirewallCountExceeded(tenant_id=tenant_id) + firewall['firewall']['status'] = const.PENDING_CREATE + fw = super(FirewallPlugin, self).create_firewall(context, firewall) + fw_with_rules = ( + self._make_firewall_dict_with_rules(context, fw['id'])) + self.agent_rpc.create_firewall(context, fw_with_rules) + return fw + + def update_firewall(self, context, id, firewall): + LOG.debug(_("update_firewall() called")) + self._ensure_update_firewall(context, id) + firewall['firewall']['status'] = const.PENDING_UPDATE + fw = super(FirewallPlugin, self).update_firewall(context, id, firewall) + fw_with_rules = ( + self._make_firewall_dict_with_rules(context, fw['id'])) + self.agent_rpc.update_firewall(context, fw_with_rules) + return fw + + def delete_db_firewall_object(self, context, id): + firewall = self.get_firewall(context, id) + if firewall['status'] in [const.PENDING_DELETE]: + super(FirewallPlugin, self).delete_firewall(context, id) + + def delete_firewall(self, context, id): + LOG.debug(_("delete_firewall() called")) + status_update = {"firewall": {"status": const.PENDING_DELETE}} + fw = super(FirewallPlugin, self).update_firewall(context, id, + status_update) + fw_with_rules = ( + self._make_firewall_dict_with_rules(context, fw['id'])) + self.agent_rpc.delete_firewall(context, fw_with_rules) + + def update_firewall_policy(self, context, id, firewall_policy): + LOG.debug(_("update_firewall_policy() called")) + self._ensure_update_firewall_policy(context, id) + fwp = super(FirewallPlugin, + self).update_firewall_policy(context, id, firewall_policy) + self._rpc_update_firewall_policy(context, id) + return fwp + + def update_firewall_rule(self, context, id, firewall_rule): + 
LOG.debug(_("update_firewall_rule() called")) + self._ensure_update_firewall_rule(context, id) + fwr = super(FirewallPlugin, + self).update_firewall_rule(context, id, firewall_rule) + firewall_policy_id = fwr['firewall_policy_id'] + if firewall_policy_id: + self._rpc_update_firewall_policy(context, firewall_policy_id) + return fwr + + def insert_rule(self, context, id, rule_info): + LOG.debug(_("insert_rule() called")) + self._ensure_update_firewall_policy(context, id) + fwp = super(FirewallPlugin, + self).insert_rule(context, id, rule_info) + self._rpc_update_firewall_policy(context, id) + return fwp + + def remove_rule(self, context, id, rule_info): + LOG.debug(_("remove_rule() called")) + self._ensure_update_firewall_policy(context, id) + fwp = super(FirewallPlugin, + self).remove_rule(context, id, rule_info) + self._rpc_update_firewall_policy(context, id) + return fwp diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/__init__.py new file mode 100644 index 00000000..e8293255 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/l3_apic.py b/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/l3_apic.py new file mode 100644 index 00000000..02198e8d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/l3_apic.py @@ -0,0 +1,135 @@ +# Copyright (c) 2014 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. + +from neutron.db import api as qdbapi +from neutron.db import db_base_plugin_v2 +from neutron.db import extraroute_db +from neutron.db import l3_gwmode_db +from neutron.db import model_base +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.plugins.ml2.drivers.cisco.apic import apic_manager + +LOG = logging.getLogger(__name__) + + +class ApicL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2, + db_base_plugin_v2.CommonDbMixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin): + """Implementation of the APIC L3 Router Service Plugin. 
+ + This class implements a L3 service plugin that provides + internal gateway functionality for the Cisco APIC (Application + Policy Infrastructure Controller). + """ + supported_extension_aliases = ["router", "ext-gw-mode", "extraroute"] + + def __init__(self): + super(ApicL3ServicePlugin, self).__init__() + qdbapi.register_models(base=model_base.BASEV2) + self.manager = apic_manager.APICManager() + + @staticmethod + def get_plugin_type(): + return constants.L3_ROUTER_NAT + + @staticmethod + def get_plugin_description(): + """Returns string description of the plugin.""" + return _("L3 Router Service Plugin for basic L3 using the APIC") + + def _add_epg_to_contract(self, tenant_id, epg, contract): + """Add an End Point Group(EPG) to a contract as provider/consumer.""" + if self.manager.db.get_provider_contract(): + # Set this network's EPG as a consumer + self.manager.set_contract_for_epg(tenant_id, epg.epg_id, + contract.contract_id) + else: + # Set this network's EPG as a provider + self.manager.set_contract_for_epg(tenant_id, epg.epg_id, + contract.contract_id, + provider=True) + + def add_router_interface(self, context, router_id, interface_info): + """Attach a subnet to a router.""" + tenant_id = context.tenant_id + subnet_id = interface_info['subnet_id'] + LOG.debug("Attaching subnet %(subnet_id)s to " + "router %(router_id)s" % {'subnet_id': subnet_id, + 'router_id': router_id}) + + # Get network for this subnet + subnet = self.get_subnet(context, subnet_id) + network_id = subnet['network_id'] + net_name = self.get_network(context, network_id)['name'] + + # Setup tenant filters and contracts + contract = self.manager.create_tenant_contract(tenant_id) + + # Check for a provider EPG + epg = self.manager.ensure_epg_created_for_network(tenant_id, + network_id, + net_name) + self._add_epg_to_contract(tenant_id, epg, contract) + + # Create DB port + try: + return super(ApicL3ServicePlugin, self).add_router_interface( + context, router_id, interface_info) + except Exception: + LOG.error(_("Error attaching subnet %(subnet_id)s to " + "router %(router_id)s") % {'subnet_id': subnet_id, + 'router_id': router_id}) + with excutils.save_and_reraise_exception(): + self.manager.delete_contract_for_epg(tenant_id, epg.epg_id, + contract.contract_id, + provider=epg.provider) + + def remove_router_interface(self, context, router_id, interface_info): + """Detach a subnet from a router.""" + tenant_id = context.tenant_id + subnet_id = interface_info['subnet_id'] + LOG.debug("Detaching subnet %(subnet_id)s from " + "router %(router_id)s" % {'subnet_id': subnet_id, + 'router_id': router_id}) + + # Get network for this subnet + subnet = self.get_subnet(context, subnet_id) + network_id = subnet['network_id'] + network = self.get_network(context, network_id) + + contract = self.manager.create_tenant_contract(tenant_id) + + epg = self.manager.ensure_epg_created_for_network(tenant_id, + network_id, + network['name']) + # Delete contract for this epg + self.manager.delete_contract_for_epg(tenant_id, epg.epg_id, + contract.contract_id, + provider=epg.provider) + + try: + return super(ApicL3ServicePlugin, self).remove_router_interface( + context, router_id, interface_info) + except Exception: + LOG.error(_("Error detaching subnet %(subnet_id)s from " + "router %(router_id)s") % {'subnet_id': subnet_id, + 'router_id': router_id}) + with excutils.save_and_reraise_exception(): + self._add_epg_to_contract(tenant_id, epg, contract) diff --git 
a/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/l3_router_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/l3_router_plugin.py new file mode 100644 index 00000000..246199b8 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/l3_router/l3_router_plugin.py @@ -0,0 +1,101 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Bob Melander, Cisco Systems, Inc. + +from oslo.config import cfg + +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.common import constants as q_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import api as qdbapi +from neutron.db import db_base_plugin_v2 +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_dvr_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import model_base +from neutron.openstack.common import importutils +from neutron.plugins.common import constants + + +class L3RouterPluginRpcCallbacks(n_rpc.RpcCallback, + l3_rpc_base.L3RpcCallbackMixin): + + RPC_API_VERSION = '1.2' + # history + # 1.2 Added methods for DVR support + + +class L3RouterPlugin(db_base_plugin_v2.CommonDbMixin, + extraroute_db.ExtraRoute_db_mixin, + l3_dvr_db.L3_NAT_with_dvr_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin): + + """Implementation of the Neutron L3 Router Service Plugin. + + This class implements a L3 service plugin that provides + router and floatingip resources and manages associated + request/response. + All DB related work is implemented in classes + l3_db.L3_NAT_db_mixin, l3_dvr_db.L3_NAT_with_dvr_db_mixin, and + extraroute_db.ExtraRoute_db_mixin. + """ + supported_extension_aliases = ["dvr", "router", "ext-gw-mode", + "extraroute", "l3_agent_scheduler"] + + def __init__(self): + qdbapi.register_models(base=model_base.BASEV2) + self.setup_rpc() + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver) + + def setup_rpc(self): + # RPC support + self.topic = topics.L3PLUGIN + self.conn = n_rpc.create_connection(new=True) + self.agent_notifiers.update( + {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) + self.endpoints = [L3RouterPluginRpcCallbacks()] + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + self.conn.consume_in_threads() + + def get_plugin_type(self): + return constants.L3_ROUTER_NAT + + def get_plugin_description(self): + """returns string description of the plugin.""" + return ("L3 Router Service Plugin for basic L3 forwarding" + " between (L2) Neutron networks and access to external" + " networks via a NAT gateway.") + + def create_floatingip(self, context, floatingip): + """Create floating IP. 
+ + :param context: Neutron request context + :param floatingip: data of the floating IP being created + :returns: A floating IP object on success + + As the l3 router plugin asynchronously creates floating IPs + leveraging the l3 agent, the initial status for the floating + IP object will be DOWN. + """ + return super(L3RouterPlugin, self).create_floatingip( + context, floatingip, + initial_status=q_const.FLOATINGIP_STATUS_DOWN)
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/__init__.py new file mode 100644 index 00000000..a6a8955d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent.py new file mode 100644 index 00000000..0434c688 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent.py @@ -0,0 +1,70 @@ +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+# +# @author: Mark McClain, DreamHost + +import sys + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import interface +from neutron.common import config as common_config +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.openstack.common import service +from neutron.services.loadbalancer.agent import agent_manager as manager + +OPTS = [ + cfg.IntOpt( + 'periodic_interval', + default=10, + help=_('Seconds between periodic task runs') + ) +] + + +class LbaasAgentService(n_rpc.Service): + def start(self): + super(LbaasAgentService, self).start() + self.tg.add_timer( + cfg.CONF.periodic_interval, + self.manager.run_periodic_tasks, + None, + None + ) + + +def main(): + cfg.CONF.register_opts(OPTS) + cfg.CONF.register_opts(manager.OPTS) + # import interface options just in case the driver uses namespaces + cfg.CONF.register_opts(interface.OPTS) + config.register_interface_driver_opts_helper(cfg.CONF) + config.register_agent_state_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + + common_config.init(sys.argv[1:]) + config.setup_logging(cfg.CONF) + + mgr = manager.LbaasAgentManager(cfg.CONF) + svc = LbaasAgentService( + host=cfg.CONF.host, + topic=topics.LOADBALANCER_AGENT, + manager=mgr + ) + service.launch(svc).wait() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_api.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_api.py new file mode 100644 index 00000000..eb2a165c --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_api.py @@ -0,0 +1,98 @@ +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
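Before the next file, a brief aside on the configuration pattern used by the agent entry point above: tunables are declared as oslo.config options and registered on the global CONF object before they are read. A minimal sketch of that registration, using the same oslo.config API the agent imports (the option name example_interval is hypothetical; on newer installs the module is packaged as oslo_config):

    from oslo.config import cfg

    EXAMPLE_OPTS = [
        cfg.IntOpt('example_interval',
                   default=10,
                   help='Seconds between example task runs'),
    ]

    cfg.CONF.register_opts(EXAMPLE_OPTS)
    cfg.CONF([])  # the agent does this via common_config.init(sys.argv[1:])
    assert cfg.CONF.example_interval == 10
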
+# +# @author: Mark McClain, DreamHost + +from neutron.common import rpc as n_rpc + + +class LbaasAgentApi(n_rpc.RpcProxy): + """Agent side of the Agent to Plugin RPC API.""" + + API_VERSION = '2.0' + # history + # 1.0 Initial version + # 2.0 Generic API for agent based drivers + # - get_logical_device() handling changed on plugin side; + # - pool_deployed() and update_status() methods added; + + def __init__(self, topic, context, host): + super(LbaasAgentApi, self).__init__(topic, self.API_VERSION) + self.context = context + self.host = host + + def get_ready_devices(self): + return self.call( + self.context, + self.make_msg('get_ready_devices', host=self.host), + topic=self.topic + ) + + def pool_destroyed(self, pool_id): + return self.call( + self.context, + self.make_msg('pool_destroyed', pool_id=pool_id), + topic=self.topic + ) + + def pool_deployed(self, pool_id): + return self.call( + self.context, + self.make_msg('pool_deployed', pool_id=pool_id), + topic=self.topic + ) + + def get_logical_device(self, pool_id): + return self.call( + self.context, + self.make_msg( + 'get_logical_device', + pool_id=pool_id + ), + topic=self.topic + ) + + def update_status(self, obj_type, obj_id, status): + return self.call( + self.context, + self.make_msg('update_status', obj_type=obj_type, obj_id=obj_id, + status=status), + topic=self.topic + ) + + def plug_vip_port(self, port_id): + return self.call( + self.context, + self.make_msg('plug_vip_port', port_id=port_id, host=self.host), + topic=self.topic + ) + + def unplug_vip_port(self, port_id): + return self.call( + self.context, + self.make_msg('unplug_vip_port', port_id=port_id, host=self.host), + topic=self.topic + ) + + def update_pool_stats(self, pool_id, stats): + return self.call( + self.context, + self.make_msg( + 'update_pool_stats', + pool_id=pool_id, + stats=stats, + host=self.host + ), + topic=self.topic + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_device_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_device_driver.py new file mode 100644 index 00000000..ad65bcfe --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_device_driver.py @@ -0,0 +1,96 @@ +# Copyright 2013 OpenStack Foundation. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class AgentDeviceDriver(object): + """Abstract device driver that defines the API required by LBaaS agent.""" + + @abc.abstractmethod + def get_name(cls): + """Returns unique name across all LBaaS device drivers.""" + pass + + @abc.abstractmethod + def deploy_instance(self, logical_config): + """Fully deploys a loadbalancer instance from a given config.""" + pass + + @abc.abstractmethod + def undeploy_instance(self, pool_id): + """Fully undeploys the loadbalancer instance.""" + pass + + @abc.abstractmethod + def get_stats(self, pool_id): + pass + + def remove_orphans(self, known_pool_ids): + # Not all drivers will support this + raise NotImplementedError() + + @abc.abstractmethod + def create_vip(self, vip): + pass + + @abc.abstractmethod + def update_vip(self, old_vip, vip): + pass + + @abc.abstractmethod + def delete_vip(self, vip): + pass + + @abc.abstractmethod + def create_pool(self, pool): + pass + + @abc.abstractmethod + def update_pool(self, old_pool, pool): + pass + + @abc.abstractmethod + def delete_pool(self, pool): + pass + + @abc.abstractmethod + def create_member(self, member): + pass + + @abc.abstractmethod + def update_member(self, old_member, member): + pass + + @abc.abstractmethod + def delete_member(self, member): + pass + + @abc.abstractmethod + def create_pool_health_monitor(self, health_monitor, pool_id): + pass + + @abc.abstractmethod + def update_pool_health_monitor(self, + old_health_monitor, + health_monitor, + pool_id): + pass + + @abc.abstractmethod + def delete_pool_health_monitor(self, health_monitor, pool_id): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_manager.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_manager.py new file mode 100644 index 00000000..52da4f5e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent/agent_manager.py @@ -0,0 +1,336 @@ +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
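The AgentDeviceDriver interface above uses abc to force vendor device drivers to implement the full contract before they can be instantiated. A minimal, self-contained sketch of that pattern (MiniDeviceDriver and NoopDriver are hypothetical stand-ins, not classes from the patch):

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class MiniDeviceDriver(object):
        """Two-method stand-in for the much larger AgentDeviceDriver API."""

        @abc.abstractmethod
        def get_name(self):
            pass

        @abc.abstractmethod
        def deploy_instance(self, logical_config):
            pass


    class NoopDriver(MiniDeviceDriver):
        def get_name(self):
            return 'noop'

        def deploy_instance(self, logical_config):
            # A real driver would program its backend (e.g. haproxy) here.
            return logical_config


    # MiniDeviceDriver() raises TypeError; the concrete subclass instantiates.
    assert NoopDriver().get_name() == 'noop'
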
+# +# @author: Mark McClain, DreamHost + +from oslo.config import cfg + +from neutron.agent import rpc as agent_rpc +from neutron.common import constants as n_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron import context +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import periodic_task +from neutron.plugins.common import constants +from neutron.services.loadbalancer.agent import agent_api + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.MultiStrOpt( + 'device_driver', + default=['neutron.services.loadbalancer.drivers' + '.haproxy.namespace_driver.HaproxyNSDriver'], + help=_('Drivers used to manage loadbalancing devices'), + ), +] + + +class DeviceNotFoundOnAgent(n_exc.NotFound): + msg = _('Unknown device with pool_id %(pool_id)s') + + +class LbaasAgentManager(n_rpc.RpcCallback, periodic_task.PeriodicTasks): + + RPC_API_VERSION = '2.0' + # history + # 1.0 Initial version + # 1.1 Support agent_updated call + # 2.0 Generic API for agent based drivers + # - modify/reload/destroy_pool methods were removed; + # - added methods to handle create/update/delete for every lbaas + # object individually; + + def __init__(self, conf): + super(LbaasAgentManager, self).__init__() + self.conf = conf + self.context = context.get_admin_context_without_session() + self.plugin_rpc = agent_api.LbaasAgentApi( + topics.LOADBALANCER_PLUGIN, + self.context, + self.conf.host + ) + self._load_drivers() + + self.agent_state = { + 'binary': 'neutron-lbaas-agent', + 'host': conf.host, + 'topic': topics.LOADBALANCER_AGENT, + 'configurations': {'device_drivers': self.device_drivers.keys()}, + 'agent_type': n_const.AGENT_TYPE_LOADBALANCER, + 'start_flag': True} + self.admin_state_up = True + + self._setup_state_rpc() + self.needs_resync = False + # pool_id->device_driver_name mapping used to store known instances + self.instance_mapping = {} + + def _load_drivers(self): + self.device_drivers = {} + for driver in self.conf.device_driver: + try: + driver_inst = importutils.import_object( + driver, + self.conf, + self.plugin_rpc + ) + except ImportError: + msg = _('Error importing loadbalancer device driver: %s') + raise SystemExit(msg % driver) + + driver_name = driver_inst.get_name() + if driver_name not in self.device_drivers: + self.device_drivers[driver_name] = driver_inst + else: + msg = _('Multiple device drivers with the same name found: %s') + raise SystemExit(msg % driver_name) + + def _setup_state_rpc(self): + self.state_rpc = agent_rpc.PluginReportStateAPI( + topics.LOADBALANCER_PLUGIN) + report_interval = self.conf.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def _report_state(self): + try: + instance_count = len(self.instance_mapping) + self.agent_state['configurations']['instances'] = instance_count + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def initialize_service_hook(self, started_by): + self.sync_state() + + @periodic_task.periodic_task + def periodic_resync(self, context): + if self.needs_resync: + self.needs_resync = False + self.sync_state() + + @periodic_task.periodic_task(spacing=6) + def collect_stats(self, 
context): + for pool_id, driver_name in self.instance_mapping.items(): + driver = self.device_drivers[driver_name] + try: + stats = driver.get_stats(pool_id) + if stats: + self.plugin_rpc.update_pool_stats(pool_id, stats) + except Exception: + LOG.exception(_('Error updating statistics on pool %s'), + pool_id) + self.needs_resync = True + + def sync_state(self): + known_instances = set(self.instance_mapping.keys()) + try: + ready_instances = set(self.plugin_rpc.get_ready_devices()) + + for deleted_id in known_instances - ready_instances: + self._destroy_pool(deleted_id) + + for pool_id in ready_instances: + self._reload_pool(pool_id) + + except Exception: + LOG.exception(_('Unable to retrieve ready devices')) + self.needs_resync = True + + self.remove_orphans() + + def _get_driver(self, pool_id): + if pool_id not in self.instance_mapping: + raise DeviceNotFoundOnAgent(pool_id=pool_id) + + driver_name = self.instance_mapping[pool_id] + return self.device_drivers[driver_name] + + def _reload_pool(self, pool_id): + try: + logical_config = self.plugin_rpc.get_logical_device(pool_id) + driver_name = logical_config['driver'] + if driver_name not in self.device_drivers: + LOG.error(_('No device driver ' + 'on agent: %s.'), driver_name) + self.plugin_rpc.update_status( + 'pool', pool_id, constants.ERROR) + return + + self.device_drivers[driver_name].deploy_instance(logical_config) + self.instance_mapping[pool_id] = driver_name + self.plugin_rpc.pool_deployed(pool_id) + except Exception: + LOG.exception(_('Unable to deploy instance for pool: %s'), pool_id) + self.needs_resync = True + + def _destroy_pool(self, pool_id): + driver = self._get_driver(pool_id) + try: + driver.undeploy_instance(pool_id) + del self.instance_mapping[pool_id] + self.plugin_rpc.pool_destroyed(pool_id) + except Exception: + LOG.exception(_('Unable to destroy device for pool: %s'), pool_id) + self.needs_resync = True + + def remove_orphans(self): + for driver_name in self.device_drivers: + pool_ids = [pool_id for pool_id in self.instance_mapping + if self.instance_mapping[pool_id] == driver_name] + try: + self.device_drivers[driver_name].remove_orphans(pool_ids) + except NotImplementedError: + pass # Not all drivers will support this + + def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver): + LOG.exception(_('%(operation)s %(obj)s %(id)s failed on device driver ' + '%(driver)s'), + {'operation': operation.capitalize(), 'obj': obj_type, + 'id': obj_id, 'driver': driver}) + self.plugin_rpc.update_status(obj_type, obj_id, constants.ERROR) + + def create_vip(self, context, vip): + driver = self._get_driver(vip['pool_id']) + try: + driver.create_vip(vip) + except Exception: + self._handle_failed_driver_call('create', 'vip', vip['id'], + driver.get_name()) + else: + self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE) + + def update_vip(self, context, old_vip, vip): + driver = self._get_driver(vip['pool_id']) + try: + driver.update_vip(old_vip, vip) + except Exception: + self._handle_failed_driver_call('update', 'vip', vip['id'], + driver.get_name()) + else: + self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE) + + def delete_vip(self, context, vip): + driver = self._get_driver(vip['pool_id']) + driver.delete_vip(vip) + + def create_pool(self, context, pool, driver_name): + if driver_name not in self.device_drivers: + LOG.error(_('No device driver on agent: %s.'), driver_name) + self.plugin_rpc.update_status('pool', pool['id'], constants.ERROR) + return + + driver = 
self.device_drivers[driver_name] + try: + driver.create_pool(pool) + except Exception: + self._handle_failed_driver_call('create', 'pool', pool['id'], + driver.get_name()) + else: + self.instance_mapping[pool['id']] = driver_name + self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE) + + def update_pool(self, context, old_pool, pool): + driver = self._get_driver(pool['id']) + try: + driver.update_pool(old_pool, pool) + except Exception: + self._handle_failed_driver_call('update', 'pool', pool['id'], + driver.get_name()) + else: + self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE) + + def delete_pool(self, context, pool): + driver = self._get_driver(pool['id']) + driver.delete_pool(pool) + del self.instance_mapping[pool['id']] + + def create_member(self, context, member): + driver = self._get_driver(member['pool_id']) + try: + driver.create_member(member) + except Exception: + self._handle_failed_driver_call('create', 'member', member['id'], + driver.get_name()) + else: + self.plugin_rpc.update_status('member', member['id'], + constants.ACTIVE) + + def update_member(self, context, old_member, member): + driver = self._get_driver(member['pool_id']) + try: + driver.update_member(old_member, member) + except Exception: + self._handle_failed_driver_call('update', 'member', member['id'], + driver.get_name()) + else: + self.plugin_rpc.update_status('member', member['id'], + constants.ACTIVE) + + def delete_member(self, context, member): + driver = self._get_driver(member['pool_id']) + driver.delete_member(member) + + def create_pool_health_monitor(self, context, health_monitor, pool_id): + driver = self._get_driver(pool_id) + assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']} + try: + driver.create_pool_health_monitor(health_monitor, pool_id) + except Exception: + self._handle_failed_driver_call( + 'create', 'health_monitor', assoc_id, driver.get_name()) + else: + self.plugin_rpc.update_status( + 'health_monitor', assoc_id, constants.ACTIVE) + + def update_pool_health_monitor(self, context, old_health_monitor, + health_monitor, pool_id): + driver = self._get_driver(pool_id) + assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']} + try: + driver.update_pool_health_monitor(old_health_monitor, + health_monitor, + pool_id) + except Exception: + self._handle_failed_driver_call( + 'update', 'health_monitor', assoc_id, driver.get_name()) + else: + self.plugin_rpc.update_status( + 'health_monitor', assoc_id, constants.ACTIVE) + + def delete_pool_health_monitor(self, context, health_monitor, pool_id): + driver = self._get_driver(pool_id) + driver.delete_pool_health_monitor(health_monitor, pool_id) + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + if payload['admin_state_up'] != self.admin_state_up: + self.admin_state_up = payload['admin_state_up'] + if self.admin_state_up: + self.needs_resync = True + else: + for pool_id in self.instance_mapping.keys(): + LOG.info(_("Destroying pool %s due to agent disabling"), + pool_id) + self._destroy_pool(pool_id) + LOG.info(_("Agent_updated by server side %s!"), payload) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent_scheduler.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent_scheduler.py new file mode 100644 index 00000000..a196e230 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/agent_scheduler.py @@ -0,0 +1,128 @@ +# Copyright (c) 2013 OpenStack 
Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random + +import sqlalchemy as sa +from sqlalchemy import orm +from sqlalchemy.orm import joinedload + +from neutron.common import constants +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import model_base +from neutron.extensions import lbaas_agentscheduler +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class PoolLoadbalancerAgentBinding(model_base.BASEV2): + """Represents binding between neutron loadbalancer pools and agents.""" + + pool_id = sa.Column(sa.String(36), + sa.ForeignKey("pools.id", ondelete='CASCADE'), + primary_key=True) + agent = orm.relation(agents_db.Agent) + agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", + ondelete='CASCADE'), + nullable=False) + + +class LbaasAgentSchedulerDbMixin(agentschedulers_db.AgentSchedulerDbMixin, + lbaas_agentscheduler + .LbaasAgentSchedulerPluginBase): + + def get_lbaas_agent_hosting_pool(self, context, pool_id, active=None): + query = context.session.query(PoolLoadbalancerAgentBinding) + query = query.options(joinedload('agent')) + binding = query.get(pool_id) + + if (binding and self.is_eligible_agent( + active, binding.agent)): + return {'agent': self._make_agent_dict(binding.agent)} + + def get_lbaas_agents(self, context, active=None, filters=None): + query = context.session.query(agents_db.Agent) + query = query.filter_by(agent_type=constants.AGENT_TYPE_LOADBALANCER) + if active is not None: + query = query.filter_by(admin_state_up=active) + if filters: + for key, value in filters.iteritems(): + column = getattr(agents_db.Agent, key, None) + if column: + query = query.filter(column.in_(value)) + + return [agent + for agent in query + if self.is_eligible_agent(active, agent)] + + def list_pools_on_lbaas_agent(self, context, id): + query = context.session.query(PoolLoadbalancerAgentBinding.pool_id) + query = query.filter_by(agent_id=id) + pool_ids = [item[0] for item in query] + if pool_ids: + return {'pools': self.get_pools(context, filters={'id': pool_ids})} + else: + return {'pools': []} + + def get_lbaas_agent_candidates(self, device_driver, active_agents): + candidates = [] + for agent in active_agents: + agent_conf = self.get_configuration_dict(agent) + if device_driver in agent_conf['device_drivers']: + candidates.append(agent) + return candidates + + +class ChanceScheduler(object): + """Allocate a loadbalancer agent for a vip in a random way.""" + + def schedule(self, plugin, context, pool, device_driver): + """Schedule the pool to an active loadbalancer agent if there + is no enabled agent hosting it. 
+ """ + with context.session.begin(subtransactions=True): + lbaas_agent = plugin.get_lbaas_agent_hosting_pool( + context, pool['id']) + if lbaas_agent: + LOG.debug(_('Pool %(pool_id)s has already been hosted' + ' by lbaas agent %(agent_id)s'), + {'pool_id': pool['id'], + 'agent_id': lbaas_agent['id']}) + return + + active_agents = plugin.get_lbaas_agents(context, active=True) + if not active_agents: + LOG.warn(_('No active lbaas agents for pool %s'), pool['id']) + return + + candidates = plugin.get_lbaas_agent_candidates(device_driver, + active_agents) + if not candidates: + LOG.warn(_('No lbaas agent supporting device driver %s'), + device_driver) + return + + chosen_agent = random.choice(candidates) + binding = PoolLoadbalancerAgentBinding() + binding.agent = chosen_agent + binding.pool_id = pool['id'] + context.session.add(binding) + LOG.debug(_('Pool %(pool_id)s is scheduled to ' + 'lbaas agent %(agent_id)s'), + {'pool_id': pool['id'], + 'agent_id': chosen_agent['id']}) + return chosen_agent diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/constants.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/constants.py new file mode 100644 index 00000000..0f834467 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/constants.py @@ -0,0 +1,45 @@ +# Copyright 2013 Mirantis, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN' +LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS' +LB_METHOD_SOURCE_IP = 'SOURCE_IP' + +PROTOCOL_TCP = 'TCP' +PROTOCOL_HTTP = 'HTTP' +PROTOCOL_HTTPS = 'HTTPS' + +HEALTH_MONITOR_PING = 'PING' +HEALTH_MONITOR_TCP = 'TCP' +HEALTH_MONITOR_HTTP = 'HTTP' +HEALTH_MONITOR_HTTPS = 'HTTPS' + +SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP' +SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE' +SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE' + +STATS_ACTIVE_CONNECTIONS = 'active_connections' +STATS_MAX_CONNECTIONS = 'max_connections' +STATS_TOTAL_CONNECTIONS = 'total_connections' +STATS_CURRENT_SESSIONS = 'current_sessions' +STATS_MAX_SESSIONS = 'max_sessions' +STATS_TOTAL_SESSIONS = 'total_sessions' +STATS_IN_BYTES = 'bytes_in' +STATS_OUT_BYTES = 'bytes_out' +STATS_CONNECTION_ERRORS = 'connection_errors' +STATS_RESPONSE_ERRORS = 'response_errors' +STATS_STATUS = 'status' +STATS_HEALTH = 'health' +STATS_FAILED_CHECKS = 'failed_checks' diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/__init__.py new file mode 100644 index 00000000..44391ce4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/abstract_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/abstract_driver.py new file mode 100644 index 00000000..61123c30 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/abstract_driver.py @@ -0,0 +1,128 @@ +# Copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Avishay Balderman, Radware + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class LoadBalancerAbstractDriver(object): + """Abstract lbaas driver that expose ~same API as lbaas plugin. + + The configuration elements (Vip,Member,etc) are the dicts that + are returned to the tenant. + Get operations are not part of the API - it will be handled + by the lbaas plugin. + """ + + @abc.abstractmethod + def create_vip(self, context, vip): + """A real driver would invoke a call to his backend + and set the Vip status to ACTIVE/ERROR according + to the backend call result + self.plugin.update_status(context, Vip, vip["id"], + constants.ACTIVE) + """ + pass + + @abc.abstractmethod + def update_vip(self, context, old_vip, vip): + """Driver may call the code below in order to update the status. + self.plugin.update_status(context, Vip, id, constants.ACTIVE) + """ + pass + + @abc.abstractmethod + def delete_vip(self, context, vip): + """A real driver would invoke a call to his backend + and try to delete the Vip. + if the deletion was successful, delete the record from the database. + if the deletion has failed, set the Vip status to ERROR. + """ + pass + + @abc.abstractmethod + def create_pool(self, context, pool): + """Driver may call the code below in order to update the status. + self.plugin.update_status(context, Pool, pool["id"], + constants.ACTIVE) + """ + pass + + @abc.abstractmethod + def update_pool(self, context, old_pool, pool): + """Driver may call the code below in order to update the status. + self.plugin.update_status(context, + Pool, + pool["id"], constants.ACTIVE) + """ + pass + + @abc.abstractmethod + def delete_pool(self, context, pool): + """Driver can call the code below in order to delete the pool. + self.plugin._delete_db_pool(context, pool["id"]) + or set the status to ERROR if deletion failed + """ + pass + + @abc.abstractmethod + def stats(self, context, pool_id): + pass + + @abc.abstractmethod + def create_member(self, context, member): + """Driver may call the code below in order to update the status. 
+ self.plugin.update_status(context, Member, member["id"], + constants.ACTIVE) + """ + pass + + @abc.abstractmethod + def update_member(self, context, old_member, member): + """Driver may call the code below in order to update the status. + self.plugin.update_status(context, Member, + member["id"], constants.ACTIVE) + """ + pass + + @abc.abstractmethod + def delete_member(self, context, member): + pass + + @abc.abstractmethod + def update_pool_health_monitor(self, context, + old_health_monitor, + health_monitor, + pool_id): + pass + + @abc.abstractmethod + def create_pool_health_monitor(self, context, + health_monitor, + pool_id): + """Driver may call the code below in order to update the status. + self.plugin.update_pool_health_monitor(context, + health_monitor["id"], + pool_id, + constants.ACTIVE) + """ + pass + + @abc.abstractmethod + def delete_pool_health_monitor(self, context, health_monitor, pool_id): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/common/agent_driver_base.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/common/agent_driver_base.py new file mode 100644 index 00000000..0f8e036d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/common/agent_driver_base.py @@ -0,0 +1,443 @@ +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
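One more aside before the agent-based driver below: as its docstrings spell out, LoadBalancerAbstractDriver methods report results by updating status on the plugin (e.g. ACTIVE or ERROR) rather than by returning values. A minimal sketch of that calling convention with a stub plugin (FakePlugin and NoopLbDriver are illustrative only):

    class FakePlugin(object):
        def __init__(self):
            self.statuses = {}

        def update_status(self, context, model, obj_id, status):
            self.statuses[obj_id] = status


    class NoopLbDriver(object):
        def __init__(self, plugin):
            self.plugin = plugin

        def create_vip(self, context, vip):
            # A real driver calls its backend first, then picks ACTIVE or
            # ERROR (neutron.plugins.common.constants) based on the outcome.
            self.plugin.update_status(context, 'Vip', vip['id'], 'ACTIVE')


    plugin = FakePlugin()
    NoopLbDriver(plugin).create_vip(None, {'id': 'vip-1'})
    assert plugin.statuses == {'vip-1': 'ACTIVE'}
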
+# +# @author: Mark McClain, DreamHost + +import uuid + +from oslo.config import cfg + +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.db.loadbalancer import loadbalancer_db +from neutron.extensions import lbaas_agentscheduler +from neutron.extensions import portbindings +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.loadbalancer.drivers import abstract_driver + +LOG = logging.getLogger(__name__) + +AGENT_SCHEDULER_OPTS = [ + cfg.StrOpt('loadbalancer_pool_scheduler_driver', + default='neutron.services.loadbalancer.agent_scheduler' + '.ChanceScheduler', + help=_('Driver to use for scheduling ' + 'pool to a default loadbalancer agent')), +] + +cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS) + + +class DriverNotSpecified(n_exc.NeutronException): + message = _("Device driver for agent should be specified " + "in plugin driver.") + + +class LoadBalancerCallbacks(n_rpc.RpcCallback): + + RPC_API_VERSION = '2.0' + # history + # 1.0 Initial version + # 2.0 Generic API for agent based drivers + # - get_logical_device() handling changed; + # - pool_deployed() and update_status() methods added; + + def __init__(self, plugin): + super(LoadBalancerCallbacks, self).__init__() + self.plugin = plugin + + def get_ready_devices(self, context, host=None): + with context.session.begin(subtransactions=True): + agents = self.plugin.get_lbaas_agents(context, + filters={'host': [host]}) + if not agents: + return [] + elif len(agents) > 1: + LOG.warning(_('Multiple lbaas agents found on host %s'), host) + pools = self.plugin.list_pools_on_lbaas_agent(context, + agents[0].id) + pool_ids = [pool['id'] for pool in pools['pools']] + + qry = context.session.query(loadbalancer_db.Pool.id) + qry = qry.filter(loadbalancer_db.Pool.id.in_(pool_ids)) + qry = qry.filter( + loadbalancer_db.Pool.status.in_( + constants.ACTIVE_PENDING_STATUSES)) + up = True # makes pep8 and sqlalchemy happy + qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up) + return [id for id, in qry] + + def get_logical_device(self, context, pool_id=None): + with context.session.begin(subtransactions=True): + qry = context.session.query(loadbalancer_db.Pool) + qry = qry.filter_by(id=pool_id) + pool = qry.one() + retval = {} + retval['pool'] = self.plugin._make_pool_dict(pool) + + if pool.vip: + retval['vip'] = self.plugin._make_vip_dict(pool.vip) + retval['vip']['port'] = ( + self.plugin._core_plugin._make_port_dict(pool.vip.port) + ) + for fixed_ip in retval['vip']['port']['fixed_ips']: + fixed_ip['subnet'] = ( + self.plugin._core_plugin.get_subnet( + context, + fixed_ip['subnet_id'] + ) + ) + retval['members'] = [ + self.plugin._make_member_dict(m) + for m in pool.members if ( + m.status in constants.ACTIVE_PENDING_STATUSES or + m.status == constants.INACTIVE) + ] + retval['healthmonitors'] = [ + self.plugin._make_health_monitor_dict(hm.healthmonitor) + for hm in pool.monitors + if hm.status in constants.ACTIVE_PENDING_STATUSES + ] + retval['driver'] = ( + self.plugin.drivers[pool.provider.provider_name].device_driver) + + return retval + + def pool_deployed(self, context, pool_id): + with context.session.begin(subtransactions=True): + qry = context.session.query(loadbalancer_db.Pool) + qry = qry.filter_by(id=pool_id) + pool = qry.one() + + # set all 
resources to active + if pool.status in constants.ACTIVE_PENDING_STATUSES: + pool.status = constants.ACTIVE + + if (pool.vip and pool.vip.status in + constants.ACTIVE_PENDING_STATUSES): + pool.vip.status = constants.ACTIVE + + for m in pool.members: + if m.status in constants.ACTIVE_PENDING_STATUSES: + m.status = constants.ACTIVE + + for hm in pool.monitors: + if hm.status in constants.ACTIVE_PENDING_STATUSES: + hm.status = constants.ACTIVE + + def update_status(self, context, obj_type, obj_id, status): + model_mapping = { + 'pool': loadbalancer_db.Pool, + 'vip': loadbalancer_db.Vip, + 'member': loadbalancer_db.Member, + 'health_monitor': loadbalancer_db.PoolMonitorAssociation + } + if obj_type not in model_mapping: + raise n_exc.Invalid(_('Unknown object type: %s') % obj_type) + try: + if obj_type == 'health_monitor': + self.plugin.update_pool_health_monitor( + context, obj_id['monitor_id'], obj_id['pool_id'], status) + else: + self.plugin.update_status( + context, model_mapping[obj_type], obj_id, status) + except n_exc.NotFound: + # update_status may come from agent on an object which was + # already deleted from db with other request + LOG.warning(_('Cannot update status: %(obj_type)s %(obj_id)s ' + 'not found in the DB, it was probably deleted ' + 'concurrently'), + {'obj_type': obj_type, 'obj_id': obj_id}) + + def pool_destroyed(self, context, pool_id=None): + """Agent confirmation hook that a pool has been destroyed. + + This method exists for subclasses to change the deletion + behavior. + """ + pass + + def plug_vip_port(self, context, port_id=None, host=None): + if not port_id: + return + + try: + port = self.plugin._core_plugin.get_port( + context, + port_id + ) + except n_exc.PortNotFound: + msg = _('Unable to find port %s to plug.') + LOG.debug(msg, port_id) + return + + port['admin_state_up'] = True + port['device_owner'] = 'neutron:' + constants.LOADBALANCER + port['device_id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host))) + port[portbindings.HOST_ID] = host + self.plugin._core_plugin.update_port( + context, + port_id, + {'port': port} + ) + + def unplug_vip_port(self, context, port_id=None, host=None): + if not port_id: + return + + try: + port = self.plugin._core_plugin.get_port( + context, + port_id + ) + except n_exc.PortNotFound: + msg = _('Unable to find port %s to unplug. This can occur when ' + 'the Vip has been deleted first.') + LOG.debug(msg, port_id) + return + + port['admin_state_up'] = False + port['device_owner'] = '' + port['device_id'] = '' + + try: + self.plugin._core_plugin.update_port( + context, + port_id, + {'port': port} + ) + + except n_exc.PortNotFound: + msg = _('Unable to find port %s to unplug. 
This can occur when ' + 'the Vip has been deleted first.') + LOG.debug(msg, port_id) + + def update_pool_stats(self, context, pool_id=None, stats=None, host=None): + self.plugin.update_pool_stats(context, pool_id, data=stats) + + +class LoadBalancerAgentApi(n_rpc.RpcProxy): + """Plugin side of plugin to agent RPC API.""" + + BASE_RPC_API_VERSION = '2.0' + # history + # 1.0 Initial version + # 1.1 Support agent_updated call + # 2.0 Generic API for agent based drivers + # - modify/reload/destroy_pool methods were removed; + # - added methods to handle create/update/delete for every lbaas + # object individually; + + def __init__(self, topic): + super(LoadBalancerAgentApi, self).__init__( + topic, default_version=self.BASE_RPC_API_VERSION) + + def _cast(self, context, method_name, method_args, host, version=None): + return self.cast( + context, + self.make_msg(method_name, **method_args), + topic='%s.%s' % (self.topic, host), + version=version + ) + + def create_vip(self, context, vip, host): + return self._cast(context, 'create_vip', {'vip': vip}, host) + + def update_vip(self, context, old_vip, vip, host): + return self._cast(context, 'update_vip', + {'old_vip': old_vip, 'vip': vip}, host) + + def delete_vip(self, context, vip, host): + return self._cast(context, 'delete_vip', {'vip': vip}, host) + + def create_pool(self, context, pool, host, driver_name): + return self._cast(context, 'create_pool', + {'pool': pool, 'driver_name': driver_name}, host) + + def update_pool(self, context, old_pool, pool, host): + return self._cast(context, 'update_pool', + {'old_pool': old_pool, 'pool': pool}, host) + + def delete_pool(self, context, pool, host): + return self._cast(context, 'delete_pool', {'pool': pool}, host) + + def create_member(self, context, member, host): + return self._cast(context, 'create_member', {'member': member}, host) + + def update_member(self, context, old_member, member, host): + return self._cast(context, 'update_member', + {'old_member': old_member, 'member': member}, host) + + def delete_member(self, context, member, host): + return self._cast(context, 'delete_member', {'member': member}, host) + + def create_pool_health_monitor(self, context, health_monitor, pool_id, + host): + return self._cast(context, 'create_pool_health_monitor', + {'health_monitor': health_monitor, + 'pool_id': pool_id}, host) + + def update_pool_health_monitor(self, context, old_health_monitor, + health_monitor, pool_id, host): + return self._cast(context, 'update_pool_health_monitor', + {'old_health_monitor': old_health_monitor, + 'health_monitor': health_monitor, + 'pool_id': pool_id}, host) + + def delete_pool_health_monitor(self, context, health_monitor, pool_id, + host): + return self._cast(context, 'delete_pool_health_monitor', + {'health_monitor': health_monitor, + 'pool_id': pool_id}, host) + + def agent_updated(self, context, admin_state_up, host): + return self._cast(context, 'agent_updated', + {'payload': {'admin_state_up': admin_state_up}}, + host) + + +class AgentDriverBase(abstract_driver.LoadBalancerAbstractDriver): + + # name of device driver that should be used by the agent; + # vendor specific plugin drivers must override it; + device_driver = None + + def __init__(self, plugin): + if not self.device_driver: + raise DriverNotSpecified() + + self.agent_rpc = LoadBalancerAgentApi(topics.LOADBALANCER_AGENT) + + self.plugin = plugin + self._set_callbacks_on_plugin() + self.plugin.agent_notifiers.update( + {q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc}) + + self.pool_scheduler = 
importutils.import_object( + cfg.CONF.loadbalancer_pool_scheduler_driver) + + def _set_callbacks_on_plugin(self): + # other agent based plugin driver might already set callbacks on plugin + if hasattr(self.plugin, 'agent_callbacks'): + return + + self.plugin.agent_endpoints = [ + LoadBalancerCallbacks(self.plugin), + agents_db.AgentExtRpcCallback(self.plugin) + ] + self.plugin.conn = n_rpc.create_connection(new=True) + self.plugin.conn.create_consumer( + topics.LOADBALANCER_PLUGIN, + self.plugin.agent_endpoints, + fanout=False) + self.plugin.conn.consume_in_threads() + + def get_pool_agent(self, context, pool_id): + agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool_id) + if not agent: + raise lbaas_agentscheduler.NoActiveLbaasAgent(pool_id=pool_id) + return agent['agent'] + + def create_vip(self, context, vip): + agent = self.get_pool_agent(context, vip['pool_id']) + self.agent_rpc.create_vip(context, vip, agent['host']) + + def update_vip(self, context, old_vip, vip): + agent = self.get_pool_agent(context, vip['pool_id']) + if vip['status'] in constants.ACTIVE_PENDING_STATUSES: + self.agent_rpc.update_vip(context, old_vip, vip, agent['host']) + else: + self.agent_rpc.delete_vip(context, vip, agent['host']) + + def delete_vip(self, context, vip): + self.plugin._delete_db_vip(context, vip['id']) + agent = self.get_pool_agent(context, vip['pool_id']) + self.agent_rpc.delete_vip(context, vip, agent['host']) + + def create_pool(self, context, pool): + agent = self.pool_scheduler.schedule(self.plugin, context, pool, + self.device_driver) + if not agent: + raise lbaas_agentscheduler.NoEligibleLbaasAgent(pool_id=pool['id']) + self.agent_rpc.create_pool(context, pool, agent['host'], + self.device_driver) + + def update_pool(self, context, old_pool, pool): + agent = self.get_pool_agent(context, pool['id']) + if pool['status'] in constants.ACTIVE_PENDING_STATUSES: + self.agent_rpc.update_pool(context, old_pool, pool, + agent['host']) + else: + self.agent_rpc.delete_pool(context, pool, agent['host']) + + def delete_pool(self, context, pool): + # get agent first to know host as binding will be deleted + # after pool is deleted from db + agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool['id']) + self.plugin._delete_db_pool(context, pool['id']) + if agent: + self.agent_rpc.delete_pool(context, pool, agent['agent']['host']) + + def create_member(self, context, member): + agent = self.get_pool_agent(context, member['pool_id']) + self.agent_rpc.create_member(context, member, agent['host']) + + def update_member(self, context, old_member, member): + agent = self.get_pool_agent(context, member['pool_id']) + # member may change pool id + if member['pool_id'] != old_member['pool_id']: + old_pool_agent = self.plugin.get_lbaas_agent_hosting_pool( + context, old_member['pool_id']) + if old_pool_agent: + self.agent_rpc.delete_member(context, old_member, + old_pool_agent['agent']['host']) + self.agent_rpc.create_member(context, member, agent['host']) + else: + self.agent_rpc.update_member(context, old_member, member, + agent['host']) + + def delete_member(self, context, member): + self.plugin._delete_db_member(context, member['id']) + agent = self.get_pool_agent(context, member['pool_id']) + self.agent_rpc.delete_member(context, member, agent['host']) + + def create_pool_health_monitor(self, context, healthmon, pool_id): + # healthmon is not used here + agent = self.get_pool_agent(context, pool_id) + self.agent_rpc.create_pool_health_monitor(context, healthmon, + pool_id, agent['host']) + 
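+    # The update and delete handlers below follow the same pattern as the
+    # create calls above: resolve the lbaas agent hosting the pool, then cast
+    # the change to that agent's topic; for deletes, the DB record is removed
+    # plugin-side before the cast is issued.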
+ def update_pool_health_monitor(self, context, old_health_monitor, + health_monitor, pool_id): + agent = self.get_pool_agent(context, pool_id) + self.agent_rpc.update_pool_health_monitor(context, old_health_monitor, + health_monitor, pool_id, + agent['host']) + + def delete_pool_health_monitor(self, context, health_monitor, pool_id): + self.plugin._delete_db_pool_health_monitor( + context, health_monitor['id'], pool_id + ) + + agent = self.get_pool_agent(context, pool_id) + self.agent_rpc.delete_pool_health_monitor(context, health_monitor, + pool_id, agent['host']) + + def stats(self, context, pool_id): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py new file mode 100644 index 00000000..f31caf77 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/agent/lb_operations.py @@ -0,0 +1,179 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com + +import functools + +from heleosapi import exceptions as h_exc + +from neutron.openstack.common import log as logging +from neutron.services.loadbalancer import constants as lcon +from neutron.services.loadbalancer.drivers.embrane import constants as econ + +LOG = logging.getLogger(__name__) +handlers = {} + + +def handler(event, handler): + def wrap(f): + if event not in handler.keys(): + handler[event] = [f] + else: + handler[event].append(f) + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + return f(*args, **kwargs) + + return wrapped_f + + return wrap + + +@handler(econ.Events.CREATE_VIP, handlers) +def _provision_load_balancer(driver, context, vip, flavor, + vip_utif_info, vip_ip_allocation_info, + pool_utif_info=None, + pool_ip_allocation_info=None, + pool=None, members=None, + monitors=None): + api = driver._heleos_api + tenant_id = context.tenant_id + admin_state = vip["admin_state_up"] + # Architectural configuration + api.create_load_balancer(tenant_id=tenant_id, + router_id=vip["id"], + name=vip["name"], + flavor=flavor, + up=False) + api.grow_interface(vip_utif_info, False, tenant_id, vip["id"]) + if pool: + api.grow_interface(pool_utif_info, False, tenant_id, + vip["id"]) + + # Logical configuration + api.allocate_address(vip["id"], True, vip_ip_allocation_info) + if pool: + api.allocate_address(vip["id"], True, pool_ip_allocation_info) + dva = api.configure_load_balancer(vip["id"], admin_state, + vip, pool, + monitors, members) + return api.extract_dva_state(dva) + + +@handler(econ.Events.UPDATE_VIP, handlers) +def _update_load_balancer(driver, context, vip, + old_pool_id=None, old_port_id=None, + removed_ip=None, pool_utif_info=None, + pool_ip_allocation_info=None, + new_pool=None, members=None, + monitors=None): + api = driver._heleos_api + tenant_id = context.tenant_id + admin_state = 
vip["admin_state_up"] + + if old_pool_id: + # Architectural Changes + api.de_allocate_address(vip['id'], False, old_port_id, removed_ip) + api.shrink_interface(tenant_id, vip["id"], False, old_port_id) + api.grow_interface(pool_utif_info, False, tenant_id, vip["id"]) + # Configuration Changes + api.allocate_address(vip["id"], True, pool_ip_allocation_info) + api.replace_pool(vip["id"], True, vip, old_pool_id, + new_pool, monitors, members) + + api.update_vservice(vip["id"], True, vip) + # Dva update + dva = api.update_dva(tenant_id, vip["id"], vip["name"], + admin_state, description=vip["description"]) + + return api.extract_dva_state(dva) + + +@handler(econ.Events.DELETE_VIP, handlers) +def _delete_load_balancer(driver, context, vip): + try: + driver._heleos_api.delete_dva(context.tenant_id, vip['id']) + except h_exc.DvaNotFound: + LOG.warning(_('The load balancer %s had no physical representation, ' + 'likely already deleted'), vip['id']) + return econ.DELETED + + +@handler(econ.Events.UPDATE_POOL, handlers) +def _update_server_pool(driver, context, vip, pool, + monitors=None): + api = driver._heleos_api + cookie = ((vip.get('session_persistence') or {}).get('type') == + lcon.SESSION_PERSISTENCE_HTTP_COOKIE) + return api.extract_dva_state(api.update_pool(vip['id'], + vip['admin_state_up'], + pool, cookie, monitors)) + + +@handler(econ.Events.ADD_OR_UPDATE_MEMBER, handlers) +def _add_or_update_pool_member(driver, context, vip, member, protocol): + api = driver._heleos_api + return api.extract_dva_state(api.update_backend_server( + vip['id'], vip['admin_state_up'], member, protocol)) + + +@handler(econ.Events.REMOVE_MEMBER, handlers) +def _remove_member_from_pool(driver, context, vip, member): + api = driver._heleos_api + return api.extract_dva_state(api.remove_pool_member(vip['id'], + vip['admin_state_up'], + member)) + + +@handler(econ.Events.DELETE_MEMBER, handlers) +def _delete_member(driver, context, vip, member): + with context.session.begin(subtransactions=True): + api = driver._heleos_api + dva = api.delete_backend_server(vip['id'], vip['admin_state_up'], + member) + driver._delete_member(context, member) + return api.extract_dva_state(dva) + + +@handler(econ.Events.ADD_POOL_HM, handlers) +def _create_pool_hm(driver, context, vip, hm, pool_id): + api = driver._heleos_api + return api.extract_dva_state(api.add_pool_monitor( + vip['id'], vip['admin_state_up'], hm, pool_id)) + + +@handler(econ.Events.UPDATE_POOL_HM, handlers) +def _update_pool_hm(driver, context, vip, hm, pool_id): + api = driver._heleos_api + return api.extract_dva_state(api.update_pool_monitor( + vip['id'], vip['admin_state_up'], hm, pool_id)) + + +@handler(econ.Events.DELETE_POOL_HM, handlers) +def _delete_pool_hm(driver, context, vip, hm, pool_id): + with context.session.begin(subtransactions=True): + api = driver._heleos_api + dva = api.add_pool_monitor(vip['id'], vip['admin_state_up'], + hm, pool_id) + driver._delete_pool_hm(context, hm, pool_id) + return api.extract_dva_state(dva) + + +@handler(econ.Events.POLL_GRAPH, handlers) +def _poll_graph(driver, context, vip): + api = driver._heleos_api + return api.extract_dva_state(api.get_dva(vip['id'])) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/config.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/config.py new file mode 100644 index 00000000..cac9a63d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/config.py @@ -0,0 +1,53 @@ 
+# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com + +from oslo.config import cfg + +# User may want to use LB service together with the L3 plugin, but using +# different resources. The service will inherit the configuration from the +# L3 heleos plugin if present and not overridden. +heleos_opts = [ + cfg.StrOpt('esm_mgmt', + help=_('ESM management root address')), + cfg.StrOpt('admin_username', + help=_('ESM admin username.')), + cfg.StrOpt('admin_password', + secret=True, + help=_('ESM admin password.')), + cfg.StrOpt('lb_image', + help=_('Load Balancer image id (Embrane LB)')), + cfg.StrOpt('inband_id', + help=_('In band Security Zone id for LBs')), + cfg.StrOpt('oob_id', + help=_('Out of band Security Zone id for LBs')), + cfg.StrOpt('mgmt_id', + help=_('Management Security Zone id for LBs')), + cfg.StrOpt('dummy_utif_id', + help=_('Dummy user traffic Security Zone id for LBs')), + cfg.StrOpt('resource_pool_id', + help=_('Shared resource pool id')), + cfg.StrOpt('lb_flavor', default="small", + help=_('Choose the LB image flavor to use; accepted values: ' 'small, medium.')), + cfg.IntOpt('sync_interval', default=60, + help=_('Resource synchronization interval in seconds.')), + cfg.BoolOpt('async_requests', + help=_('Define whether requests should ' 'run asynchronously or not.')), +] + +cfg.CONF.register_opts(heleos_opts, 'heleoslb') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/models.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/models.py new file mode 100644 index 00000000..51adfcf2 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/embrane/models.py @@ -0,0 +1,30 @@ +# Copyright 2014 Embrane, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Ivar Lazzaro, Embrane, Inc.
ivar@embrane.com + +import sqlalchemy as sql + +from neutron.db import model_base + + +class PoolPort(model_base.BASEV2): + """Represents the connection between pools and ports.""" + __tablename__ = 'embrane_pool_port' + + pool_id = sql.Column(sql.String(36), sql.ForeignKey('pools.id'), + primary_key=True) + port_id = sql.Column(sql.String(36), sql.ForeignKey('ports.id'), + nullable=False) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/__init__.py new file mode 100644 index 00000000..44391ce4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/cfg.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/cfg.py new file mode 100644 index 00000000..63aa02da --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/cfg.py @@ -0,0 +1,236 @@ +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Mark McClain, DreamHost + +import itertools +from six import moves + +from neutron.agent.linux import utils +from neutron.plugins.common import constants as qconstants +from neutron.services.loadbalancer import constants + + +PROTOCOL_MAP = { + constants.PROTOCOL_TCP: 'tcp', + constants.PROTOCOL_HTTP: 'http', + constants.PROTOCOL_HTTPS: 'tcp', +} + +BALANCE_MAP = { + constants.LB_METHOD_ROUND_ROBIN: 'roundrobin', + constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn', + constants.LB_METHOD_SOURCE_IP: 'source' +} + +STATS_MAP = { + constants.STATS_ACTIVE_CONNECTIONS: 'scur', + constants.STATS_MAX_CONNECTIONS: 'smax', + constants.STATS_CURRENT_SESSIONS: 'scur', + constants.STATS_MAX_SESSIONS: 'smax', + constants.STATS_TOTAL_CONNECTIONS: 'stot', + constants.STATS_TOTAL_SESSIONS: 'stot', + constants.STATS_IN_BYTES: 'bin', + constants.STATS_OUT_BYTES: 'bout', + constants.STATS_CONNECTION_ERRORS: 'econ', + constants.STATS_RESPONSE_ERRORS: 'eresp' +} + +ACTIVE_PENDING_STATUSES = qconstants.ACTIVE_PENDING_STATUSES +INACTIVE = qconstants.INACTIVE + + +def save_config(conf_path, logical_config, socket_path=None, + user_group='nogroup'): + """Convert a logical configuration to the HAProxy version.""" + data = [] + data.extend(_build_global(logical_config, socket_path=socket_path, + user_group=user_group)) + data.extend(_build_defaults(logical_config)) + data.extend(_build_frontend(logical_config)) + data.extend(_build_backend(logical_config)) + utils.replace_file(conf_path, '\n'.join(data)) + + +def _build_global(config, socket_path=None, user_group='nogroup'): + opts = [ + 'daemon', + 'user nobody', + 'group %s' % user_group, + 'log /dev/log local0', + 'log /dev/log local1 notice' + ] + + if socket_path: + opts.append('stats socket %s mode 0666 level user' % socket_path) + + return itertools.chain(['global'], ('\t' + o for o in opts)) + + +def _build_defaults(config): + opts = [ + 'log global', + 'retries 3', + 'option redispatch', + 'timeout connect 5000', + 'timeout client 50000', + 'timeout server 50000', + ] + + return itertools.chain(['defaults'], ('\t' + o for o in opts)) + + +def _build_frontend(config): + protocol = config['vip']['protocol'] + + opts = [ + 'option tcplog', + 'bind %s:%d' % ( + _get_first_ip_from_port(config['vip']['port']), + config['vip']['protocol_port'] + ), + 'mode %s' % PROTOCOL_MAP[protocol], + 'default_backend %s' % config['pool']['id'], + ] + + if config['vip']['connection_limit'] >= 0: + opts.append('maxconn %s' % config['vip']['connection_limit']) + + if protocol == constants.PROTOCOL_HTTP: + opts.append('option forwardfor') + + return itertools.chain( + ['frontend %s' % config['vip']['id']], + ('\t' + o for o in opts) + ) + + +def _build_backend(config): + protocol = config['pool']['protocol'] + lb_method = config['pool']['lb_method'] + + opts = [ + 'mode %s' % PROTOCOL_MAP[protocol], + 'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin') + ] + + if protocol == constants.PROTOCOL_HTTP: + opts.append('option forwardfor') + + # add the first health_monitor (if available) + server_addon, health_opts = _get_server_health_option(config) + opts.extend(health_opts) + + # add session persistence (if available) + persist_opts = _get_session_persistence(config) + opts.extend(persist_opts) + + # add the members + for member in config['members']: + if ((member['status'] in ACTIVE_PENDING_STATUSES or + member['status'] == INACTIVE) + and member['admin_state_up']): + server = (('server %(id)s %(address)s:%(protocol_port)s ' + 'weight %(weight)s') % member) + 
server_addon + if _has_http_cookie_persistence(config): + server += ' cookie %d' % config['members'].index(member) + opts.append(server) + + return itertools.chain( + ['backend %s' % config['pool']['id']], + ('\t' + o for o in opts) + ) + + +def _get_first_ip_from_port(port): + for fixed_ip in port['fixed_ips']: + return fixed_ip['ip_address'] + + +def _get_server_health_option(config): + """return the first active health option.""" + for monitor in config['healthmonitors']: + # not checking the status of healthmonitor for two reasons: + # 1) status field is absent in HealthMonitor model + # 2) only active HealthMonitors are fetched with + # LoadBalancerCallbacks.get_logical_device + if monitor['admin_state_up']: + break + else: + return '', [] + + server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor + opts = [ + 'timeout check %ds' % monitor['timeout'] + ] + + if monitor['type'] in (constants.HEALTH_MONITOR_HTTP, + constants.HEALTH_MONITOR_HTTPS): + opts.append('option httpchk %(http_method)s %(url_path)s' % monitor) + opts.append( + 'http-check expect rstatus %s' % + '|'.join(_expand_expected_codes(monitor['expected_codes'])) + ) + + if monitor['type'] == constants.HEALTH_MONITOR_HTTPS: + opts.append('option ssl-hello-chk') + + return server_addon, opts + + +def _get_session_persistence(config): + persistence = config['vip'].get('session_persistence') + if not persistence: + return [] + + opts = [] + if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP: + opts.append('stick-table type ip size 10k') + opts.append('stick on src') + elif (persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE and + config.get('members')): + opts.append('cookie SRV insert indirect nocache') + elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and + persistence.get('cookie_name')): + opts.append('appsession %s len 56 timeout 3h' % + persistence['cookie_name']) + + return opts + + +def _has_http_cookie_persistence(config): + return (config['vip'].get('session_persistence') and + config['vip']['session_persistence']['type'] == + constants.SESSION_PERSISTENCE_HTTP_COOKIE) + + +def _expand_expected_codes(codes): + """Expand the expected code string in set of codes. + + 200-204 -> 200, 201, 202, 204 + 200, 203 -> 200, 203 + """ + + retval = set() + for code in codes.replace(',', ' ').split(' '): + code = code.strip() + + if not code: + continue + elif '-' in code: + low, hi = code.split('-')[:2] + retval.update(str(i) for i in moves.xrange(int(low), int(hi) + 1)) + else: + retval.add(code) + return retval diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py new file mode 100644 index 00000000..967c65e7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py @@ -0,0 +1,394 @@ +# Copyright 2013 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Mark McClain, DreamHost +import os +import shutil +import socket + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.common import utils as n_utils +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.loadbalancer.agent import agent_device_driver +from neutron.services.loadbalancer import constants as lb_const +from neutron.services.loadbalancer.drivers.haproxy import cfg as hacfg + +LOG = logging.getLogger(__name__) +NS_PREFIX = 'qlbaas-' +DRIVER_NAME = 'haproxy_ns' + +STATE_PATH_DEFAULT = '$state_path/lbaas' +USER_GROUP_DEFAULT = 'nogroup' +OPTS = [ + cfg.StrOpt( + 'loadbalancer_state_path', + default=STATE_PATH_DEFAULT, + help=_('Location to store config and state files'), + deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path')], + ), + cfg.StrOpt( + 'user_group', + default=USER_GROUP_DEFAULT, + help=_('The user group'), + deprecated_opts=[cfg.DeprecatedOpt('user_group')], + ), + cfg.IntOpt( + 'send_gratuitous_arp', + default=3, + help=_('When delete and re-add the same vip, send this many ' + 'gratuitous ARPs to flush the ARP cache in the Router. ' + 'Set it below or equal to 0 to disable this feature.'), + ) +] +cfg.CONF.register_opts(OPTS, 'haproxy') + + +class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): + def __init__(self, conf, plugin_rpc): + self.conf = conf + self.root_helper = config.get_root_helper(conf) + self.state_path = conf.haproxy.loadbalancer_state_path + try: + vif_driver = importutils.import_object(conf.interface_driver, conf) + except ImportError: + with excutils.save_and_reraise_exception(): + msg = (_('Error importing interface driver: %s') + % conf.haproxy.interface_driver) + LOG.error(msg) + + self.vif_driver = vif_driver + self.plugin_rpc = plugin_rpc + self.pool_to_port_id = {} + + @classmethod + def get_name(cls): + return DRIVER_NAME + + def create(self, logical_config): + pool_id = logical_config['pool']['id'] + namespace = get_ns_name(pool_id) + + self._plug(namespace, logical_config['vip']['port']) + self._spawn(logical_config) + + def update(self, logical_config): + pool_id = logical_config['pool']['id'] + pid_path = self._get_state_file_path(pool_id, 'pid') + + extra_args = ['-sf'] + extra_args.extend(p.strip() for p in open(pid_path, 'r')) + self._spawn(logical_config, extra_args) + + def _spawn(self, logical_config, extra_cmd_args=()): + pool_id = logical_config['pool']['id'] + namespace = get_ns_name(pool_id) + conf_path = self._get_state_file_path(pool_id, 'conf') + pid_path = self._get_state_file_path(pool_id, 'pid') + sock_path = self._get_state_file_path(pool_id, 'sock') + user_group = self.conf.haproxy.user_group + + hacfg.save_config(conf_path, logical_config, sock_path, user_group) + cmd = ['haproxy', '-f', conf_path, '-p', pid_path] + cmd.extend(extra_cmd_args) + + ns = ip_lib.IPWrapper(self.root_helper, namespace) + ns.netns.execute(cmd) + + # remember the pool<>port mapping + self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id'] + + @n_utils.synchronized('haproxy-driver') + def undeploy_instance(self, pool_id, cleanup_namespace=False): + namespace = 
get_ns_name(pool_id) + ns = ip_lib.IPWrapper(self.root_helper, namespace) + pid_path = self._get_state_file_path(pool_id, 'pid') + + # kill the process + kill_pids_in_file(self.root_helper, pid_path) + + # unplug the ports + if pool_id in self.pool_to_port_id: + self._unplug(namespace, self.pool_to_port_id[pool_id]) + + # delete all devices from namespace; + # used when deleting orphans and port_id is not known for pool_id + if cleanup_namespace: + for device in ns.get_devices(exclude_loopback=True): + self.vif_driver.unplug(device.name, namespace=namespace) + + # remove the configuration directory + conf_dir = os.path.dirname(self._get_state_file_path(pool_id, '')) + if os.path.isdir(conf_dir): + shutil.rmtree(conf_dir) + ns.garbage_collect_namespace() + + def exists(self, pool_id): + namespace = get_ns_name(pool_id) + root_ns = ip_lib.IPWrapper(self.root_helper) + + socket_path = self._get_state_file_path(pool_id, 'sock', False) + if root_ns.netns.exists(namespace) and os.path.exists(socket_path): + try: + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.connect(socket_path) + return True + except socket.error: + pass + return False + + def get_stats(self, pool_id): + socket_path = self._get_state_file_path(pool_id, 'sock', False) + TYPE_BACKEND_REQUEST = 2 + TYPE_SERVER_REQUEST = 4 + if os.path.exists(socket_path): + parsed_stats = self._get_stats_from_socket( + socket_path, + entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST) + pool_stats = self._get_backend_stats(parsed_stats) + pool_stats['members'] = self._get_servers_stats(parsed_stats) + return pool_stats + else: + LOG.warn(_('Stats socket not found for pool %s'), pool_id) + return {} + + def _get_backend_stats(self, parsed_stats): + TYPE_BACKEND_RESPONSE = '1' + for stats in parsed_stats: + if stats.get('type') == TYPE_BACKEND_RESPONSE: + unified_stats = dict((k, stats.get(v, '')) + for k, v in hacfg.STATS_MAP.items()) + return unified_stats + + return {} + + def _get_servers_stats(self, parsed_stats): + TYPE_SERVER_RESPONSE = '2' + res = {} + for stats in parsed_stats: + if stats.get('type') == TYPE_SERVER_RESPONSE: + res[stats['svname']] = { + lb_const.STATS_STATUS: (constants.INACTIVE + if stats['status'] == 'DOWN' + else constants.ACTIVE), + lb_const.STATS_HEALTH: stats['check_status'], + lb_const.STATS_FAILED_CHECKS: stats['chkfail'] + } + return res + + def _get_stats_from_socket(self, socket_path, entity_type): + try: + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.connect(socket_path) + s.send('show stat -1 %s -1\n' % entity_type) + raw_stats = '' + chunk_size = 1024 + while True: + chunk = s.recv(chunk_size) + raw_stats += chunk + if len(chunk) < chunk_size: + break + + return self._parse_stats(raw_stats) + except socket.error as e: + LOG.warn(_('Error while connecting to stats socket: %s'), e) + return {} + + def _parse_stats(self, raw_stats): + stat_lines = raw_stats.splitlines() + if len(stat_lines) < 2: + return [] + stat_names = [name.strip('# ') for name in stat_lines[0].split(',')] + res_stats = [] + for raw_values in stat_lines[1:]: + if not raw_values: + continue + stat_values = [value.strip() for value in raw_values.split(',')] + res_stats.append(dict(zip(stat_names, stat_values))) + + return res_stats + + def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True): + """Returns the file name for a given kind of config file.""" + confs_dir = os.path.abspath(os.path.normpath(self.state_path)) + conf_dir = os.path.join(confs_dir, pool_id) + if ensure_state_dir: + if not 
os.path.isdir(conf_dir): + os.makedirs(conf_dir, 0o755) + return os.path.join(conf_dir, kind) + + def _plug(self, namespace, port, reuse_existing=True): + self.plugin_rpc.plug_vip_port(port['id']) + interface_name = self.vif_driver.get_device_name(Wrap(port)) + + if ip_lib.device_exists(interface_name, self.root_helper, namespace): + if not reuse_existing: + raise exceptions.PreexistingDeviceFailure( + dev_name=interface_name + ) + else: + self.vif_driver.plug( + port['network_id'], + port['id'], + interface_name, + port['mac_address'], + namespace=namespace + ) + + cidrs = [ + '%s/%s' % (ip['ip_address'], + netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen) + for ip in port['fixed_ips'] + ] + self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace) + + gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip') + + if not gw_ip: + host_routes = port['fixed_ips'][0]['subnet'].get('host_routes', []) + for host_route in host_routes: + if host_route['destination'] == "0.0.0.0/0": + gw_ip = host_route['nexthop'] + break + + if gw_ip: + cmd = ['route', 'add', 'default', 'gw', gw_ip] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=namespace) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + # When delete and re-add the same vip, we need to + # send gratuitous ARP to flush the ARP cache in the Router. + gratuitous_arp = self.conf.haproxy.send_gratuitous_arp + if gratuitous_arp > 0: + for ip in port['fixed_ips']: + cmd_arping = ['arping', '-U', + '-I', interface_name, + '-c', gratuitous_arp, + ip['ip_address']] + ip_wrapper.netns.execute(cmd_arping, check_exit_code=False) + + def _unplug(self, namespace, port_id): + port_stub = {'id': port_id} + self.plugin_rpc.unplug_vip_port(port_id) + interface_name = self.vif_driver.get_device_name(Wrap(port_stub)) + self.vif_driver.unplug(interface_name, namespace=namespace) + + @n_utils.synchronized('haproxy-driver') + def deploy_instance(self, logical_config): + # do actual deploy only if vip and pool are configured and active + if (not logical_config or + 'vip' not in logical_config or + (logical_config['vip']['status'] not in + constants.ACTIVE_PENDING_STATUSES) or + not logical_config['vip']['admin_state_up'] or + (logical_config['pool']['status'] not in + constants.ACTIVE_PENDING_STATUSES) or + not logical_config['pool']['admin_state_up']): + return + + if self.exists(logical_config['pool']['id']): + self.update(logical_config) + else: + self.create(logical_config) + + def _refresh_device(self, pool_id): + logical_config = self.plugin_rpc.get_logical_device(pool_id) + self.deploy_instance(logical_config) + + def create_vip(self, vip): + self._refresh_device(vip['pool_id']) + + def update_vip(self, old_vip, vip): + self._refresh_device(vip['pool_id']) + + def delete_vip(self, vip): + self.undeploy_instance(vip['pool_id']) + + def create_pool(self, pool): + # nothing to do here because a pool needs a vip to be useful + pass + + def update_pool(self, old_pool, pool): + self._refresh_device(pool['id']) + + def delete_pool(self, pool): + # delete_pool may be called before vip deletion in case + # pool's admin state set to down + if self.exists(pool['id']): + self.undeploy_instance(pool['id']) + + def create_member(self, member): + self._refresh_device(member['pool_id']) + + def update_member(self, old_member, member): + self._refresh_device(member['pool_id']) + + def delete_member(self, member): + self._refresh_device(member['pool_id']) + + def create_pool_health_monitor(self, health_monitor, pool_id): + 
self._refresh_device(pool_id) + + def update_pool_health_monitor(self, old_health_monitor, health_monitor, + pool_id): + self._refresh_device(pool_id) + + def delete_pool_health_monitor(self, health_monitor, pool_id): + self._refresh_device(pool_id) + + def remove_orphans(self, known_pool_ids): + if not os.path.exists(self.state_path): + return + + orphans = (pool_id for pool_id in os.listdir(self.state_path) + if pool_id not in known_pool_ids) + for pool_id in orphans: + if self.exists(pool_id): + self.undeploy_instance(pool_id, cleanup_namespace=True) + + +# NOTE (markmcclain) For compliance with interface.py which expects objects +class Wrap(object): + """A light attribute wrapper for compatibility with the interface lib.""" + def __init__(self, d): + self.__dict__.update(d) + + def __getitem__(self, key): + return self.__dict__[key] + + +def get_ns_name(namespace_id): + return NS_PREFIX + namespace_id + + +def kill_pids_in_file(root_helper, pid_path): + if os.path.exists(pid_path): + with open(pid_path, 'r') as pids: + for pid in pids: + pid = pid.strip() + try: + utils.execute(['kill', '-9', pid], root_helper) + except RuntimeError: + LOG.exception( + _('Unable to kill haproxy process: %s'), + pid + ) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py new file mode 100644 index 00000000..6cdda7a9 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py @@ -0,0 +1,21 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.services.loadbalancer.drivers.common import agent_driver_base +from neutron.services.loadbalancer.drivers.haproxy import namespace_driver + + +class HaproxyOnHostPluginDriver(agent_driver_base.AgentDriverBase): + device_driver = namespace_driver.DRIVER_NAME diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/__init__.py new file mode 100644 index 00000000..4d42f0e6 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2013 Radware LLC (Radware) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Avishay Balderman, Radware diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/driver.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/driver.py new file mode 100644 index 00000000..30596621 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/driver.py @@ -0,0 +1,1095 @@ +# Copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Avishay Balderman, Radware + +import base64 +import copy +import httplib +import netaddr +import threading +import time + + +import eventlet +eventlet.monkey_patch(thread=True) + +from oslo.config import cfg +from six.moves import queue as Queue + +from neutron.api.v2 import attributes +from neutron.common import log as call_log +from neutron import context +from neutron.db.loadbalancer import loadbalancer_db as lb_db +from neutron.extensions import loadbalancer +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils as json +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.loadbalancer.drivers import abstract_driver +from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc + +LOG = logging.getLogger(__name__) + +RESP_STATUS = 0 +RESP_REASON = 1 +RESP_STR = 2 +RESP_DATA = 3 + +TEMPLATE_HEADER = {'Content-Type': + 'application/vnd.com.radware.vdirect.' + 'template-parameters+json'} +PROVISION_HEADER = {'Content-Type': + 'application/vnd.com.radware.' + 'vdirect.status+json'} +CREATE_SERVICE_HEADER = {'Content-Type': + 'application/vnd.com.radware.' + 'vdirect.adc-service-specification+json'} + +driver_opts = [ + cfg.StrOpt('vdirect_address', + help=_('IP address of vDirect server.')), + cfg.StrOpt('ha_secondary_address', + help=_('IP address of secondary vDirect server.')), + cfg.StrOpt('vdirect_user', + default='vDirect', + help=_('vDirect user name.')), + cfg.StrOpt('vdirect_password', + default='radware', + help=_('vDirect user password.')), + cfg.StrOpt('service_adc_type', + default="VA", + help=_('Service ADC type. Default: VA.')), + cfg.StrOpt('service_adc_version', + default="", + help=_('Service ADC version.')), + cfg.BoolOpt('service_ha_pair', + default=False, + help=_('Enables or disables the Service HA pair. ' + 'Default: False.')), + cfg.IntOpt('service_throughput', + default=1000, + help=_('Service throughput. Default: 1000.')), + cfg.IntOpt('service_ssl_throughput', + default=100, + help=_('Service SSL throughput. Default: 100.')), + cfg.IntOpt('service_compression_throughput', + default=100, + help=_('Service compression throughput. Default: 100.')), + cfg.IntOpt('service_cache', + default=20, + help=_('Size of service cache. Default: 20.')), + cfg.StrOpt('l2_l3_workflow_name', + default='openstack_l2_l3', + help=_('Name of l2_l3 workflow. 
Default: ' + 'openstack_l2_l3.')), + cfg.StrOpt('l4_workflow_name', + default='openstack_l4', + help=_('Name of l4 workflow. Default: openstack_l4.')), + cfg.DictOpt('l2_l3_ctor_params', + default={"service": "_REPLACE_", + "ha_network_name": "HA-Network", + "ha_ip_pool_name": "default", + "allocate_ha_vrrp": True, + "allocate_ha_ips": True, + "twoleg_enabled": "_REPLACE_"}, + help=_('Parameter for l2_l3 workflow constructor.')), + cfg.DictOpt('l2_l3_setup_params', + default={"data_port": 1, + "data_ip_address": "192.168.200.99", + "data_ip_mask": "255.255.255.0", + "gateway": "192.168.200.1", + "ha_port": 2}, + help=_('Parameter for l2_l3 workflow setup.')), + cfg.ListOpt('actions_to_skip', + default=['setup_l2_l3'], + help=_('List of actions that are not pushed to ' + 'the completion queue.')), + cfg.StrOpt('l4_action_name', + default='BaseCreate', + help=_('Name of the l4 workflow action. ' + 'Default: BaseCreate.')), + cfg.ListOpt('service_resource_pool_ids', + default=[], + help=_('Resource pool IDs.')), + cfg.IntOpt('service_isl_vlan', + default=-1, + help=_('A required VLAN for the interswitch link to use.')), + cfg.BoolOpt('service_session_mirroring_enabled', + default=False, + help=_('Enable or disable Alteon interswitch link for ' + 'stateful session failover. Default: False.')) +] + +cfg.CONF.register_opts(driver_opts, "radware") + + +class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver): + + """Radware lbaas driver.""" + + def __init__(self, plugin): + rad = cfg.CONF.radware + self.plugin = plugin + self.service = { + "haPair": rad.service_ha_pair, + "sessionMirroringEnabled": rad.service_session_mirroring_enabled, + "primary": { + "capacity": { + "throughput": rad.service_throughput, + "sslThroughput": rad.service_ssl_throughput, + "compressionThroughput": + rad.service_compression_throughput, + "cache": rad.service_cache + }, + "network": { + "type": "portgroup", + "portgroups": ['DATA_NETWORK'] + }, + "adcType": rad.service_adc_type, + "acceptableAdc": "Exact" + } + } + if rad.service_resource_pool_ids: + ids = rad.service_resource_pool_ids + self.service['resourcePoolIds'] = [ + {'name': id} for id in ids + ] + if rad.service_isl_vlan: + self.service['islVlan'] = rad.service_isl_vlan + self.l2_l3_wf_name = rad.l2_l3_workflow_name + self.l4_wf_name = rad.l4_workflow_name + self.l2_l3_ctor_params = rad.l2_l3_ctor_params + self.l2_l3_setup_params = rad.l2_l3_setup_params + self.l4_action_name = rad.l4_action_name + self.actions_to_skip = rad.actions_to_skip + vdirect_address = rad.vdirect_address + sec_server = rad.ha_secondary_address + self.rest_client = vDirectRESTClient(server=vdirect_address, + secondary_server=sec_server, + user=rad.vdirect_user, + password=rad.vdirect_password) + self.queue = Queue.Queue() + self.completion_handler = OperationCompletionHandler(self.queue, + self.rest_client, + plugin) + self.workflow_templates_exists = False + self.completion_handler.setDaemon(True) + self.completion_handler_started = False + + def _populate_vip_graph(self, context, vip): + ext_vip = self.plugin.populate_vip_graph(context, vip) + vip_network_id = self._get_vip_network_id(context, ext_vip) + pool_network_id = self._get_pool_network_id(context, ext_vip) + + # if VIP and PIP are different, we need an IP address for the PIP + # so create port on PIP's network and use its IP address + if vip_network_id != pool_network_id: + pip_address = self._create_port_for_pip( + context, + vip['tenant_id'], + _make_pip_name_from_vip(vip), + pool_network_id) + 
ext_vip['pip_address'] = pip_address + else: + ext_vip['pip_address'] = vip['address'] + + ext_vip['vip_network_id'] = vip_network_id + ext_vip['pool_network_id'] = pool_network_id + return ext_vip + + def create_vip(self, context, vip): + log_info = {'vip': vip, + 'extended_vip': 'NOT_ASSIGNED', + 'service_name': 'NOT_ASSIGNED'} + try: + ext_vip = self._populate_vip_graph(context, vip) + + service_name = self._get_service(ext_vip) + log_info['extended_vip'] = ext_vip + log_info['service_name'] = service_name + + self._create_workflow( + vip['pool_id'], self.l4_wf_name, + {"service": service_name}) + self._update_workflow( + vip['pool_id'], + self.l4_action_name, ext_vip, context) + + finally: + LOG.debug(_('vip: %(vip)s, ' + 'extended_vip: %(extended_vip)s, ' + 'service_name: %(service_name)s, '), + log_info) + + def update_vip(self, context, old_vip, vip): + ext_vip = self._populate_vip_graph(context, vip) + self._update_workflow( + vip['pool_id'], self.l4_action_name, + ext_vip, context, False, lb_db.Vip, vip['id']) + + def delete_vip(self, context, vip): + """Delete a Vip + + First delete it from the device. If deletion ended OK + - remove data from DB as well. + If the deletion failed - mark vip with error status in DB + + """ + + ext_vip = self._populate_vip_graph(context, vip) + params = _translate_vip_object_graph(ext_vip, + self.plugin, context) + ids = params.pop('__ids__') + + try: + # get neutron port id associated with the vip (present if vip and + # pip are different) and release it after workflow removed + port_filter = { + 'name': [_make_pip_name_from_vip(vip)], + } + ports = self.plugin._core_plugin.get_ports(context, + filters=port_filter) + if ports: + LOG.debug(_('Retrieved pip nport: %(port)r for ' + 'vip: %(vip)s'), {'port': ports[0], + 'vip': vip['id']}) + + delete_pip_nport_function = self._get_delete_pip_nports( + context, ports) + else: + delete_pip_nport_function = None + LOG.debug(_('Found no pip nports associated with ' + 'vip: %s'), vip['id']) + + # removing the WF will cause deletion of the configuration from the + # device + self._remove_workflow(ids, context, delete_pip_nport_function) + + except r_exc.RESTRequestFailure: + pool_id = ext_vip['pool_id'] + LOG.exception(_('Failed to remove workflow %s. 
' + 'Going to set vip to ERROR status'), + pool_id) + + self.plugin.update_status(context, lb_db.Vip, ids['vip'], + constants.ERROR) + + def _get_delete_pip_nports(self, context, ports): + def _delete_pip_nports(success): + if success: + for port in ports: + try: + self.plugin._core_plugin.delete_port( + context, port['id']) + LOG.debug(_('pip nport id: %s'), port['id']) + except Exception as exception: + # stop exception propagation, nport may have + # been deleted by other means + LOG.warning(_('pip nport delete failed: %r'), + exception) + return _delete_pip_nports + + def create_pool(self, context, pool): + # nothing to do + pass + + def update_pool(self, context, old_pool, pool): + self._handle_pool(context, pool) + + def delete_pool(self, context, pool,): + self._handle_pool(context, pool, delete=True) + + def _handle_pool(self, context, pool, delete=False): + vip_id = self.plugin.get_pool(context, pool['id']).get('vip_id', None) + if vip_id: + if delete: + raise loadbalancer.PoolInUse(pool_id=pool['id']) + else: + vip = self.plugin.get_vip(context, vip_id) + ext_vip = self._populate_vip_graph(context, vip) + self._update_workflow( + pool['id'], self.l4_action_name, + ext_vip, context, delete, lb_db.Pool, pool['id']) + else: + if delete: + self.plugin._delete_db_pool(context, pool['id']) + else: + # we keep the pool in PENDING_UPDATE + # no point to modify it since it is not connected to vip yet + pass + + def create_member(self, context, member): + self._handle_member(context, member) + + def update_member(self, context, old_member, member): + self._handle_member(context, member) + + def delete_member(self, context, member): + self._handle_member(context, member, delete=True) + + def _handle_member(self, context, member, delete=False): + """Navigate the model. If a Vip is found - activate a bulk WF action. + """ + vip_id = self.plugin.get_pool( + context, member['pool_id']).get('vip_id') + if vip_id: + vip = self.plugin.get_vip(context, vip_id) + ext_vip = self._populate_vip_graph(context, vip) + self._update_workflow( + member['pool_id'], self.l4_action_name, + ext_vip, context, + delete, lb_db.Member, member['id']) + # We have to delete this member but it is not connected to a vip yet + elif delete: + self.plugin._delete_db_member(context, member['id']) + + def create_health_monitor(self, context, health_monitor): + # Anything to do here? the hm is not connected to the graph yet + pass + + def update_pool_health_monitor(self, context, old_health_monitor, + health_monitor, + pool_id): + self._handle_pool_health_monitor(context, health_monitor, pool_id) + + def create_pool_health_monitor(self, context, + health_monitor, pool_id): + self._handle_pool_health_monitor(context, health_monitor, pool_id) + + def delete_pool_health_monitor(self, context, health_monitor, pool_id): + self._handle_pool_health_monitor(context, health_monitor, pool_id, + True) + + def _handle_pool_health_monitor(self, context, health_monitor, + pool_id, delete=False): + """Push a graph to vDirect + + Navigate the model. Check if a pool is associated to the vip + and push the graph to vDirect + + """ + + vip_id = self.plugin.get_pool(context, pool_id).get('vip_id', None) + + debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id, + "delete": delete, "vip_id": vip_id} + LOG.debug(_('_handle_pool_health_monitor. 
health_monitor = %(hm_id)s ' + 'pool_id = %(pool_id)s delete = %(delete)s ' + 'vip_id = %(vip_id)s'), + debug_params) + + if vip_id: + vip = self.plugin.get_vip(context, vip_id) + ext_vip = self._populate_vip_graph(context, vip) + self._update_workflow(pool_id, self.l4_action_name, + ext_vip, context, + delete, lb_db.PoolMonitorAssociation, + health_monitor['id']) + elif delete: + self.plugin._delete_db_pool_health_monitor(context, + health_monitor['id'], + pool_id) + + def stats(self, context, pool_id): + # TODO(avishayb) implement + return {"bytes_in": 0, + "bytes_out": 0, + "active_connections": 0, + "total_connections": 0} + + def _get_vip_network_id(self, context, extended_vip): + subnet = self.plugin._core_plugin.get_subnet( + context, extended_vip['subnet_id']) + return subnet['network_id'] + + def _start_completion_handling_thread(self): + if not self.completion_handler_started: + LOG.info(_('Starting operation completion handling thread')) + self.completion_handler.start() + self.completion_handler_started = True + + def _get_pool_network_id(self, context, extended_vip): + subnet = self.plugin._core_plugin.get_subnet( + context, extended_vip['pool']['subnet_id']) + return subnet['network_id'] + + @call_log.log + def _update_workflow(self, wf_name, action, + wf_params, context, + delete=False, + lbaas_entity=None, entity_id=None): + """Update the WF state. Push the result to a queue for processing.""" + + if not self.workflow_templates_exists: + self._verify_workflow_templates() + + if action not in self.actions_to_skip: + params = _translate_vip_object_graph(wf_params, + self.plugin, + context) + else: + params = wf_params + + resource = '/api/workflow/%s/action/%s' % (wf_name, action) + response = _rest_wrapper(self.rest_client.call('POST', resource, + {'parameters': params}, + TEMPLATE_HEADER)) + LOG.debug(_('_update_workflow response: %s '), response) + + if action not in self.actions_to_skip: + ids = params.pop('__ids__', None) + oper = OperationAttributes(response['uri'], + ids, + lbaas_entity, + entity_id, + delete=delete) + LOG.debug(_('Pushing operation %s to the queue'), oper) + + self._start_completion_handling_thread() + self.queue.put_nowait(oper) + + def _remove_workflow(self, ids, context, post_remove_function): + + wf_name = ids['pool'] + LOG.debug(_('Remove the workflow %s') % wf_name) + resource = '/api/workflow/%s' % (wf_name) + rest_return = self.rest_client.call('DELETE', resource, None, None) + response = _rest_wrapper(rest_return, [204, 202, 404]) + if rest_return[RESP_STATUS] in [404]: + if post_remove_function: + try: + post_remove_function(True) + LOG.debug(_('Post-remove workflow function ' + '%r completed'), post_remove_function) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Post-remove workflow function ' + '%r failed'), post_remove_function) + self.plugin._delete_db_vip(context, ids['vip']) + else: + oper = OperationAttributes( + response['uri'], + ids, + lb_db.Vip, + ids['vip'], + delete=True, + post_op_function=post_remove_function) + LOG.debug(_('Pushing operation %s to the queue'), oper) + + self._start_completion_handling_thread() + self.queue.put_nowait(oper) + + def _remove_service(self, service_name): + resource = '/api/service/%s' % (service_name) + _rest_wrapper(self.rest_client.call('DELETE', + resource, None, None), + [202]) + + def _get_service(self, ext_vip): + """Get a service name. + + if you can't find one, + create a service and create l2_l3 WF. 
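+        The service name is derived from the vip and pool network ids, so
+        vips whose networks match reuse an existing ADC service.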
+ + """ + if not self.workflow_templates_exists: + self._verify_workflow_templates() + if ext_vip['vip_network_id'] != ext_vip['pool_network_id']: + networks_name = '%s_%s' % (ext_vip['vip_network_id'], + ext_vip['pool_network_id']) + self.l2_l3_ctor_params["twoleg_enabled"] = True + else: + networks_name = ext_vip['vip_network_id'] + self.l2_l3_ctor_params["twoleg_enabled"] = False + incoming_service_name = 'srv_%s' % (networks_name,) + service_name = self._get_available_service(incoming_service_name) + if not service_name: + LOG.debug( + 'Could not find a service named ' + incoming_service_name) + service_name = self._create_service(ext_vip['vip_network_id'], + ext_vip['pool_network_id'], + ext_vip['tenant_id']) + self.l2_l3_ctor_params["service"] = incoming_service_name + wf_name = 'l2_l3_' + networks_name + self._create_workflow( + wf_name, self.l2_l3_wf_name, self.l2_l3_ctor_params) + self._update_workflow( + wf_name, "setup_l2_l3", self.l2_l3_setup_params, None) + else: + LOG.debug('A service named ' + service_name + ' was found.') + return service_name + + def _create_service(self, vip_network_id, pool_network_id, tenant_id): + """create the service and provision it (async).""" + # 1) create the service + service = copy.deepcopy(self.service) + if vip_network_id != pool_network_id: + service_name = 'srv_%s_%s' % (vip_network_id, pool_network_id) + service['primary']['network']['portgroups'] = [vip_network_id, + pool_network_id] + else: + service_name = 'srv_' + vip_network_id + service['primary']['network']['portgroups'] = [vip_network_id] + resource = '/api/service?name=%s&tenant=%s' % (service_name, tenant_id) + + response = _rest_wrapper(self.rest_client.call('POST', resource, + service, + CREATE_SERVICE_HEADER), [201]) + + # 2) provision the service + provision_uri = response['links']['actions']['provision'] + _rest_wrapper(self.rest_client.call('POST', provision_uri, + None, PROVISION_HEADER)) + return service_name + + def _get_available_service(self, service_name): + """Check if service exsists and return its name if it does.""" + resource = '/api/service/' + service_name + try: + _rest_wrapper(self.rest_client.call('GET', + resource, + None, None), [200]) + except Exception: + return + return service_name + + def _workflow_exists(self, pool_id): + """Check if a WF having the name of the pool_id exists.""" + resource = '/api/workflow/' + pool_id + try: + _rest_wrapper(self.rest_client.call('GET', + resource, + None, + None), [200]) + except Exception: + return False + return True + + def _create_workflow(self, wf_name, wf_template_name, + create_workflow_params=None): + """Create a WF if it doesn't exists yet.""" + if not self.workflow_templates_exists: + self._verify_workflow_templates() + if not self._workflow_exists(wf_name): + if not create_workflow_params: + create_workflow_params = {} + resource = '/api/workflowTemplate/%s?name=%s' % ( + wf_template_name, wf_name) + params = {'parameters': create_workflow_params} + response = _rest_wrapper(self.rest_client.call('POST', + resource, + params, + TEMPLATE_HEADER)) + LOG.debug(_('create_workflow response: %s'), str(response)) + + def _verify_workflow_templates(self): + """Verify the existence of workflows on vDirect server.""" + workflows = {self.l2_l3_wf_name: + False, self.l4_wf_name: False} + resource = '/api/workflowTemplate' + response = _rest_wrapper(self.rest_client.call('GET', + resource, + None, + None), [200]) + for wf in workflows.keys(): + for wf_template in response: + if wf == wf_template['name']: + 
workflows[wf] = True + break + for wf, found in workflows.items(): + if not found: + raise r_exc.WorkflowMissing(workflow=wf) + self.workflow_templates_exists = True + + def _create_port_for_pip(self, context, tenant_id, port_name, subnet): + """Creates port on subnet, returns that port's IP.""" + + # create port, we just want any IP allocated to the port based on the + # network id, so setting 'fixed_ips' to ATTR_NOT_SPECIFIED + port_data = { + 'tenant_id': tenant_id, + 'name': port_name, + 'network_id': subnet, + 'mac_address': attributes.ATTR_NOT_SPECIFIED, + 'admin_state_up': False, + 'device_id': '', + 'device_owner': 'neutron:' + constants.LOADBALANCER, + 'fixed_ips': attributes.ATTR_NOT_SPECIFIED + } + port = self.plugin._core_plugin.create_port(context, + {'port': port_data}) + return port['fixed_ips'][0]['ip_address'] + + +class vDirectRESTClient: + """REST server proxy to Radware vDirect.""" + + def __init__(self, + server='localhost', + secondary_server=None, + user=None, + password=None, + port=2189, + ssl=True, + timeout=5000, + base_uri=''): + self.server = server + self.secondary_server = secondary_server + self.port = port + self.ssl = ssl + self.base_uri = base_uri + self.timeout = timeout + if user and password: + self.auth = base64.encodestring('%s:%s' % (user, password)) + self.auth = self.auth.replace('\n', '') + else: + raise r_exc.AuthenticationMissing() + + debug_params = {'server': self.server, + 'sec_server': self.secondary_server, + 'port': self.port, + 'ssl': self.ssl} + LOG.debug(_('vDirectRESTClient:init server=%(server)s, ' + 'secondary server=%(sec_server)s, ' + 'port=%(port)d, ' + 'ssl=%(ssl)r'), debug_params) + + def _flip_servers(self): + LOG.warning(_('Fliping servers. Current is: %(server)s, ' + 'switching to %(secondary)s'), + {'server': self.server, + 'secondary': self.secondary_server}) + self.server, self.secondary_server = self.secondary_server, self.server + + def _recover(self, action, resource, data, headers, binary=False): + if self.server and self.secondary_server: + self._flip_servers() + resp = self._call(action, resource, data, + headers, binary) + return resp + else: + LOG.exception(_('REST client is not able to recover ' + 'since only one vDirect server is ' + 'configured.')) + return -1, None, None, None + + def call(self, action, resource, data, headers, binary=False): + resp = self._call(action, resource, data, headers, binary) + if resp[RESP_STATUS] == -1: + LOG.warning(_('vDirect server is not responding (%s).'), + self.server) + return self._recover(action, resource, data, headers, binary) + elif resp[RESP_STATUS] in (301, 307): + LOG.warning(_('vDirect server is not active (%s).'), + self.server) + return self._recover(action, resource, data, headers, binary) + else: + return resp + + @call_log.log + def _call(self, action, resource, data, headers, binary=False): + if resource.startswith('http'): + uri = resource + else: + uri = self.base_uri + resource + if binary: + body = data + else: + body = json.dumps(data) + + debug_data = 'binary' if binary else body + debug_data = debug_data if debug_data else 'EMPTY' + if not headers: + headers = {'Authorization': 'Basic %s' % self.auth} + else: + headers['Authorization'] = 'Basic %s' % self.auth + conn = None + if self.ssl: + conn = httplib.HTTPSConnection( + self.server, self.port, timeout=self.timeout) + if conn is None: + LOG.error(_('vdirectRESTClient: Could not establish HTTPS ' + 'connection')) + return 0, None, None, None + else: + conn = httplib.HTTPConnection( + self.server, 
self.port, timeout=self.timeout) + if conn is None: + LOG.error(_('vdirectRESTClient: Could not establish HTTP ' + 'connection')) + return 0, None, None, None + + try: + conn.request(action, uri, body, headers) + response = conn.getresponse() + respstr = response.read() + respdata = respstr + try: + respdata = json.loads(respstr) + except ValueError: + # response was not JSON, ignore the exception + pass + ret = (response.status, response.reason, respstr, respdata) + except Exception as e: + log_dict = {'action': action, 'e': e} + LOG.error(_('vdirectRESTClient: %(action)s failure, %(e)r'), + log_dict) + ret = -1, None, None, None + conn.close() + return ret + + +class OperationAttributes: + + """Holds operation attributes. + + The parameter 'post_op_function' (if supplied) is a function that takes + one boolean argument, specifying the success of the operation + + """ + + def __init__(self, + operation_url, + object_graph, + lbaas_entity=None, + entity_id=None, + delete=False, + post_op_function=None): + self.operation_url = operation_url + self.object_graph = object_graph + self.delete = delete + self.lbaas_entity = lbaas_entity + self.entity_id = entity_id + self.creation_time = time.time() + self.post_op_function = post_op_function + + def __repr__(self): + items = ("%s = %r" % (k, v) for k, v in self.__dict__.items()) + return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items)) + + +class OperationCompletionHandler(threading.Thread): + + """Update DB with operation status or delete the entity from DB.""" + + def __init__(self, queue, rest_client, plugin): + threading.Thread.__init__(self) + self.queue = queue + self.rest_client = rest_client + self.plugin = plugin + self.stoprequest = threading.Event() + self.opers_to_handle_before_rest = 0 + + def join(self, timeout=None): + self.stoprequest.set() + super(OperationCompletionHandler, self).join(timeout) + + def handle_operation_completion(self, oper): + result = self.rest_client.call('GET', + oper.operation_url, + None, + None) + completed = result[RESP_DATA]['complete'] + reason = result[RESP_REASON], + description = result[RESP_STR] + if completed: + # operation is done - update the DB with the status + # or delete the entire graph from DB + success = result[RESP_DATA]['success'] + sec_to_completion = time.time() - oper.creation_time + debug_data = {'oper': oper, + 'sec_to_completion': sec_to_completion, + 'success': success} + LOG.debug(_('Operation %(oper)s is completed after ' + '%(sec_to_completion)d sec ' + 'with success status: %(success)s :'), + debug_data) + db_status = None + if not success: + # failure - log it and set the return ERROR as DB state + if reason or description: + msg = 'Reason:%s. Description:%s' % (reason, description) + else: + msg = "unknown" + error_params = {"operation": oper, "msg": msg} + LOG.error(_('Operation %(operation)s failed. Reason: %(msg)s'), + error_params) + db_status = constants.ERROR + else: + if oper.delete: + _remove_object_from_db(self.plugin, oper) + else: + db_status = constants.ACTIVE + + if db_status: + _update_vip_graph_status(self.plugin, oper, db_status) + + OperationCompletionHandler._run_post_op_function(success, oper) + + return completed + + def run(self): + while not self.stoprequest.isSet(): + try: + oper = self.queue.get(timeout=1) + + # Get the current queue size (N) and set the counter with it. + # Handle N operations with no intermission. + # Once N operations handles, get the size again and repeat. 
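# In other words: drain everything that is already queued back to back, then
# pause for one second before touching operations that were re-queued (still
# incomplete) or that arrived meanwhile, so vDirect is not polled in a busy loop.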
+ if self.opers_to_handle_before_rest <= 0: + self.opers_to_handle_before_rest = self.queue.qsize() + 1 + + LOG.debug('Operation consumed from the queue: ' + + str(oper)) + # check the status - if oper is done: update the db , + # else push the oper again to the queue + if not self.handle_operation_completion(oper): + LOG.debug(_('Operation %s is not completed yet..') % oper) + # Not completed - push to the queue again + self.queue.put_nowait(oper) + + self.queue.task_done() + self.opers_to_handle_before_rest -= 1 + + # Take one second rest before start handling + # new operations or operations handled before + if self.opers_to_handle_before_rest <= 0: + time.sleep(1) + + except Queue.Empty: + continue + except Exception: + m = _("Exception was thrown inside OperationCompletionHandler") + LOG.exception(m) + + @staticmethod + def _run_post_op_function(success, oper): + if oper.post_op_function: + log_data = {'func': oper.post_op_function, 'oper': oper} + try: + oper.post_op_function(success) + LOG.debug(_('Post-operation function ' + '%(func)r completed ' + 'after operation %(oper)r'), + log_data) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Post-operation function ' + '%(func)r failed ' + 'after operation %(oper)r'), + log_data) + + +def _rest_wrapper(response, success_codes=[202]): + """Wrap a REST call and make sure a valid status is returned.""" + if not response: + raise r_exc.RESTRequestFailure( + status=-1, + reason="Unknown", + description="Unknown", + success_codes=success_codes + ) + elif response[RESP_STATUS] not in success_codes: + raise r_exc.RESTRequestFailure( + status=response[RESP_STATUS], + reason=response[RESP_REASON], + description=response[RESP_STR], + success_codes=success_codes + ) + else: + return response[RESP_DATA] + + +def _make_pip_name_from_vip(vip): + """Standard way of making PIP name based on VIP ID.""" + return 'pip_' + vip['id'] + + +def _update_vip_graph_status(plugin, oper, status): + """Update the status + + Of all the Vip object graph + or a specific entity in the graph. 
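For readers of this excerpt: every REST call in this driver returns a 4-tuple indexed by the RESP_* constants defined earlier in the module, and _rest_wrapper is the choke point that turns unexpected statuses into RESTRequestFailure. An illustrative usage, with the pool_id value assumed:

# The client returns (status, reason, raw_body, parsed_json); RESP_STATUS,
# RESP_REASON, RESP_STR and RESP_DATA index into that tuple.
result = rest_client.call('GET', '/api/workflow/' + pool_id, None, None)
if result[RESP_STATUS] == 200:
    body = result[RESP_DATA]          # decoded JSON
# or, equivalently, let the wrapper enforce the accepted status codes:
body = _rest_wrapper(rest_client.call('GET', '/api/workflow/' + pool_id,
                                      None, None), [200])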
+ + """ + + ctx = context.get_admin_context(load_admin_roles=False) + + LOG.debug(_('_update: %s '), oper) + if oper.lbaas_entity == lb_db.PoolMonitorAssociation: + plugin.update_pool_health_monitor(ctx, + oper.entity_id, + oper.object_graph['pool'], + status) + elif oper.entity_id: + plugin.update_status(ctx, + oper.lbaas_entity, + oper.entity_id, + status) + else: + _update_vip_graph_status_cascade(plugin, + oper.object_graph, + ctx, status) + + +def _update_vip_graph_status_cascade(plugin, ids, ctx, status): + plugin.update_status(ctx, + lb_db.Vip, + ids['vip'], + status) + plugin.update_status(ctx, + lb_db.Pool, + ids['pool'], + status) + for member_id in ids['members']: + plugin.update_status(ctx, + lb_db.Member, + member_id, + status) + for hm_id in ids['health_monitors']: + plugin.update_pool_health_monitor(ctx, + hm_id, + ids['pool'], + status) + + +def _remove_object_from_db(plugin, oper): + """Remove a specific entity from db.""" + LOG.debug(_('_remove_object_from_db %s'), str(oper)) + + ctx = context.get_admin_context(load_admin_roles=False) + + if oper.lbaas_entity == lb_db.PoolMonitorAssociation: + plugin._delete_db_pool_health_monitor(ctx, + oper.entity_id, + oper.object_graph['pool']) + elif oper.lbaas_entity == lb_db.Member: + plugin._delete_db_member(ctx, oper.entity_id) + elif oper.lbaas_entity == lb_db.Vip: + plugin._delete_db_vip(ctx, oper.entity_id) + elif oper.lbaas_entity == lb_db.Pool: + plugin._delete_db_pool(ctx, oper.entity_id) + else: + raise r_exc.UnsupportedEntityOperation( + operation='Remove from DB', entity=oper.lbaas_entity + ) + +TRANSLATION_DEFAULTS = {'session_persistence_type': 'none', + 'session_persistence_cookie_name': 'none', + 'url_path': '/', + 'http_method': 'GET', + 'expected_codes': '200', + 'subnet': '255.255.255.255', + 'mask': '255.255.255.255', + 'gw': '255.255.255.255', + } +VIP_PROPERTIES = ['address', 'protocol_port', 'protocol', 'connection_limit', + 'admin_state_up', 'session_persistence_type', + 'session_persistence_cookie_name'] +POOL_PROPERTIES = ['protocol', 'lb_method', 'admin_state_up'] +MEMBER_PROPERTIES = ['address', 'protocol_port', 'weight', 'admin_state_up', + 'subnet', 'mask', 'gw'] +HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries', + 'admin_state_up', 'url_path', 'http_method', + 'expected_codes', 'id'] + + +def _translate_vip_object_graph(extended_vip, plugin, context): + """Translate the extended vip + + translate to a structure that can be + understood by the workflow. 
+ + """ + def _create_key(prefix, property_name): + return prefix + '_' + property_name + '_array' + + def _trans_prop_name(prop_name): + if prop_name == 'id': + return 'uuid' + else: + return prop_name + + def get_ids(extended_vip): + ids = {} + ids['vip'] = extended_vip['id'] + ids['pool'] = extended_vip['pool']['id'] + ids['members'] = [m['id'] for m in extended_vip['members']] + ids['health_monitors'] = [ + hm['id'] for hm in extended_vip['health_monitors'] + ] + return ids + + trans_vip = {} + LOG.debug('Vip graph to be translated: ' + str(extended_vip)) + for vip_property in VIP_PROPERTIES: + trans_vip['vip_' + vip_property] = extended_vip.get( + vip_property, TRANSLATION_DEFAULTS.get(vip_property)) + for pool_property in POOL_PROPERTIES: + trans_vip['pool_' + pool_property] = extended_vip[ + 'pool'][pool_property] + for member_property in MEMBER_PROPERTIES: + trans_vip[_create_key('member', member_property)] = [] + + two_leg = (extended_vip['pip_address'] != extended_vip['address']) + if two_leg: + pool_subnet = plugin._core_plugin.get_subnet( + context, extended_vip['pool']['subnet_id']) + + for member in extended_vip['members']: + if member['status'] != constants.PENDING_DELETE: + if (two_leg and netaddr.IPAddress(member['address']) + not in netaddr.IPNetwork(pool_subnet['cidr'])): + member_ports = plugin._core_plugin.get_ports( + context, + filters={'fixed_ips': {'ip_address': [member['address']]}, + 'tenant_id': [extended_vip['tenant_id']]}) + if len(member_ports) == 1: + member_subnet = plugin._core_plugin.get_subnet( + context, + member_ports[0]['fixed_ips'][0]['subnet_id']) + member_network = netaddr.IPNetwork(member_subnet['cidr']) + member['subnet'] = str(member_network.network) + member['mask'] = str(member_network.netmask) + else: + member['subnet'] = member['address'] + + member['gw'] = pool_subnet['gateway_ip'] + + for member_property in MEMBER_PROPERTIES: + trans_vip[_create_key('member', member_property)].append( + member.get(member_property, + TRANSLATION_DEFAULTS.get(member_property))) + + for hm_property in HEALTH_MONITOR_PROPERTIES: + trans_vip[ + _create_key('hm', _trans_prop_name(hm_property))] = [] + for hm in extended_vip['health_monitors']: + hm_pool = plugin.get_pool_health_monitor(context, + hm['id'], + extended_vip['pool']['id']) + if hm_pool['status'] != constants.PENDING_DELETE: + for hm_property in HEALTH_MONITOR_PROPERTIES: + value = hm.get(hm_property, + TRANSLATION_DEFAULTS.get(hm_property)) + trans_vip[_create_key('hm', + _trans_prop_name(hm_property))].append(value) + ids = get_ids(extended_vip) + trans_vip['__ids__'] = ids + for key in ['pip_address']: + if key in extended_vip: + trans_vip[key] = extended_vip[key] + LOG.debug('Translated Vip graph: ' + str(trans_vip)) + return trans_vip diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/exceptions.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/exceptions.py new file mode 100644 index 00000000..4c7b4c5d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/drivers/radware/exceptions.py @@ -0,0 +1,42 @@ +# Copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Evgeny Fedoruk, Radware + + +from neutron.common import exceptions + + +class RadwareLBaasException(exceptions.NeutronException): + message = _('An unknown exception occurred in Radware LBaaS provider.') + + +class AuthenticationMissing(RadwareLBaasException): + message = _('vDirect user/password missing. ' + 'Specify in configuration file, under [radware] section') + + +class WorkflowMissing(RadwareLBaasException): + message = _('Workflow %(workflow)s is missing on vDirect server. ' + 'Upload missing workflow') + + +class RESTRequestFailure(RadwareLBaasException): + message = _('REST request failed with status %(status)s. ' + 'Reason: %(reason)s, Description: %(description)s. ' + 'Success status codes are %(success_codes)s') + + +class UnsupportedEntityOperation(RadwareLBaasException): + message = _('%(operation)s operation is not supported for %(entity)s.') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/plugin.py new file mode 100644 index 00000000..4e992e08 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/loadbalancer/plugin.py @@ -0,0 +1,326 @@ +# +# Copyright 2013 Radware LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Avishay Balderman, Radware + +from neutron.api.v2 import attributes as attrs +from neutron.common import exceptions as n_exc +from neutron import context +from neutron.db import api as qdbapi +from neutron.db.loadbalancer import loadbalancer_db as ldb +from neutron.db import servicetype_db as st_db +from neutron.extensions import loadbalancer +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.loadbalancer import agent_scheduler +from neutron.services import provider_configuration as pconf +from neutron.services import service_base + +LOG = logging.getLogger(__name__) + + +class LoadBalancerPlugin(ldb.LoadBalancerPluginDb, + agent_scheduler.LbaasAgentSchedulerDbMixin): + """Implementation of the Neutron Loadbalancer Service Plugin. + + This class manages the workflow of LBaaS request/response. + Most DB related works are implemented in class + loadbalancer_db.LoadBalancerPluginDb. 
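One point worth calling out before the individual methods: deletion is asymmetric. The plugin only marks the row PENDING_DELETE and notifies the provider driver; the driver (for example the Radware driver above, via its completion handler) later calls back the _delete_db_* helpers to actually remove the row. A condensed, illustrative sketch of the two halves:

# Illustrative: the two halves of a delete in this plugin.
def delete_member(self, context, id):
    self.update_status(context, ldb.Member, id, constants.PENDING_DELETE)
    m = self.get_member(context, id)
    driver = self._get_driver_for_pool(context, m['pool_id'])
    driver.delete_member(context, m)          # usually asynchronous

# ...and later, from the driver once the backend confirms removal:
#     plugin._delete_db_member(context, member_id)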
+ """ + supported_extension_aliases = ["lbaas", + "lbaas_agent_scheduler", + "service-type"] + + # lbaas agent notifiers to handle agent update operations; + # can be updated by plugin drivers while loading; + # will be extracted by neutron manager when loading service plugins; + agent_notifiers = {} + + def __init__(self): + """Initialization for the loadbalancer service plugin.""" + + qdbapi.register_models() + self.service_type_manager = st_db.ServiceTypeManager.get_instance() + self._load_drivers() + + def _load_drivers(self): + """Loads plugin-drivers specified in configuration.""" + self.drivers, self.default_provider = service_base.load_drivers( + constants.LOADBALANCER, self) + + # we're at the point when extensions are not loaded yet + # so prevent policy from being loaded + ctx = context.get_admin_context(load_admin_roles=False) + # stop service in case provider was removed, but resources were not + self._check_orphan_pool_associations(ctx, self.drivers.keys()) + + def _check_orphan_pool_associations(self, context, provider_names): + """Checks remaining associations between pools and providers. + + If admin has not undeployed resources with provider that was deleted + from configuration, neutron service is stopped. Admin must delete + resources prior to removing providers from configuration. + """ + pools = self.get_pools(context) + lost_providers = set([pool['provider'] for pool in pools + if pool['provider'] not in provider_names]) + # resources are left without provider - stop the service + if lost_providers: + msg = _("Delete associated loadbalancer pools before " + "removing providers %s") % list(lost_providers) + LOG.exception(msg) + raise SystemExit(1) + + def _get_driver_for_provider(self, provider): + if provider in self.drivers: + return self.drivers[provider] + # raise if not associated (should never be reached) + raise n_exc.Invalid(_("Error retrieving driver for provider %s") % + provider) + + def _get_driver_for_pool(self, context, pool_id): + pool = self.get_pool(context, pool_id) + try: + return self.drivers[pool['provider']] + except KeyError: + raise n_exc.Invalid(_("Error retrieving provider for pool %s") % + pool_id) + + def get_plugin_type(self): + return constants.LOADBALANCER + + def get_plugin_description(self): + return "Neutron LoadBalancer Service Plugin" + + def create_vip(self, context, vip): + v = super(LoadBalancerPlugin, self).create_vip(context, vip) + driver = self._get_driver_for_pool(context, v['pool_id']) + driver.create_vip(context, v) + return v + + def update_vip(self, context, id, vip): + if 'status' not in vip['vip']: + vip['vip']['status'] = constants.PENDING_UPDATE + old_vip = self.get_vip(context, id) + v = super(LoadBalancerPlugin, self).update_vip(context, id, vip) + driver = self._get_driver_for_pool(context, v['pool_id']) + driver.update_vip(context, old_vip, v) + return v + + def _delete_db_vip(self, context, id): + # proxy the call until plugin inherits from DBPlugin + super(LoadBalancerPlugin, self).delete_vip(context, id) + + def delete_vip(self, context, id): + self.update_status(context, ldb.Vip, + id, constants.PENDING_DELETE) + v = self.get_vip(context, id) + driver = self._get_driver_for_pool(context, v['pool_id']) + driver.delete_vip(context, v) + + def _get_provider_name(self, context, pool): + if ('provider' in pool and + pool['provider'] != attrs.ATTR_NOT_SPECIFIED): + provider_name = pconf.normalize_provider_name(pool['provider']) + self.validate_provider(provider_name) + return provider_name + else: + if not 
self.default_provider: + raise pconf.DefaultServiceProviderNotFound( + service_type=constants.LOADBALANCER) + return self.default_provider + + def create_pool(self, context, pool): + provider_name = self._get_provider_name(context, pool['pool']) + p = super(LoadBalancerPlugin, self).create_pool(context, pool) + + self.service_type_manager.add_resource_association( + context, + constants.LOADBALANCER, + provider_name, p['id']) + #need to add provider name to pool dict, + #because provider was not known to db plugin at pool creation + p['provider'] = provider_name + driver = self.drivers[provider_name] + try: + driver.create_pool(context, p) + except loadbalancer.NoEligibleBackend: + # that should catch cases when backend of any kind + # is not available (agent, appliance, etc) + self.update_status(context, ldb.Pool, + p['id'], constants.ERROR, + "No eligible backend") + raise loadbalancer.NoEligibleBackend(pool_id=p['id']) + return p + + def update_pool(self, context, id, pool): + if 'status' not in pool['pool']: + pool['pool']['status'] = constants.PENDING_UPDATE + old_pool = self.get_pool(context, id) + p = super(LoadBalancerPlugin, self).update_pool(context, id, pool) + driver = self._get_driver_for_provider(p['provider']) + driver.update_pool(context, old_pool, p) + return p + + def _delete_db_pool(self, context, id): + # proxy the call until plugin inherits from DBPlugin + # rely on uuid uniqueness: + try: + with context.session.begin(subtransactions=True): + self.service_type_manager.del_resource_associations( + context, [id]) + super(LoadBalancerPlugin, self).delete_pool(context, id) + except Exception: + # that should not happen + # if it's still a case - something goes wrong + # log the error and mark the pool as ERROR + LOG.error(_('Failed to delete pool %s, putting it in ERROR state'), + id) + with excutils.save_and_reraise_exception(): + self.update_status(context, ldb.Pool, + id, constants.ERROR) + + def delete_pool(self, context, id): + # check for delete conditions and update the status + # within a transaction to avoid a race + with context.session.begin(subtransactions=True): + self.update_status(context, ldb.Pool, + id, constants.PENDING_DELETE) + self._ensure_pool_delete_conditions(context, id) + p = self.get_pool(context, id) + driver = self._get_driver_for_provider(p['provider']) + driver.delete_pool(context, p) + + def create_member(self, context, member): + m = super(LoadBalancerPlugin, self).create_member(context, member) + driver = self._get_driver_for_pool(context, m['pool_id']) + driver.create_member(context, m) + return m + + def update_member(self, context, id, member): + if 'status' not in member['member']: + member['member']['status'] = constants.PENDING_UPDATE + old_member = self.get_member(context, id) + m = super(LoadBalancerPlugin, self).update_member(context, id, member) + driver = self._get_driver_for_pool(context, m['pool_id']) + driver.update_member(context, old_member, m) + return m + + def _delete_db_member(self, context, id): + # proxy the call until plugin inherits from DBPlugin + super(LoadBalancerPlugin, self).delete_member(context, id) + + def delete_member(self, context, id): + self.update_status(context, ldb.Member, + id, constants.PENDING_DELETE) + m = self.get_member(context, id) + driver = self._get_driver_for_pool(context, m['pool_id']) + driver.delete_member(context, m) + + def _validate_hm_parameters(self, delay, timeout): + if delay < timeout: + raise loadbalancer.DelayOrTimeoutInvalid() + + def create_health_monitor(self, context, 
health_monitor): + new_hm = health_monitor['health_monitor'] + self._validate_hm_parameters(new_hm['delay'], new_hm['timeout']) + + hm = super(LoadBalancerPlugin, self).create_health_monitor( + context, + health_monitor + ) + return hm + + def update_health_monitor(self, context, id, health_monitor): + new_hm = health_monitor['health_monitor'] + old_hm = self.get_health_monitor(context, id) + delay = new_hm.get('delay', old_hm.get('delay')) + timeout = new_hm.get('timeout', old_hm.get('timeout')) + self._validate_hm_parameters(delay, timeout) + + hm = super(LoadBalancerPlugin, self).update_health_monitor( + context, + id, + health_monitor + ) + + with context.session.begin(subtransactions=True): + qry = context.session.query( + ldb.PoolMonitorAssociation + ).filter_by(monitor_id=hm['id']).join(ldb.Pool) + for assoc in qry: + driver = self._get_driver_for_pool(context, assoc['pool_id']) + driver.update_pool_health_monitor(context, old_hm, + hm, assoc['pool_id']) + return hm + + def _delete_db_pool_health_monitor(self, context, hm_id, pool_id): + super(LoadBalancerPlugin, self).delete_pool_health_monitor(context, + hm_id, + pool_id) + + def _delete_db_health_monitor(self, context, id): + super(LoadBalancerPlugin, self).delete_health_monitor(context, id) + + def create_pool_health_monitor(self, context, health_monitor, pool_id): + retval = super(LoadBalancerPlugin, self).create_pool_health_monitor( + context, + health_monitor, + pool_id + ) + monitor_id = health_monitor['health_monitor']['id'] + hm = self.get_health_monitor(context, monitor_id) + driver = self._get_driver_for_pool(context, pool_id) + driver.create_pool_health_monitor(context, hm, pool_id) + return retval + + def delete_pool_health_monitor(self, context, id, pool_id): + self.update_pool_health_monitor(context, id, pool_id, + constants.PENDING_DELETE) + hm = self.get_health_monitor(context, id) + driver = self._get_driver_for_pool(context, pool_id) + driver.delete_pool_health_monitor(context, hm, pool_id) + + def stats(self, context, pool_id): + driver = self._get_driver_for_pool(context, pool_id) + stats_data = driver.stats(context, pool_id) + # if we get something from the driver - + # update the db and return the value from db + # else - return what we have in db + if stats_data: + super(LoadBalancerPlugin, self).update_pool_stats( + context, + pool_id, + stats_data + ) + return super(LoadBalancerPlugin, self).stats(context, + pool_id) + + def populate_vip_graph(self, context, vip): + """Populate the vip with: pool, members, healthmonitors.""" + + pool = self.get_pool(context, vip['pool_id']) + vip['pool'] = pool + vip['members'] = [self.get_member(context, member_id) + for member_id in pool['members']] + vip['health_monitors'] = [self.get_health_monitor(context, hm_id) + for hm_id in pool['health_monitors']] + return vip + + def validate_provider(self, provider): + if provider not in self.drivers: + raise pconf.ServiceProviderNotFound( + provider=provider, service_type=constants.LOADBALANCER) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/metering/agents/metering_agent.py b/icehouse-patches/neutron/dvr-patch/neutron/services/metering/agents/metering_agent.py new file mode 100644 index 00000000..2747fe84 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/metering/agents/metering_agent.py @@ -0,0 +1,296 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance 
with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys +import time + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as constants +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron import context +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import periodic_task +from neutron.openstack.common import service +from neutron import service as neutron_service + + +LOG = logging.getLogger(__name__) + + +class MeteringPluginRpc(n_rpc.RpcProxy): + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, host): + super(MeteringPluginRpc, + self).__init__(topic=topics.METERING_AGENT, + default_version=self.BASE_RPC_API_VERSION) + + def _get_sync_data_metering(self, context): + try: + return self.call(context, + self.make_msg('get_sync_data_metering', + host=self.host), + topic=topics.METERING_PLUGIN) + except Exception: + LOG.exception(_("Failed synchronizing routers")) + + +class MeteringAgent(MeteringPluginRpc, manager.Manager): + + Opts = [ + cfg.StrOpt('driver', + default='neutron.services.metering.drivers.noop.' 
+ 'noop_driver.NoopMeteringDriver', + help=_("Metering driver")), + cfg.IntOpt('measure_interval', default=30, + help=_("Interval between two metering measures")), + cfg.IntOpt('report_interval', default=300, + help=_("Interval between two metering reports")), + ] + + def __init__(self, host, conf=None): + self.conf = conf or cfg.CONF + self._load_drivers() + self.root_helper = config.get_root_helper(self.conf) + self.context = context.get_admin_context_without_session() + self.metering_info = {} + self.metering_loop = loopingcall.FixedIntervalLoopingCall( + self._metering_loop + ) + measure_interval = self.conf.measure_interval + self.last_report = 0 + self.metering_loop.start(interval=measure_interval) + self.host = host + + self.label_tenant_id = {} + self.routers = {} + self.metering_infos = {} + super(MeteringAgent, self).__init__(host=host) + + def _load_drivers(self): + """Loads plugin-driver from configuration.""" + LOG.info(_("Loading Metering driver %s"), self.conf.driver) + if not self.conf.driver: + raise SystemExit(_('A metering driver must be specified')) + self.metering_driver = importutils.import_object( + self.conf.driver, self, self.conf) + + def _metering_notification(self): + for label_id, info in self.metering_infos.items(): + data = {'label_id': label_id, + 'tenant_id': self.label_tenant_id.get(label_id), + 'pkts': info['pkts'], + 'bytes': info['bytes'], + 'time': info['time'], + 'first_update': info['first_update'], + 'last_update': info['last_update'], + 'host': self.host} + + LOG.debug(_("Send metering report: %s"), data) + notifier = n_rpc.get_notifier('metering') + notifier.info(self.context, 'l3.meter', data) + info['pkts'] = 0 + info['bytes'] = 0 + info['time'] = 0 + + def _purge_metering_info(self): + ts = int(time.time()) + report_interval = self.conf.report_interval + for label_id, info in self.metering_info.items(): + if info['last_update'] > ts + report_interval: + del self.metering_info[label_id] + + def _add_metering_info(self, label_id, pkts, bytes): + ts = int(time.time()) + info = self.metering_infos.get(label_id, {'bytes': 0, + 'pkts': 0, + 'time': 0, + 'first_update': ts, + 'last_update': ts}) + info['bytes'] += bytes + info['pkts'] += pkts + info['time'] += ts - info['last_update'] + info['last_update'] = ts + + self.metering_infos[label_id] = info + + return info + + def _add_metering_infos(self): + self.label_tenant_id = {} + for router in self.routers.values(): + tenant_id = router['tenant_id'] + labels = router.get(constants.METERING_LABEL_KEY, []) + for label in labels: + label_id = label['id'] + self.label_tenant_id[label_id] = tenant_id + + tenant_id = self.label_tenant_id.get + accs = self._get_traffic_counters(self.context, self.routers.values()) + if not accs: + return + + for label_id, acc in accs.items(): + self._add_metering_info(label_id, acc['pkts'], acc['bytes']) + + def _metering_loop(self): + self._add_metering_infos() + + ts = int(time.time()) + delta = ts - self.last_report + + report_interval = self.conf.report_interval + if delta > report_interval: + self._metering_notification() + self._purge_metering_info() + self.last_report = ts + + @utils.synchronized('metering-agent') + def _invoke_driver(self, context, meterings, func_name): + try: + return getattr(self.metering_driver, func_name)(context, meterings) + except AttributeError: + LOG.exception(_("Driver %(driver)s does not implement %(func)s"), + {'driver': self.conf.driver, + 'func': func_name}) + except RuntimeError: + LOG.exception(_("Driver %(driver)s:%(func)s runtime 
error"), + {'driver': self.conf.driver, + 'func': func_name}) + + @periodic_task.periodic_task(run_immediately=True) + def _sync_routers_task(self, context): + routers = self._get_sync_data_metering(self.context) + if not routers: + return + self._update_routers(context, routers) + + def router_deleted(self, context, router_id): + self._add_metering_infos() + + if router_id in self.routers: + del self.routers[router_id] + + return self._invoke_driver(context, router_id, + 'remove_router') + + def routers_updated(self, context, routers=None): + if not routers: + routers = self._get_sync_data_metering(self.context) + if not routers: + return + self._update_routers(context, routers) + + def _update_routers(self, context, routers): + for router in routers: + self.routers[router['id']] = router + + return self._invoke_driver(context, routers, + 'update_routers') + + def _get_traffic_counters(self, context, routers): + LOG.debug(_("Get router traffic counters")) + return self._invoke_driver(context, routers, 'get_traffic_counters') + + def update_metering_label_rules(self, context, routers): + LOG.debug(_("Update metering rules from agent")) + return self._invoke_driver(context, routers, + 'update_metering_label_rules') + + def add_metering_label(self, context, routers): + LOG.debug(_("Creating a metering label from agent")) + return self._invoke_driver(context, routers, + 'add_metering_label') + + def remove_metering_label(self, context, routers): + self._add_metering_infos() + + LOG.debug(_("Delete a metering label from agent")) + return self._invoke_driver(context, routers, + 'remove_metering_label') + + +class MeteringAgentWithStateReport(MeteringAgent): + + def __init__(self, host, conf=None): + super(MeteringAgentWithStateReport, self).__init__(host=host, + conf=conf) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-metering-agent', + 'host': host, + 'topic': topics.METERING_AGENT, + 'configurations': { + 'metering_driver': self.conf.driver, + 'measure_interval': + self.conf.measure_interval, + 'report_interval': self.conf.report_interval + }, + 'start_flag': True, + 'agent_type': constants.AGENT_TYPE_METERING} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + try: + self.state_rpc.report_state(self.context, self.agent_state, + self.use_call) + self.agent_state.pop('start_flag', None) + self.use_call = False + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." + " State report for this agent will be disabled.")) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + + def agent_updated(self, context, payload): + LOG.info(_("agent_updated by server side %s!"), payload) + + +def main(): + conf = cfg.CONF + conf.register_opts(MeteringAgent.Opts) + config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-metering-agent', + topic=topics.METERING_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager='neutron.services.metering.agents.' 
+ 'metering_agent.MeteringAgentWithStateReport') + service.launch(server).wait() diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/metering/metering_plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/services/metering/metering_plugin.py new file mode 100644 index 00000000..b2e9215b --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/metering/metering_plugin.py @@ -0,0 +1,74 @@ +# Copyright (C) 2013 eNovance SAS +# +# Author: Sylvain Afchain +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db.metering import metering_db +from neutron.db.metering import metering_rpc + + +class MeteringPlugin(metering_db.MeteringDbMixin): + """Implementation of the Neutron Metering Service Plugin.""" + supported_extension_aliases = ["metering"] + + def __init__(self): + super(MeteringPlugin, self).__init__() + + self.endpoints = [metering_rpc.MeteringRpcCallbacks(self)] + + self.conn = n_rpc.create_connection(new=True) + self.conn.create_consumer( + topics.METERING_PLUGIN, self.endpoints, fanout=False) + self.conn.consume_in_threads() + + self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI() + + def create_metering_label(self, context, metering_label): + label = super(MeteringPlugin, self).create_metering_label( + context, metering_label) + + data = self.get_sync_data_metering(context) + self.meter_rpc.add_metering_label(context, data) + + return label + + def delete_metering_label(self, context, label_id): + data = self.get_sync_data_metering(context, label_id) + label = super(MeteringPlugin, self).delete_metering_label( + context, label_id) + + self.meter_rpc.remove_metering_label(context, data) + + return label + + def create_metering_label_rule(self, context, metering_label_rule): + rule = super(MeteringPlugin, self).create_metering_label_rule( + context, metering_label_rule) + + data = self.get_sync_data_metering(context) + self.meter_rpc.update_metering_label_rules(context, data) + + return rule + + def delete_metering_label_rule(self, context, rule_id): + rule = super(MeteringPlugin, self).delete_metering_label_rule( + context, rule_id) + + data = self.get_sync_data_metering(context) + self.meter_rpc.update_metering_label_rules(context, data) + + return rule diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/provider_configuration.py b/icehouse-patches/neutron/dvr-patch/neutron/services/provider_configuration.py new file mode 100644 index 00000000..b9dd8f62 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/provider_configuration.py @@ -0,0 +1,161 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.common import exceptions as n_exc +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants + +LOG = logging.getLogger(__name__) + + +serviceprovider_opts = [ + cfg.MultiStrOpt('service_provider', default=[], + help=_('Defines providers for advanced services ' + 'using the format: ' + '::[:default]')) +] + +cfg.CONF.register_opts(serviceprovider_opts, 'service_providers') + + +#global scope function that should be used in service APIs +def normalize_provider_name(name): + return name.lower() + + +def parse_service_provider_opt(): + """Parse service definition opts and returns result.""" + def validate_name(name): + if len(name) > 255: + raise n_exc.Invalid( + _("Provider name is limited by 255 characters: %s") % name) + + svc_providers_opt = cfg.CONF.service_providers.service_provider + res = [] + for prov_def in svc_providers_opt: + split = prov_def.split(':') + try: + svc_type, name, driver = split[:3] + except ValueError: + raise n_exc.Invalid(_("Invalid service provider format")) + validate_name(name) + name = normalize_provider_name(name) + default = False + if len(split) == 4 and split[3]: + if split[3] == 'default': + default = True + else: + msg = (_("Invalid provider format. " + "Last part should be 'default' or empty: %s") % + prov_def) + LOG.error(msg) + raise n_exc.Invalid(msg) + if svc_type not in constants.ALLOWED_SERVICES: + msg = (_("Service type '%(svc_type)s' is not allowed, " + "allowed types: %(allowed)s") % + {'svc_type': svc_type, + 'allowed': constants.ALLOWED_SERVICES}) + LOG.error(msg) + raise n_exc.Invalid(msg) + res.append({'service_type': svc_type, + 'name': name, + 'driver': driver, + 'default': default}) + return res + + +class ServiceProviderNotFound(n_exc.InvalidInput): + message = _("Service provider '%(provider)s' could not be found " + "for service type %(service_type)s") + + +class DefaultServiceProviderNotFound(n_exc.InvalidInput): + message = _("Service type %(service_type)s does not have a default " + "service provider") + + +class ServiceProviderAlreadyAssociated(n_exc.Conflict): + message = _("Resource '%(resource_id)s' is already associated with " + "provider '%(provider)s' for service type '%(service_type)s'") + + +class ProviderConfiguration(object): + def __init__(self, prov_data): + self.providers = {} + for prov in prov_data: + self.add_provider(prov) + + def _ensure_driver_unique(self, driver): + for k, v in self.providers.items(): + if v['driver'] == driver: + msg = (_("Driver %s is not unique across providers") % + driver) + LOG.exception(msg) + raise n_exc.Invalid(msg) + + def _ensure_default_unique(self, type, default): + if not default: + return + for k, v in self.providers.items(): + if k[0] == type and v['default']: + msg = _("Multiple default providers " + "for service %s") % type + LOG.exception(msg) + raise n_exc.Invalid(msg) + + def add_provider(self, provider): + self._ensure_driver_unique(provider['driver']) + self._ensure_default_unique(provider['service_type'], + provider['default']) + provider_type = (provider['service_type'], 
provider['name']) + if provider_type in self.providers: + msg = (_("Multiple providers specified for service " + "%s") % provider['service_type']) + LOG.exception(msg) + raise n_exc.Invalid(msg) + self.providers[provider_type] = {'driver': provider['driver'], + 'default': provider['default']} + + def _check_entry(self, k, v, filters): + # small helper to deal with query filters + if not filters: + return True + for index, key in enumerate(['service_type', 'name']): + if key in filters: + if k[index] not in filters[key]: + return False + + for key in ['driver', 'default']: + if key in filters: + if v[key] not in filters[key]: + return False + return True + + def _fields(self, resource, fields): + if fields: + return dict(((key, item) for key, item in resource.items() + if key in fields)) + return resource + + def get_service_providers(self, filters=None, fields=None): + return [self._fields({'service_type': k[0], + 'name': k[1], + 'driver': v['driver'], + 'default': v['default']}, + fields) + for k, v in self.providers.items() + if self._check_entry(k, v, filters)] diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/service_base.py b/icehouse-patches/neutron/dvr-patch/neutron/services/service_base.py new file mode 100644 index 00000000..d69c7960 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/service_base.py @@ -0,0 +1,101 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import six + +from neutron.api import extensions +from neutron.db import servicetype_db as sdb +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.services import provider_configuration as pconf + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class ServicePluginBase(extensions.PluginInterface): + """Define base interface for any Advanced Service plugin.""" + supported_extension_aliases = [] + + @abc.abstractmethod + def get_plugin_type(self): + """Return one of predefined service types. + + See neutron/plugins/common/constants.py + """ + pass + + @abc.abstractmethod + def get_plugin_name(self): + """Return a symbolic name for the plugin. + + Each service plugin should have a symbolic name. This name + will be used, for instance, by service definitions in service types + """ + pass + + @abc.abstractmethod + def get_plugin_description(self): + """Return string description of the plugin.""" + pass + + +def load_drivers(service_type, plugin): + """Loads drivers for specific service. + + Passes plugin instance to driver's constructor + """ + service_type_manager = sdb.ServiceTypeManager.get_instance() + providers = (service_type_manager. 
+ get_service_providers( + None, + filters={'service_type': [service_type]}) + ) + if not providers: + msg = (_("No providers specified for '%s' service, exiting") % + service_type) + LOG.error(msg) + raise SystemExit(1) + + drivers = {} + for provider in providers: + try: + drivers[provider['name']] = importutils.import_object( + provider['driver'], plugin + ) + LOG.debug(_("Loaded '%(provider)s' provider for service " + "%(service_type)s"), + {'provider': provider['driver'], + 'service_type': service_type}) + except ImportError: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Error loading provider '%(provider)s' for " + "service %(service_type)s"), + {'provider': provider['driver'], + 'service_type': service_type}) + + default_provider = None + try: + provider = service_type_manager.get_default_service_provider( + None, service_type) + default_provider = provider['name'] + except pconf.DefaultServiceProviderNotFound: + LOG.info(_("Default provider is not specified for service type %s"), + service_type) + + return drivers, default_provider diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/__init__.py new file mode 100644 index 00000000..f236ecd7 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/__init__.py @@ -0,0 +1,16 @@ +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/agent.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/agent.py new file mode 100644 index 00000000..5ac69915 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/agent.py @@ -0,0 +1,146 @@ +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from oslo.config import cfg + +from neutron.agent import l3_agent +from neutron.extensions import vpnaas +from neutron.openstack.common import importutils + +vpn_agent_opts = [ + cfg.MultiStrOpt( + 'vpn_device_driver', + default=['neutron.services.vpn.device_drivers.' 
+ 'ipsec.OpenSwanDriver'], + help=_("The vpn device drivers Neutron will use")), +] +cfg.CONF.register_opts(vpn_agent_opts, 'vpnagent') + + +class VPNAgent(l3_agent.L3NATAgentWithStateReport): + """VPNAgent class which can handle vpn service drivers.""" + def __init__(self, host, conf=None): + super(VPNAgent, self).__init__(host=host, conf=conf) + self.setup_device_drivers(host) + + def setup_device_drivers(self, host): + """Setting up device drivers. + + :param host: hostname. This is needed for rpc + Each devices will stays as processes. + They will communiate with + server side service plugin using rpc with + device specific rpc topic. + :returns: None + """ + device_drivers = cfg.CONF.vpnagent.vpn_device_driver + self.devices = [] + for device_driver in device_drivers: + try: + self.devices.append( + importutils.import_object(device_driver, self, host)) + except ImportError: + raise vpnaas.DeviceDriverImportError( + device_driver=device_driver) + + def get_namespace(self, router_id): + """Get namespace of router. + + :router_id: router_id + :returns: namespace string. + Note if the router is not exist, this function + returns None + """ + router_info = self.router_info.get(router_id) + if not router_info: + return + return router_info.ns_name + + def add_nat_rule(self, router_id, chain, rule, top=False): + """Add nat rule in namespace. + + :param router_id: router_id + :param chain: a string of chain name + :param rule: a string of rule + :param top: if top is true, the rule + will be placed on the top of chain + Note if there is no rotuer, this method do nothing + """ + router_info = self.router_info.get(router_id) + if not router_info: + return + router_info.iptables_manager.ipv4['nat'].add_rule( + chain, rule, top=top) + + def remove_nat_rule(self, router_id, chain, rule, top=False): + """Remove nat rule in namespace. + + :param router_id: router_id + :param chain: a string of chain name + :param rule: a string of rule + :param top: unused + needed to have same argument with add_nat_rule + """ + router_info = self.router_info.get(router_id) + if not router_info: + return + router_info.iptables_manager.ipv4['nat'].remove_rule( + chain, rule, top=top) + + def iptables_apply(self, router_id): + """Apply IPtables. + + :param router_id: router_id + This method do nothing if there is no router + """ + router_info = self.router_info.get(router_id) + if not router_info: + return + router_info.iptables_manager.apply() + + def _router_added(self, router_id, router): + """Router added event. + + This method overwrites parent class method. + :param router_id: id of added router + :param router: dict of rotuer + """ + super(VPNAgent, self)._router_added(router_id, router) + for device in self.devices: + device.create_router(router_id) + + def _router_removed(self, router_id): + """Router removed event. + + This method overwrites parent class method. + :param router_id: id of removed router + """ + super(VPNAgent, self)._router_removed(router_id) + for device in self.devices: + device.destroy_router(router_id) + + def _process_routers(self, routers, all_routers=False): + """Router sync event. + + This method overwrites parent class method. 
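The add_nat_rule/remove_nat_rule/iptables_apply helpers above are meant to be driven by the loaded device drivers. A hypothetical usage sketch follows; the chain name and rule text are invented for illustration, and, as the docstrings state, the helpers silently do nothing when the router is not known to the agent:

# Hypothetical device-driver usage of the VPNAgent helpers above.
rule = '-s 10.1.0.0/24 -d 10.2.0.0/24 -j ACCEPT'
agent.add_nat_rule(router_id, 'snat', rule, top=True)
agent.iptables_apply(router_id)        # push the queued rules to iptables
# ...and on teardown:
agent.remove_nat_rule(router_id, 'snat', rule)
agent.iptables_apply(router_id)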
+ :param routers: list of routers + """ + super(VPNAgent, self)._process_routers(routers, all_routers) + for device in self.devices: + device.sync(self.context, routers) + + +def main(): + l3_agent.main( + manager='neutron.services.vpn.agent.VPNAgent') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/common/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/common/__init__.py new file mode 100644 index 00000000..46d9c1be --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/common/topics.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/common/topics.py new file mode 100644 index 00000000..d17c829d --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/common/topics.py @@ -0,0 +1,20 @@ +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +IPSEC_DRIVER_TOPIC = 'ipsec_driver' +IPSEC_AGENT_TOPIC = 'ipsec_agent' +CISCO_IPSEC_DRIVER_TOPIC = 'cisco_csr_ipsec_driver' +CISCO_IPSEC_AGENT_TOPIC = 'cisco_csr_ipsec_agent' diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/__init__.py new file mode 100644 index 00000000..3f01f935 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/__init__.py @@ -0,0 +1,36 @@ +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class DeviceDriver(object): + + def __init__(self, agent, host): + pass + + @abc.abstractmethod + def sync(self, context, processes): + pass + + @abc.abstractmethod + def create_router(self, process_id): + pass + + @abc.abstractmethod + def destroy_router(self, process_id): + pass diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/cisco_csr_rest_client.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/cisco_csr_rest_client.py new file mode 100644 index 00000000..61693e9e --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/cisco_csr_rest_client.py @@ -0,0 +1,258 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Paul Michali, Cisco Systems, Inc. + +import time + +import netaddr +import requests +from requests import exceptions as r_exc + +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging + + +TIMEOUT = 20.0 + +LOG = logging.getLogger(__name__) +HEADER_CONTENT_TYPE_JSON = {'content-type': 'application/json'} +URL_BASE = 'https://%(host)s/api/v1/%(resource)s' + + +def make_route_id(cidr, interface): + """Build ID that will be used to identify route for later deletion.""" + net = netaddr.IPNetwork(cidr) + return '%(network)s_%(prefix)s_%(interface)s' % { + 'network': net.network, + 'prefix': net.prefixlen, + 'interface': interface} + + +class CsrRestClient(object): + + """REST CsrRestClient for accessing the Cisco Cloud Services Router.""" + + def __init__(self, host, tunnel_ip, username, password, timeout=None): + self.host = host + self.tunnel_ip = tunnel_ip + self.auth = (username, password) + self.token = None + self.status = requests.codes.OK + self.timeout = timeout + self.max_tries = 5 + self.session = requests.Session() + + def _response_info_for(self, response, method): + """Return contents or location from response. + + For a POST or GET with a 200 response, the response content + is returned. + + For a POST with a 201 response, return the header's location, + which contains the identifier for the created resource. + + If there is an error, return the response content, so that + it can be used in error processing ('error-code', 'error-message', + and 'detail' fields). 
+ """ + if method in ('POST', 'GET') and self.status == requests.codes.OK: + LOG.debug(_('RESPONSE: %s'), response.json()) + return response.json() + if method == 'POST' and self.status == requests.codes.CREATED: + return response.headers.get('location', '') + if self.status >= requests.codes.BAD_REQUEST and response.content: + if 'error-code' in response.content: + content = jsonutils.loads(response.content) + LOG.debug("Error response content %s", content) + return content + + def _request(self, method, url, **kwargs): + """Perform REST request and save response info.""" + try: + LOG.debug(_("%(method)s: Request for %(resource)s payload: " + "%(payload)s"), + {'method': method.upper(), 'resource': url, + 'payload': kwargs.get('data')}) + start_time = time.time() + response = self.session.request(method, url, verify=False, + timeout=self.timeout, **kwargs) + LOG.debug(_("%(method)s Took %(time).2f seconds to process"), + {'method': method.upper(), + 'time': time.time() - start_time}) + except (r_exc.Timeout, r_exc.SSLError) as te: + # Should never see SSLError, unless requests package is old (<2.0) + timeout_val = 0.0 if self.timeout is None else self.timeout + LOG.warning(_("%(method)s: Request timeout%(ssl)s " + "(%(timeout).3f sec) for CSR(%(host)s)"), + {'method': method, + 'timeout': timeout_val, + 'ssl': '(SSLError)' + if isinstance(te, r_exc.SSLError) else '', + 'host': self.host}) + self.status = requests.codes.REQUEST_TIMEOUT + except r_exc.ConnectionError: + LOG.exception(_("%(method)s: Unable to connect to CSR(%(host)s)"), + {'method': method, 'host': self.host}) + self.status = requests.codes.NOT_FOUND + except Exception as e: + LOG.error(_("%(method)s: Unexpected error for CSR (%(host)s): " + "%(error)s"), + {'method': method, 'host': self.host, 'error': e}) + self.status = requests.codes.INTERNAL_SERVER_ERROR + else: + self.status = response.status_code + LOG.debug(_("%(method)s: Completed [%(status)s]"), + {'method': method, 'status': self.status}) + return self._response_info_for(response, method) + + def authenticate(self): + """Obtain a token to use for subsequent CSR REST requests. + + This is called when there is no token yet, or if the token has expired + and attempts to use it resulted in an UNAUTHORIZED REST response. + """ + + url = URL_BASE % {'host': self.host, 'resource': 'auth/token-services'} + headers = {'Content-Length': '0', + 'Accept': 'application/json'} + headers.update(HEADER_CONTENT_TYPE_JSON) + LOG.debug(_("%(auth)s with CSR %(host)s"), + {'auth': 'Authenticating' if self.token is None + else 'Reauthenticating', 'host': self.host}) + self.token = None + response = self._request("POST", url, headers=headers, auth=self.auth) + if response: + self.token = response['token-id'] + LOG.debug(_("Successfully authenticated with CSR %s"), self.host) + return True + LOG.error(_("Failed authentication with CSR %(host)s [%(status)s]"), + {'host': self.host, 'status': self.status}) + + def _do_request(self, method, resource, payload=None, more_headers=None, + full_url=False): + """Perform a REST request to a CSR resource. + + If this is the first time interacting with the CSR, a token will + be obtained. If the request fails, due to an expired token, the + token will be obtained and the request will be retried once more. 
+ """ + + if self.token is None: + if not self.authenticate(): + return + + if full_url: + url = resource + else: + url = ('https://%(host)s/api/v1/%(resource)s' % + {'host': self.host, 'resource': resource}) + headers = {'Accept': 'application/json', 'X-auth-token': self.token} + if more_headers: + headers.update(more_headers) + if payload: + payload = jsonutils.dumps(payload) + response = self._request(method, url, data=payload, headers=headers) + if self.status == requests.codes.UNAUTHORIZED: + if not self.authenticate(): + return + headers['X-auth-token'] = self.token + response = self._request(method, url, data=payload, + headers=headers) + if self.status != requests.codes.REQUEST_TIMEOUT: + return response + LOG.error(_("%(method)s: Request timeout for CSR(%(host)s)"), + {'method': method, 'host': self.host}) + + def get_request(self, resource, full_url=False): + """Perform a REST GET requests for a CSR resource.""" + return self._do_request('GET', resource, full_url=full_url) + + def post_request(self, resource, payload=None): + """Perform a POST request to a CSR resource.""" + return self._do_request('POST', resource, payload=payload, + more_headers=HEADER_CONTENT_TYPE_JSON) + + def put_request(self, resource, payload=None): + """Perform a PUT request to a CSR resource.""" + return self._do_request('PUT', resource, payload=payload, + more_headers=HEADER_CONTENT_TYPE_JSON) + + def delete_request(self, resource): + """Perform a DELETE request on a CSR resource.""" + return self._do_request('DELETE', resource, + more_headers=HEADER_CONTENT_TYPE_JSON) + + def create_ike_policy(self, policy_info): + base_ike_policy_info = {u'version': u'v1', + u'local-auth-method': u'pre-share'} + base_ike_policy_info.update(policy_info) + return self.post_request('vpn-svc/ike/policies', + payload=base_ike_policy_info) + + def create_ipsec_policy(self, policy_info): + base_ipsec_policy_info = {u'mode': u'tunnel'} + base_ipsec_policy_info.update(policy_info) + return self.post_request('vpn-svc/ipsec/policies', + payload=base_ipsec_policy_info) + + def create_pre_shared_key(self, psk_info): + return self.post_request('vpn-svc/ike/keyrings', payload=psk_info) + + def create_ipsec_connection(self, connection_info): + base_conn_info = {u'vpn-type': u'site-to-site', + u'ip-version': u'ipv4'} + connection_info.update(base_conn_info) + return self.post_request('vpn-svc/site-to-site', + payload=connection_info) + + def configure_ike_keepalive(self, keepalive_info): + base_keepalive_info = {u'periodic': True} + keepalive_info.update(base_keepalive_info) + return self.put_request('vpn-svc/ike/keepalive', keepalive_info) + + def create_static_route(self, route_info): + return self.post_request('routing-svc/static-routes', + payload=route_info) + + def delete_static_route(self, route_id): + return self.delete_request('routing-svc/static-routes/%s' % route_id) + + def set_ipsec_connection_state(self, tunnel, admin_up=True): + """Set the IPSec site-to-site connection (tunnel) admin state. + + Note: When a tunnel is created, it will be admin up. 
+ """ + info = {u'vpn-interface-name': tunnel, u'enabled': admin_up} + return self.put_request('vpn-svc/site-to-site/%s/state' % tunnel, info) + + def delete_ipsec_connection(self, conn_id): + return self.delete_request('vpn-svc/site-to-site/%s' % conn_id) + + def delete_ipsec_policy(self, policy_id): + return self.delete_request('vpn-svc/ipsec/policies/%s' % policy_id) + + def delete_ike_policy(self, policy_id): + return self.delete_request('vpn-svc/ike/policies/%s' % policy_id) + + def delete_pre_shared_key(self, key_id): + return self.delete_request('vpn-svc/ike/keyrings/%s' % key_id) + + def read_tunnel_statuses(self): + results = self.get_request('vpn-svc/site-to-site/active/sessions') + if self.status != requests.codes.OK or not results: + return [] + tunnels = [(t[u'vpn-interface-name'], t[u'status']) + for t in results['items']] + return tunnels diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/cisco_ipsec.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/cisco_ipsec.py new file mode 100644 index 00000000..33a806d4 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/cisco_ipsec.py @@ -0,0 +1,858 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Paul Michali, Cisco Systems, Inc. 
+ +import abc +import collections +import requests + +import netaddr +from oslo.config import cfg +from oslo import messaging +import six + +from neutron.common import exceptions +from neutron.common import rpc as n_rpc +from neutron import context as ctx +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants +from neutron.plugins.common import utils as plugin_utils +from neutron.services.vpn.common import topics +from neutron.services.vpn import device_drivers +from neutron.services.vpn.device_drivers import ( + cisco_csr_rest_client as csr_client) + + +ipsec_opts = [ + cfg.IntOpt('status_check_interval', + default=60, + help=_("Status check interval for Cisco CSR IPSec connections")) +] +cfg.CONF.register_opts(ipsec_opts, 'cisco_csr_ipsec') + +LOG = logging.getLogger(__name__) + +RollbackStep = collections.namedtuple('RollbackStep', + ['action', 'resource_id', 'title']) + + +class CsrResourceCreateFailure(exceptions.NeutronException): + message = _("Cisco CSR failed to create %(resource)s (%(which)s)") + + +class CsrAdminStateChangeFailure(exceptions.NeutronException): + message = _("Cisco CSR failed to change %(tunnel)s admin state to " + "%(state)s") + + +class CsrDriverMismatchError(exceptions.NeutronException): + message = _("Required %(resource)s attribute %(attr)s mapping for Cisco " + "CSR is missing in device driver") + + +class CsrUnknownMappingError(exceptions.NeutronException): + message = _("Device driver does not have a mapping of '%(value)s for " + "attribute %(attr)s of %(resource)s") + + +def find_available_csrs_from_config(config_files): + """Read INI for available Cisco CSRs that driver can use. + + Loads management port, tunnel IP, user, and password information for + available CSRs from configuration file. Driver will use this info to + configure VPN connections. The CSR is associated 1:1 with a Neutron + router. To identify which CSR to use for a VPN service, the public + (GW) IP of the Neutron router will be used as an index into the CSR + config info. 
+ """ + multi_parser = cfg.MultiConfigParser() + LOG.info(_("Scanning config files %s for Cisco CSR configurations"), + config_files) + try: + read_ok = multi_parser.read(config_files) + except cfg.ParseError as pe: + LOG.error(_("Config file parse error: %s"), pe) + return {} + + if len(read_ok) != len(config_files): + raise cfg.Error(_("Unable to parse config files %s for Cisco CSR " + "info") % config_files) + csrs_found = {} + for parsed_file in multi_parser.parsed: + for parsed_item in parsed_file.keys(): + device_type, sep, for_router = parsed_item.partition(':') + if device_type.lower() == 'cisco_csr_rest': + try: + netaddr.IPNetwork(for_router) + except netaddr.core.AddrFormatError: + LOG.error(_("Ignoring Cisco CSR configuration entry - " + "router IP %s is not valid"), for_router) + continue + entry = parsed_file[parsed_item] + # Check for missing fields + try: + rest_mgmt_ip = entry['rest_mgmt'][0] + tunnel_ip = entry['tunnel_ip'][0] + username = entry['username'][0] + password = entry['password'][0] + except KeyError as ke: + LOG.error(_("Ignoring Cisco CSR for router %(router)s " + "- missing %(field)s setting"), + {'router': for_router, 'field': str(ke)}) + continue + # Validate fields + try: + timeout = float(entry['timeout'][0]) + except ValueError: + LOG.error(_("Ignoring Cisco CSR for router %s - " + "timeout is not a floating point number"), + for_router) + continue + except KeyError: + timeout = csr_client.TIMEOUT + try: + netaddr.IPAddress(rest_mgmt_ip) + except netaddr.core.AddrFormatError: + LOG.error(_("Ignoring Cisco CSR for subnet %s - " + "REST management is not an IP address"), + for_router) + continue + try: + netaddr.IPAddress(tunnel_ip) + except netaddr.core.AddrFormatError: + LOG.error(_("Ignoring Cisco CSR for router %s - " + "local tunnel is not an IP address"), + for_router) + continue + csrs_found[for_router] = {'rest_mgmt': rest_mgmt_ip, + 'tunnel_ip': tunnel_ip, + 'username': username, + 'password': password, + 'timeout': timeout} + + LOG.debug(_("Found CSR for router %(router)s: %(info)s"), + {'router': for_router, + 'info': csrs_found[for_router]}) + return csrs_found + + +class CiscoCsrIPsecVpnDriverApi(n_rpc.RpcProxy): + """RPC API for agent to plugin messaging.""" + + def get_vpn_services_on_host(self, context, host): + """Get list of vpnservices on this host. + + The vpnservices including related ipsec_site_connection, + ikepolicy, ipsecpolicy, and Cisco info on this host. + """ + return self.call(context, + self.make_msg('get_vpn_services_on_host', + host=host), + topic=self.topic) + + def update_status(self, context, status): + """Update status for all VPN services and connections.""" + return self.cast(context, + self.make_msg('update_status', + status=status), + topic=self.topic) + + +@six.add_metaclass(abc.ABCMeta) +class CiscoCsrIPsecDriver(device_drivers.DeviceDriver): + """Cisco CSR VPN Device Driver for IPSec. + + This class is designed for use with L3-agent now. + However this driver will be used with another agent in future. + so the use of "Router" is kept minimul now. + Insted of router_id, we are using process_id in this code. 
+ """ + + # history + # 1.0 Initial version + RPC_API_VERSION = '1.0' + + # TODO(ihrachys): we can't use RpcCallback here due to inheritance + # issues + target = messaging.Target(version=RPC_API_VERSION) + + def __init__(self, agent, host): + self.host = host + self.conn = n_rpc.create_connection(new=True) + context = ctx.get_admin_context_without_session() + node_topic = '%s.%s' % (topics.CISCO_IPSEC_AGENT_TOPIC, self.host) + + self.service_state = {} + + self.endpoints = [self] + self.conn.create_consumer(node_topic, self.endpoints, fanout=False) + self.conn.consume_in_threads() + self.agent_rpc = ( + CiscoCsrIPsecVpnDriverApi(topics.CISCO_IPSEC_DRIVER_TOPIC, '1.0')) + self.periodic_report = loopingcall.FixedIntervalLoopingCall( + self.report_status, context) + self.periodic_report.start( + interval=agent.conf.cisco_csr_ipsec.status_check_interval) + + csrs_found = find_available_csrs_from_config(cfg.CONF.config_file) + if csrs_found: + LOG.info(_("Loaded %(num)d Cisco CSR configuration%(plural)s"), + {'num': len(csrs_found), + 'plural': 's'[len(csrs_found) == 1:]}) + else: + raise SystemExit(_('No Cisco CSR configurations found in: %s') % + cfg.CONF.config_file) + self.csrs = dict([(k, csr_client.CsrRestClient(v['rest_mgmt'], + v['tunnel_ip'], + v['username'], + v['password'], + v['timeout'])) + for k, v in csrs_found.items()]) + + def vpnservice_updated(self, context, **kwargs): + """Handle VPNaaS service driver change notifications.""" + LOG.debug(_("Handling VPN service update notification '%s'"), + kwargs.get('reason', '')) + self.sync(context, []) + + def create_vpn_service(self, service_data): + """Create new entry to track VPN service and its connections.""" + vpn_service_id = service_data['id'] + vpn_service_router = service_data['external_ip'] + self.service_state[vpn_service_id] = CiscoCsrVpnService( + service_data, self.csrs.get(vpn_service_router)) + return self.service_state[vpn_service_id] + + def update_connection(self, context, vpn_service_id, conn_data): + """Handle notification for a single IPSec connection.""" + vpn_service = self.service_state[vpn_service_id] + conn_id = conn_data['id'] + conn_is_admin_up = conn_data[u'admin_state_up'] + + if conn_id in vpn_service.conn_state: # Existing connection... + ipsec_conn = vpn_service.conn_state[conn_id] + config_changed = ipsec_conn.check_for_changes(conn_data) + if config_changed: + LOG.debug(_("Update: Existing connection %s changed"), conn_id) + ipsec_conn.delete_ipsec_site_connection(context, conn_id) + ipsec_conn.create_ipsec_site_connection(context, conn_data) + ipsec_conn.conn_info = conn_data + + if ipsec_conn.forced_down: + if vpn_service.is_admin_up and conn_is_admin_up: + LOG.debug(_("Update: Connection %s no longer admin down"), + conn_id) + ipsec_conn.set_admin_state(is_up=True) + ipsec_conn.forced_down = False + else: + if not vpn_service.is_admin_up or not conn_is_admin_up: + LOG.debug(_("Update: Connection %s forced to admin down"), + conn_id) + ipsec_conn.set_admin_state(is_up=False) + ipsec_conn.forced_down = True + else: # New connection... 
+ ipsec_conn = vpn_service.create_connection(conn_data) + ipsec_conn.create_ipsec_site_connection(context, conn_data) + if not vpn_service.is_admin_up or not conn_is_admin_up: + LOG.debug(_("Update: Created new connection %s in admin down " + "state"), conn_id) + ipsec_conn.set_admin_state(is_up=False) + ipsec_conn.forced_down = True + else: + LOG.debug(_("Update: Created new connection %s"), conn_id) + + ipsec_conn.is_dirty = False + ipsec_conn.last_status = conn_data['status'] + ipsec_conn.is_admin_up = conn_is_admin_up + return ipsec_conn + + def update_service(self, context, service_data): + """Handle notification for a single VPN Service and its connections.""" + vpn_service_id = service_data['id'] + csr_id = service_data['external_ip'] + if csr_id not in self.csrs: + LOG.error(_("Update: Skipping VPN service %(service)s as it's " + "router (%(csr_id)s is not associated with a Cisco " + "CSR"), {'service': vpn_service_id, 'csr_id': csr_id}) + return + + if vpn_service_id in self.service_state: + LOG.debug(_("Update: Existing VPN service %s detected"), + vpn_service_id) + vpn_service = self.service_state[vpn_service_id] + else: + LOG.debug(_("Update: New VPN service %s detected"), vpn_service_id) + vpn_service = self.create_vpn_service(service_data) + + vpn_service.is_dirty = False + vpn_service.connections_removed = False + vpn_service.last_status = service_data['status'] + vpn_service.is_admin_up = service_data[u'admin_state_up'] + for conn_data in service_data['ipsec_conns']: + self.update_connection(context, vpn_service_id, conn_data) + LOG.debug(_("Update: Completed update processing")) + return vpn_service + + def update_all_services_and_connections(self, context): + """Update services and connections based on plugin info. + + Perform any create and update operations and then update status. + Mark every visited connection as no longer "dirty" so they will + not be deleted at end of sync processing. 
+ """ + services_data = self.agent_rpc.get_vpn_services_on_host(context, + self.host) + LOG.debug("Sync updating for %d VPN services", len(services_data)) + vpn_services = [] + for service_data in services_data: + vpn_service = self.update_service(context, service_data) + if vpn_service: + vpn_services.append(vpn_service) + return vpn_services + + def mark_existing_connections_as_dirty(self): + """Mark all existing connections as "dirty" for sync.""" + service_count = 0 + connection_count = 0 + for service_state in self.service_state.values(): + service_state.is_dirty = True + service_count += 1 + for conn_id in service_state.conn_state: + service_state.conn_state[conn_id].is_dirty = True + connection_count += 1 + LOG.debug(_("Mark: %(service)d VPN services and %(conn)d IPSec " + "connections marked dirty"), {'service': service_count, + 'conn': connection_count}) + + def remove_unknown_connections(self, context): + """Remove connections that are not known by service driver.""" + service_count = 0 + connection_count = 0 + for vpn_service_id, vpn_service in self.service_state.items(): + dirty = [c_id for c_id, c in vpn_service.conn_state.items() + if c.is_dirty] + vpn_service.connections_removed = len(dirty) > 0 + for conn_id in dirty: + conn_state = vpn_service.conn_state[conn_id] + conn_state.delete_ipsec_site_connection(context, conn_id) + connection_count += 1 + del vpn_service.conn_state[conn_id] + if vpn_service.is_dirty: + service_count += 1 + del self.service_state[vpn_service_id] + elif dirty: + self.connections_removed = True + LOG.debug(_("Sweep: Removed %(service)d dirty VPN service%(splural)s " + "and %(conn)d dirty IPSec connection%(cplural)s"), + {'service': service_count, 'conn': connection_count, + 'splural': 's'[service_count == 1:], + 'cplural': 's'[connection_count == 1:]}) + + def build_report_for_connections_on(self, vpn_service): + """Create the report fragment for IPSec connections on a service. + + Collect the current status from the Cisco CSR and use that to update + the status and generate report fragment for each connection on the + service. If there is no status information, or no change, then no + report info will be created for the connection. The combined report + data is returned. + """ + LOG.debug(_("Report: Collecting status for IPSec connections on VPN " + "service %s"), vpn_service.service_id) + tunnels = vpn_service.get_ipsec_connections_status() + report = {} + for connection in vpn_service.conn_state.values(): + if connection.forced_down: + LOG.debug(_("Connection %s forced down"), connection.conn_id) + current_status = constants.DOWN + else: + current_status = connection.find_current_status_in(tunnels) + LOG.debug(_("Connection %(conn)s reported %(status)s"), + {'conn': connection.conn_id, + 'status': current_status}) + frag = connection.update_status_and_build_report(current_status) + if frag: + LOG.debug(_("Report: Adding info for IPSec connection %s"), + connection.conn_id) + report.update(frag) + return report + + def build_report_for_service(self, vpn_service): + """Create the report info for a VPN service and its IPSec connections. + + Get the report info for the connections on the service, and include + it into the report info for the VPN service. If there is no report + info for the connection, then no change has occurred and no report + will be generated. If there is only one connection for the service, + we'll set the service state to match the connection (with ERROR seen + as DOWN). 
+ """ + conn_report = self.build_report_for_connections_on(vpn_service) + if conn_report or vpn_service.connections_removed: + pending_handled = plugin_utils.in_pending_status( + vpn_service.last_status) + vpn_service.update_last_status() + LOG.debug(_("Report: Adding info for VPN service %s"), + vpn_service.service_id) + return {u'id': vpn_service.service_id, + u'status': vpn_service.last_status, + u'updated_pending_status': pending_handled, + u'ipsec_site_connections': conn_report} + else: + return {} + + @lockutils.synchronized('vpn-agent', 'neutron-') + def report_status(self, context): + """Report status of all VPN services and IPSec connections to plugin. + + This is called periodically by the agent, to push up changes in + status. Use a lock to serialize access to (and changing of) + running state. + """ + return self.report_status_internal(context) + + def report_status_internal(self, context): + """Generate report and send to plugin, if anything changed.""" + service_report = [] + LOG.debug(_("Report: Starting status report processing")) + for vpn_service_id, vpn_service in self.service_state.items(): + LOG.debug(_("Report: Collecting status for VPN service %s"), + vpn_service_id) + report = self.build_report_for_service(vpn_service) + if report: + service_report.append(report) + if service_report: + LOG.info(_("Sending status report update to plugin")) + self.agent_rpc.update_status(context, service_report) + LOG.debug(_("Report: Completed status report processing")) + return service_report + + @lockutils.synchronized('vpn-agent', 'neutron-') + def sync(self, context, routers): + """Synchronize with plugin and report current status. + + Mark all "known" services/connections as dirty, update them based on + information from the plugin, remove (sweep) any connections that are + not updated (dirty), and report updates, if any, back to plugin. + Called when update/delete a service or create/update/delete a + connection (vpnservice_updated message), or router change + (_process_routers). + + Use lock to serialize access (and changes) to running state for VPN + service and IPsec connections. 
+ """ + self.mark_existing_connections_as_dirty() + self.update_all_services_and_connections(context) + self.remove_unknown_connections(context) + self.report_status_internal(context) + + def create_router(self, process_id): + """Actions taken when router created.""" + # Note: Since Cisco CSR is running out-of-band, nothing to do here + pass + + def destroy_router(self, process_id): + """Actions taken when router deleted.""" + # Note: Since Cisco CSR is running out-of-band, nothing to do here + pass + + +class CiscoCsrVpnService(object): + + """Maintains state/status information for a service and its connections.""" + + def __init__(self, service_data, csr): + self.service_id = service_data['id'] + self.conn_state = {} + self.csr = csr + self.is_admin_up = True + # TODO(pcm) FUTURE - handle sharing of policies + + def create_connection(self, conn_data): + conn_id = conn_data['id'] + self.conn_state[conn_id] = CiscoCsrIPSecConnection(conn_data, self.csr) + return self.conn_state[conn_id] + + def get_connection(self, conn_id): + return self.conn_state.get(conn_id) + + def conn_status(self, conn_id): + conn_state = self.get_connection(conn_id) + if conn_state: + return conn_state.last_status + + def snapshot_conn_state(self, ipsec_conn): + """Create/obtain connection state and save current status.""" + conn_state = self.conn_state.setdefault( + ipsec_conn['id'], CiscoCsrIPSecConnection(ipsec_conn, self.csr)) + conn_state.last_status = ipsec_conn['status'] + conn_state.is_dirty = False + return conn_state + + STATUS_MAP = {'ERROR': constants.ERROR, + 'UP-ACTIVE': constants.ACTIVE, + 'UP-IDLE': constants.ACTIVE, + 'UP-NO-IKE': constants.ACTIVE, + 'DOWN': constants.DOWN, + 'DOWN-NEGOTIATING': constants.DOWN} + + def get_ipsec_connections_status(self): + """Obtain current status of all tunnels on a Cisco CSR. + + Convert them to OpenStack status values. 
+ """ + tunnels = self.csr.read_tunnel_statuses() + for tunnel in tunnels: + LOG.debug("CSR Reports %(tunnel)s status '%(status)s'", + {'tunnel': tunnel[0], 'status': tunnel[1]}) + return dict(map(lambda x: (x[0], self.STATUS_MAP[x[1]]), tunnels)) + + def find_matching_connection(self, tunnel_id): + """Find IPSec connection using Cisco CSR tunnel specified, if any.""" + for connection in self.conn_state.values(): + if connection.tunnel == tunnel_id: + return connection.conn_id + + def no_connections_up(self): + return not any(c.last_status == 'ACTIVE' + for c in self.conn_state.values()) + + def update_last_status(self): + if not self.is_admin_up or self.no_connections_up(): + self.last_status = constants.DOWN + else: + self.last_status = constants.ACTIVE + + +class CiscoCsrIPSecConnection(object): + + """State and actions for IPSec site-to-site connections.""" + + def __init__(self, conn_info, csr): + self.conn_info = conn_info + self.csr = csr + self.steps = [] + self.forced_down = False + self.changed = False + + @property + def conn_id(self): + return self.conn_info['id'] + + @property + def is_admin_up(self): + return self.conn_info['admin_state_up'] + + @is_admin_up.setter + def is_admin_up(self, is_up): + self.conn_info['admin_state_up'] = is_up + + @property + def tunnel(self): + return self.conn_info['cisco']['site_conn_id'] + + def check_for_changes(self, curr_conn): + return not all([self.conn_info[attr] == curr_conn[attr] + for attr in ('mtu', 'psk', 'peer_address', + 'peer_cidrs', 'ike_policy', + 'ipsec_policy', 'cisco')]) + + def find_current_status_in(self, statuses): + if self.tunnel in statuses: + return statuses[self.tunnel] + else: + return constants.ERROR + + def update_status_and_build_report(self, current_status): + if current_status != self.last_status: + pending_handled = plugin_utils.in_pending_status(self.last_status) + self.last_status = current_status + return {self.conn_id: {'status': current_status, + 'updated_pending_status': pending_handled}} + else: + return {} + + DIALECT_MAP = {'ike_policy': {'name': 'IKE Policy', + 'v1': u'v1', + # auth_algorithm -> hash + 'sha1': u'sha', + # encryption_algorithm -> encryption + '3des': u'3des', + 'aes-128': u'aes', + 'aes-192': u'aes192', + 'aes-256': u'aes256', + # pfs -> dhGroup + 'group2': 2, + 'group5': 5, + 'group14': 14}, + 'ipsec_policy': {'name': 'IPSec Policy', + # auth_algorithm -> esp-authentication + 'sha1': u'esp-sha-hmac', + # transform_protocol -> ah + 'esp': None, + 'ah': u'ah-sha-hmac', + 'ah-esp': u'ah-sha-hmac', + # encryption_algorithm -> esp-encryption + '3des': u'esp-3des', + 'aes-128': u'esp-aes', + 'aes-192': u'esp-192-aes', + 'aes-256': u'esp-256-aes', + # pfs -> pfs + 'group2': u'group2', + 'group5': u'group5', + 'group14': u'group14'}} + + def translate_dialect(self, resource, attribute, info): + """Map VPNaaS attributes values to CSR values for a resource.""" + name = self.DIALECT_MAP[resource]['name'] + if attribute not in info: + raise CsrDriverMismatchError(resource=name, attr=attribute) + value = info[attribute].lower() + if value in self.DIALECT_MAP[resource]: + return self.DIALECT_MAP[resource][value] + raise CsrUnknownMappingError(resource=name, attr=attribute, + value=value) + + def create_psk_info(self, psk_id, conn_info): + """Collect/create attributes needed for pre-shared key.""" + return {u'keyring-name': psk_id, + u'pre-shared-key-list': [ + {u'key': conn_info['psk'], + u'encrypted': False, + u'peer-address': conn_info['peer_address']}]} + + def create_ike_policy_info(self, 
ike_policy_id, conn_info): + """Collect/create/map attributes needed for IKE policy.""" + for_ike = 'ike_policy' + policy_info = conn_info[for_ike] + version = self.translate_dialect(for_ike, + 'ike_version', + policy_info) + encrypt_algorithm = self.translate_dialect(for_ike, + 'encryption_algorithm', + policy_info) + auth_algorithm = self.translate_dialect(for_ike, + 'auth_algorithm', + policy_info) + group = self.translate_dialect(for_ike, + 'pfs', + policy_info) + lifetime = policy_info['lifetime_value'] + return {u'version': version, + u'priority-id': ike_policy_id, + u'encryption': encrypt_algorithm, + u'hash': auth_algorithm, + u'dhGroup': group, + u'lifetime': lifetime} + + def create_ipsec_policy_info(self, ipsec_policy_id, info): + """Collect/create attributes needed for IPSec policy. + + Note: OpenStack will provide a default encryption algorithm, if one is + not provided, so a authentication only configuration of (ah, sha1), + which maps to ah-sha-hmac transform protocol, cannot be selected. + As a result, we'll always configure the encryption algorithm, and + will select ah-sha-hmac for transform protocol. + """ + + for_ipsec = 'ipsec_policy' + policy_info = info[for_ipsec] + transform_protocol = self.translate_dialect(for_ipsec, + 'transform_protocol', + policy_info) + auth_algorithm = self.translate_dialect(for_ipsec, + 'auth_algorithm', + policy_info) + encrypt_algorithm = self.translate_dialect(for_ipsec, + 'encryption_algorithm', + policy_info) + group = self.translate_dialect(for_ipsec, 'pfs', policy_info) + lifetime = policy_info['lifetime_value'] + settings = {u'policy-id': ipsec_policy_id, + u'protection-suite': { + u'esp-encryption': encrypt_algorithm, + u'esp-authentication': auth_algorithm}, + u'lifetime-sec': lifetime, + u'pfs': group, + u'anti-replay-window-size': u'disable'} + if transform_protocol: + settings[u'protection-suite'][u'ah'] = transform_protocol + return settings + + def create_site_connection_info(self, site_conn_id, ipsec_policy_id, + conn_info): + """Collect/create attributes needed for the IPSec connection.""" + # TODO(pcm) Enable, once CSR is embedded as a Neutron router + # gw_ip = vpnservice['external_ip'] (need to pass in) + mtu = conn_info['mtu'] + return { + u'vpn-interface-name': site_conn_id, + u'ipsec-policy-id': ipsec_policy_id, + u'local-device': { + # TODO(pcm): FUTURE - Get CSR port of interface with + # local subnet + u'ip-address': u'GigabitEthernet3', + # TODO(pcm): FUTURE - Get IP address of router's public + # I/F, once CSR is used as embedded router. 
+ u'tunnel-ip-address': self.csr.tunnel_ip + # u'tunnel-ip-address': u'%s' % gw_ip + }, + u'remote-device': { + u'tunnel-ip-address': conn_info['peer_address'] + }, + u'mtu': mtu + } + + def create_routes_info(self, site_conn_id, conn_info): + """Collect/create attributes for static routes.""" + routes_info = [] + for peer_cidr in conn_info.get('peer_cidrs', []): + route = {u'destination-network': peer_cidr, + u'outgoing-interface': site_conn_id} + route_id = csr_client.make_route_id(peer_cidr, site_conn_id) + routes_info.append((route_id, route)) + return routes_info + + def _check_create(self, resource, which): + """Determine if REST create request was successful.""" + if self.csr.status == requests.codes.CREATED: + LOG.debug("%(resource)s %(which)s is configured", + {'resource': resource, 'which': which}) + return + LOG.error(_("Unable to create %(resource)s %(which)s: " + "%(status)d"), + {'resource': resource, 'which': which, + 'status': self.csr.status}) + # ToDO(pcm): Set state to error + raise CsrResourceCreateFailure(resource=resource, which=which) + + def do_create_action(self, action_suffix, info, resource_id, title): + """Perform a single REST step for IPSec site connection create.""" + create_action = 'create_%s' % action_suffix + try: + getattr(self.csr, create_action)(info) + except AttributeError: + LOG.exception(_("Internal error - '%s' is not defined"), + create_action) + raise CsrResourceCreateFailure(resource=title, + which=resource_id) + self._check_create(title, resource_id) + self.steps.append(RollbackStep(action_suffix, resource_id, title)) + + def _verify_deleted(self, status, resource, which): + """Determine if REST delete request was successful.""" + if status in (requests.codes.NO_CONTENT, requests.codes.NOT_FOUND): + LOG.debug("%(resource)s configuration %(which)s was removed", + {'resource': resource, 'which': which}) + else: + LOG.warning(_("Unable to delete %(resource)s %(which)s: " + "%(status)d"), {'resource': resource, + 'which': which, + 'status': status}) + + def do_rollback(self): + """Undo create steps that were completed successfully.""" + for step in reversed(self.steps): + delete_action = 'delete_%s' % step.action + LOG.debug(_("Performing rollback action %(action)s for " + "resource %(resource)s"), {'action': delete_action, + 'resource': step.title}) + try: + getattr(self.csr, delete_action)(step.resource_id) + except AttributeError: + LOG.exception(_("Internal error - '%s' is not defined"), + delete_action) + raise CsrResourceCreateFailure(resource=step.title, + which=step.resource_id) + self._verify_deleted(self.csr.status, step.title, step.resource_id) + self.steps = [] + + def create_ipsec_site_connection(self, context, conn_info): + """Creates an IPSec site-to-site connection on CSR. + + Create the PSK, IKE policy, IPSec policy, connection, static route, + and (future) DPD. 
+ """ + # Get all the IDs + conn_id = conn_info['id'] + psk_id = conn_id + site_conn_id = conn_info['cisco']['site_conn_id'] + ike_policy_id = conn_info['cisco']['ike_policy_id'] + ipsec_policy_id = conn_info['cisco']['ipsec_policy_id'] + + LOG.debug(_('Creating IPSec connection %s'), conn_id) + # Get all the attributes needed to create + try: + psk_info = self.create_psk_info(psk_id, conn_info) + ike_policy_info = self.create_ike_policy_info(ike_policy_id, + conn_info) + ipsec_policy_info = self.create_ipsec_policy_info(ipsec_policy_id, + conn_info) + connection_info = self.create_site_connection_info(site_conn_id, + ipsec_policy_id, + conn_info) + routes_info = self.create_routes_info(site_conn_id, conn_info) + except (CsrUnknownMappingError, CsrDriverMismatchError) as e: + LOG.exception(e) + return + + try: + self.do_create_action('pre_shared_key', psk_info, + conn_id, 'Pre-Shared Key') + self.do_create_action('ike_policy', ike_policy_info, + ike_policy_id, 'IKE Policy') + self.do_create_action('ipsec_policy', ipsec_policy_info, + ipsec_policy_id, 'IPSec Policy') + self.do_create_action('ipsec_connection', connection_info, + site_conn_id, 'IPSec Connection') + + # TODO(pcm): FUTURE - Do DPD for v1 and handle if >1 connection + # and different DPD settings + for route_id, route_info in routes_info: + self.do_create_action('static_route', route_info, + route_id, 'Static Route') + except CsrResourceCreateFailure: + self.do_rollback() + LOG.info(_("FAILED: Create of IPSec site-to-site connection %s"), + conn_id) + else: + LOG.info(_("SUCCESS: Created IPSec site-to-site connection %s"), + conn_id) + + def delete_ipsec_site_connection(self, context, conn_id): + """Delete the site-to-site IPSec connection. + + This will be best effort and will continue, if there are any + failures. + """ + LOG.debug(_('Deleting IPSec connection %s'), conn_id) + if not self.steps: + LOG.warning(_('Unable to find connection %s'), conn_id) + else: + self.do_rollback() + + LOG.info(_("SUCCESS: Deleted IPSec site-to-site connection %s"), + conn_id) + + def set_admin_state(self, is_up): + """Change the admin state for the IPSec connection.""" + self.csr.set_ipsec_connection_state(self.tunnel, admin_up=is_up) + if self.csr.status != requests.codes.NO_CONTENT: + state = "UP" if is_up else "DOWN" + LOG.error(_("Unable to change %(tunnel)s admin state to " + "%(state)s"), {'tunnel': self.tunnel, 'state': state}) + raise CsrAdminStateChangeFailure(tunnel=self.tunnel, state=state) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/ipsec.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/ipsec.py new file mode 100644 index 00000000..6d2188fd --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/device_drivers/ipsec.py @@ -0,0 +1,711 @@ +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import abc +import copy +import os +import re +import shutil + +import jinja2 +import netaddr +from oslo.config import cfg +from oslo import messaging +import six + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import rpc as n_rpc +from neutron import context +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants +from neutron.plugins.common import utils as plugin_utils +from neutron.services.vpn.common import topics +from neutron.services.vpn import device_drivers + +LOG = logging.getLogger(__name__) +TEMPLATE_PATH = os.path.dirname(__file__) + +ipsec_opts = [ + cfg.StrOpt( + 'config_base_dir', + default='$state_path/ipsec', + help=_('Location to store ipsec server config files')), + cfg.IntOpt('ipsec_status_check_interval', + default=60, + help=_("Interval for checking ipsec status")) +] +cfg.CONF.register_opts(ipsec_opts, 'ipsec') + +openswan_opts = [ + cfg.StrOpt( + 'ipsec_config_template', + default=os.path.join( + TEMPLATE_PATH, + 'template/openswan/ipsec.conf.template'), + help=_('Template file for ipsec configuration')), + cfg.StrOpt( + 'ipsec_secret_template', + default=os.path.join( + TEMPLATE_PATH, + 'template/openswan/ipsec.secret.template'), + help=_('Template file for ipsec secret configuration')) +] + +cfg.CONF.register_opts(openswan_opts, 'openswan') + +JINJA_ENV = None + +STATUS_MAP = { + 'erouted': constants.ACTIVE, + 'unrouted': constants.DOWN +} + +IPSEC_CONNS = 'ipsec_site_connections' + + +def _get_template(template_file): + global JINJA_ENV + if not JINJA_ENV: + templateLoader = jinja2.FileSystemLoader(searchpath="/") + JINJA_ENV = jinja2.Environment(loader=templateLoader) + return JINJA_ENV.get_template(template_file) + + +@six.add_metaclass(abc.ABCMeta) +class BaseSwanProcess(): + """Swan Family Process Manager + + This class manages start/restart/stop ipsec process. 
+ This class create/delete config template + """ + + binary = "ipsec" + CONFIG_DIRS = [ + 'var/run', + 'log', + 'etc', + 'etc/ipsec.d/aacerts', + 'etc/ipsec.d/acerts', + 'etc/ipsec.d/cacerts', + 'etc/ipsec.d/certs', + 'etc/ipsec.d/crls', + 'etc/ipsec.d/ocspcerts', + 'etc/ipsec.d/policies', + 'etc/ipsec.d/private', + 'etc/ipsec.d/reqs', + 'etc/pki/nssdb/' + ] + + DIALECT_MAP = { + "3des": "3des", + "aes-128": "aes128", + "aes-256": "aes256", + "aes-192": "aes192", + "group2": "modp1024", + "group5": "modp1536", + "group14": "modp2048", + "group15": "modp3072", + "bi-directional": "start", + "response-only": "add", + "v2": "insist", + "v1": "never" + } + + def __init__(self, conf, root_helper, process_id, + vpnservice, namespace): + self.conf = conf + self.id = process_id + self.root_helper = root_helper + self.updated_pending_status = False + self.namespace = namespace + self.connection_status = {} + self.config_dir = os.path.join( + cfg.CONF.ipsec.config_base_dir, self.id) + self.etc_dir = os.path.join(self.config_dir, 'etc') + self.update_vpnservice(vpnservice) + + def translate_dialect(self): + if not self.vpnservice: + return + for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + self._dialect(ipsec_site_conn, 'initiator') + self._dialect(ipsec_site_conn['ikepolicy'], 'ike_version') + for key in ['encryption_algorithm', + 'auth_algorithm', + 'pfs']: + self._dialect(ipsec_site_conn['ikepolicy'], key) + self._dialect(ipsec_site_conn['ipsecpolicy'], key) + + def update_vpnservice(self, vpnservice): + self.vpnservice = vpnservice + self.translate_dialect() + + def _dialect(self, obj, key): + obj[key] = self.DIALECT_MAP.get(obj[key], obj[key]) + + @abc.abstractmethod + def ensure_configs(self): + pass + + def ensure_config_file(self, kind, template, vpnservice): + """Update config file, based on current settings for service.""" + config_str = self._gen_config_content(template, vpnservice) + config_file_name = self._get_config_filename(kind) + utils.replace_file(config_file_name, config_str) + + def remove_config(self): + """Remove whole config file.""" + shutil.rmtree(self.config_dir, ignore_errors=True) + + def _get_config_filename(self, kind): + config_dir = self.etc_dir + return os.path.join(config_dir, kind) + + def _ensure_dir(self, dir_path): + if not os.path.isdir(dir_path): + os.makedirs(dir_path, 0o755) + + def ensure_config_dir(self, vpnservice): + """Create config directory if it does not exist.""" + self._ensure_dir(self.config_dir) + for subdir in self.CONFIG_DIRS: + dir_path = os.path.join(self.config_dir, subdir) + self._ensure_dir(dir_path) + + def _gen_config_content(self, template_file, vpnservice): + template = _get_template(template_file) + return template.render( + {'vpnservice': vpnservice, + 'state_path': cfg.CONF.state_path}) + + @abc.abstractmethod + def get_status(self): + pass + + @property + def status(self): + if self.active: + return constants.ACTIVE + return constants.DOWN + + @property + def active(self): + """Check if the process is active or not.""" + if not self.namespace: + return False + try: + status = self.get_status() + self._update_connection_status(status) + except RuntimeError: + return False + return True + + def update(self): + """Update Status based on vpnservice configuration.""" + if self.vpnservice and not self.vpnservice['admin_state_up']: + self.disable() + else: + self.enable() + + if plugin_utils.in_pending_status(self.vpnservice['status']): + self.updated_pending_status = True + + self.vpnservice['status'] = self.status + 
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + if plugin_utils.in_pending_status(ipsec_site_conn['status']): + conn_id = ipsec_site_conn['id'] + conn_status = self.connection_status.get(conn_id) + if not conn_status: + continue + conn_status['updated_pending_status'] = True + ipsec_site_conn['status'] = conn_status['status'] + + def enable(self): + """Enabling the process.""" + try: + self.ensure_configs() + if self.active: + self.restart() + else: + self.start() + except RuntimeError: + LOG.exception( + _("Failed to enable vpn process on router %s"), + self.id) + + def disable(self): + """Disabling the process.""" + try: + if self.active: + self.stop() + self.remove_config() + except RuntimeError: + LOG.exception( + _("Failed to disable vpn process on router %s"), + self.id) + + @abc.abstractmethod + def restart(self): + """Restart process.""" + + @abc.abstractmethod + def start(self): + """Start process.""" + + @abc.abstractmethod + def stop(self): + """Stop process.""" + + def _update_connection_status(self, status_output): + for line in status_output.split('\n'): + m = re.search('\d\d\d "([a-f0-9\-]+).* (unrouted|erouted);', line) + if not m: + continue + connection_id = m.group(1) + status = m.group(2) + if not self.connection_status.get(connection_id): + self.connection_status[connection_id] = { + 'status': None, + 'updated_pending_status': False + } + self.connection_status[ + connection_id]['status'] = STATUS_MAP[status] + + +class OpenSwanProcess(BaseSwanProcess): + """OpenSwan Process manager class. + + This process class uses three commands + (1) ipsec pluto: IPsec IKE keying daemon + (2) ipsec addconn: Adds new ipsec addconn + (3) ipsec whack: control interface for IPSEC keying daemon + """ + def __init__(self, conf, root_helper, process_id, + vpnservice, namespace): + super(OpenSwanProcess, self).__init__( + conf, root_helper, process_id, + vpnservice, namespace) + self.secrets_file = os.path.join( + self.etc_dir, 'ipsec.secrets') + self.config_file = os.path.join( + self.etc_dir, 'ipsec.conf') + self.pid_path = os.path.join( + self.config_dir, 'var', 'run', 'pluto') + + def _execute(self, cmd, check_exit_code=True): + """Execute command on namespace.""" + ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace) + return ip_wrapper.netns.execute( + cmd, + check_exit_code=check_exit_code) + + def ensure_configs(self): + """Generate config files which are needed for OpenSwan. + + If there is no directory, this function will create + dirs. + """ + self.ensure_config_dir(self.vpnservice) + self.ensure_config_file( + 'ipsec.conf', + self.conf.openswan.ipsec_config_template, + self.vpnservice) + self.ensure_config_file( + 'ipsec.secrets', + self.conf.openswan.ipsec_secret_template, + self.vpnservice) + + def get_status(self): + return self._execute([self.binary, + 'whack', + '--ctlbase', + self.pid_path, + '--status']) + + def restart(self): + """Restart the process.""" + self.stop() + self.start() + return + + def _get_nexthop(self, address): + routes = self._execute( + ['ip', 'route', 'get', address]) + if routes.find('via') >= 0: + return routes.split(' ')[2] + return address + + def _virtual_privates(self): + """Returns line of virtual_privates. + + virtual_private contains the networks + that are allowed as subnet for the remote client. 
+ """ + virtual_privates = [] + nets = [self.vpnservice['subnet']['cidr']] + for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + nets += ipsec_site_conn['peer_cidrs'] + for net in nets: + version = netaddr.IPNetwork(net).version + virtual_privates.append('%%v%s:%s' % (version, net)) + return ','.join(virtual_privates) + + def start(self): + """Start the process. + + Note: if there is not namespace yet, + just do nothing, and wait next event. + """ + if not self.namespace: + return + virtual_private = self._virtual_privates() + #start pluto IKE keying daemon + self._execute([self.binary, + 'pluto', + '--ctlbase', self.pid_path, + '--ipsecdir', self.etc_dir, + '--use-netkey', + '--uniqueids', + '--nat_traversal', + '--secretsfile', self.secrets_file, + '--virtual_private', virtual_private + ]) + #add connections + for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + nexthop = self._get_nexthop(ipsec_site_conn['peer_address']) + self._execute([self.binary, + 'addconn', + '--ctlbase', '%s.ctl' % self.pid_path, + '--defaultroutenexthop', nexthop, + '--config', self.config_file, + ipsec_site_conn['id'] + ]) + #TODO(nati) fix this when openswan is fixed + #Due to openswan bug, this command always exit with 3 + #start whack ipsec keying daemon + self._execute([self.binary, + 'whack', + '--ctlbase', self.pid_path, + '--listen', + ], check_exit_code=False) + + for ipsec_site_conn in self.vpnservice['ipsec_site_connections']: + if not ipsec_site_conn['initiator'] == 'start': + continue + #initiate ipsec connection + self._execute([self.binary, + 'whack', + '--ctlbase', self.pid_path, + '--name', ipsec_site_conn['id'], + '--asynchronous', + '--initiate' + ]) + + def disconnect(self): + if not self.namespace: + return + if not self.vpnservice: + return + for conn_id in self.connection_status: + self._execute([self.binary, + 'whack', + '--ctlbase', self.pid_path, + '--name', '%s/0x1' % conn_id, + '--terminate' + ]) + + def stop(self): + #Stop process using whack + #Note this will also stop pluto + self.disconnect() + self._execute([self.binary, + 'whack', + '--ctlbase', self.pid_path, + '--shutdown', + ]) + #clean connection_status info + self.connection_status = {} + + +class IPsecVpnDriverApi(n_rpc.RpcProxy): + """IPSecVpnDriver RPC api.""" + IPSEC_PLUGIN_VERSION = '1.0' + + def get_vpn_services_on_host(self, context, host): + """Get list of vpnservices. + + The vpnservices including related ipsec_site_connection, + ikepolicy and ipsecpolicy on this host + """ + return self.call(context, + self.make_msg('get_vpn_services_on_host', + host=host), + version=self.IPSEC_PLUGIN_VERSION, + topic=self.topic) + + def update_status(self, context, status): + """Update local status. + + This method call updates status attribute of + VPNServices. + """ + return self.cast(context, + self.make_msg('update_status', + status=status), + version=self.IPSEC_PLUGIN_VERSION, + topic=self.topic) + + +@six.add_metaclass(abc.ABCMeta) +class IPsecDriver(device_drivers.DeviceDriver): + """VPN Device Driver for IPSec. + + This class is designed for use with L3-agent now. + However this driver will be used with another agent in future. + so the use of "Router" is kept minimul now. + Insted of router_id, we are using process_id in this code. 
+ """ + + # history + # 1.0 Initial version + + RPC_API_VERSION = '1.0' + + # TODO(ihrachys): we can't use RpcCallback here due to inheritance + # issues + target = messaging.Target(version=RPC_API_VERSION) + + def __init__(self, agent, host): + self.agent = agent + self.conf = self.agent.conf + self.root_helper = self.agent.root_helper + self.host = host + self.conn = n_rpc.create_connection(new=True) + self.context = context.get_admin_context_without_session() + self.topic = topics.IPSEC_AGENT_TOPIC + node_topic = '%s.%s' % (self.topic, self.host) + + self.processes = {} + self.process_status_cache = {} + + self.endpoints = [self] + self.conn.create_consumer(node_topic, self.endpoints, fanout=False) + self.conn.consume_in_threads() + self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC, '1.0') + self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall( + self.report_status, self.context) + self.process_status_cache_check.start( + interval=self.conf.ipsec.ipsec_status_check_interval) + + def _update_nat(self, vpnservice, func): + """Setting up nat rule in iptables. + + We need to setup nat rule for ipsec packet. + :param vpnservice: vpnservices + :param func: self.add_nat_rule or self.remove_nat_rule + """ + local_cidr = vpnservice['subnet']['cidr'] + router_id = vpnservice['router_id'] + for ipsec_site_connection in vpnservice['ipsec_site_connections']: + for peer_cidr in ipsec_site_connection['peer_cidrs']: + func( + router_id, + 'POSTROUTING', + '-s %s -d %s -m policy ' + '--dir out --pol ipsec ' + '-j ACCEPT ' % (local_cidr, peer_cidr), + top=True) + self.agent.iptables_apply(router_id) + + def vpnservice_updated(self, context, **kwargs): + """Vpnservice updated rpc handler + + VPN Service Driver will call this method + when vpnservices updated. + Then this method start sync with server. + """ + self.sync(context, []) + + @abc.abstractmethod + def create_process(self, process_id, vpnservice, namespace): + pass + + def ensure_process(self, process_id, vpnservice=None): + """Ensuring process. + + If the process doesn't exist, it will create process + and store it in self.processs + """ + process = self.processes.get(process_id) + if not process or not process.namespace: + namespace = self.agent.get_namespace(process_id) + process = self.create_process( + process_id, + vpnservice, + namespace) + self.processes[process_id] = process + elif vpnservice: + process.update_vpnservice(vpnservice) + return process + + def create_router(self, process_id): + """Handling create router event. + + Agent calls this method, when the process namespace + is ready. + """ + if process_id in self.processes: + # In case of vpnservice is created + # before router's namespace + process = self.processes[process_id] + self._update_nat(process.vpnservice, self.agent.add_nat_rule) + process.enable() + + def destroy_router(self, process_id): + """Handling destroy_router event. + + Agent calls this method, when the process namespace + is deleted. 
+ """ + if process_id in self.processes: + process = self.processes[process_id] + process.disable() + vpnservice = process.vpnservice + if vpnservice: + self._update_nat(vpnservice, self.agent.remove_nat_rule) + del self.processes[process_id] + + def get_process_status_cache(self, process): + if not self.process_status_cache.get(process.id): + self.process_status_cache[process.id] = { + 'status': None, + 'id': process.vpnservice['id'], + 'updated_pending_status': False, + 'ipsec_site_connections': {}} + return self.process_status_cache[process.id] + + def is_status_updated(self, process, previous_status): + if process.updated_pending_status: + return True + if process.status != previous_status['status']: + return True + if (process.connection_status != + previous_status['ipsec_site_connections']): + return True + + def unset_updated_pending_status(self, process): + process.updated_pending_status = False + for connection_status in process.connection_status.values(): + connection_status['updated_pending_status'] = False + + def copy_process_status(self, process): + return { + 'id': process.vpnservice['id'], + 'status': process.status, + 'updated_pending_status': process.updated_pending_status, + 'ipsec_site_connections': copy.deepcopy(process.connection_status) + } + + def update_downed_connections(self, process_id, new_status): + """Update info to be reported, if connections just went down. + + If there is no longer any information for a connection, because it + has been removed (e.g. due to an admin down of VPN service or IPSec + connection), but there was previous status information for the + connection, mark the connection as down for reporting purposes. + """ + if process_id in self.process_status_cache: + for conn in self.process_status_cache[process_id][IPSEC_CONNS]: + if conn not in new_status[IPSEC_CONNS]: + new_status[IPSEC_CONNS][conn] = { + 'status': constants.DOWN, + 'updated_pending_status': True + } + + def report_status(self, context): + status_changed_vpn_services = [] + for process in self.processes.values(): + previous_status = self.get_process_status_cache(process) + if self.is_status_updated(process, previous_status): + new_status = self.copy_process_status(process) + self.update_downed_connections(process.id, new_status) + status_changed_vpn_services.append(new_status) + self.process_status_cache[process.id] = ( + self.copy_process_status(process)) + # We need unset updated_pending status after it + # is reported to the server side + self.unset_updated_pending_status(process) + + if status_changed_vpn_services: + self.agent_rpc.update_status( + context, + status_changed_vpn_services) + + @lockutils.synchronized('vpn-agent', 'neutron-') + def sync(self, context, routers): + """Sync status with server side. + + :param context: context object for RPC call + :param routers: Router objects which is created in this sync event + + There could be many failure cases should be + considered including the followings. + 1) Agent class restarted + 2) Failure on process creation + 3) VpnService is deleted during agent down + 4) RPC failure + + In order to handle, these failure cases, + This driver takes simple sync strategies. 
+ """ + vpnservices = self.agent_rpc.get_vpn_services_on_host( + context, self.host) + router_ids = [vpnservice['router_id'] for vpnservice in vpnservices] + # Ensure the ipsec process is enabled + for vpnservice in vpnservices: + process = self.ensure_process(vpnservice['router_id'], + vpnservice=vpnservice) + self._update_nat(vpnservice, self.agent.add_nat_rule) + process.update() + + # Delete any IPSec processes that are + # associated with routers, but are not running the VPN service. + for router in routers: + #We are using router id as process_id + process_id = router['id'] + if process_id not in router_ids: + process = self.ensure_process(process_id) + self.destroy_router(process_id) + + # Delete any IPSec processes running + # VPN that do not have an associated router. + process_ids = [process_id + for process_id in self.processes + if process_id not in router_ids] + for process_id in process_ids: + self.destroy_router(process_id) + self.report_status(context) + + +class OpenSwanDriver(IPsecDriver): + def create_process(self, process_id, vpnservice, namespace): + return OpenSwanProcess( + self.conf, + self.root_helper, + process_id, + vpnservice, + namespace) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/plugin.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/plugin.py new file mode 100644 index 00000000..74218e2a --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/plugin.py @@ -0,0 +1,105 @@ + +# (c) Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Swaminathan Vasudevan, Hewlett-Packard + +from neutron.db.vpn import vpn_db +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services import service_base + +LOG = logging.getLogger(__name__) + + +class VPNPlugin(vpn_db.VPNPluginDb): + + """Implementation of the VPN Service Plugin. + + This class manages the workflow of VPNaaS request/response. + Most DB related works are implemented in class + vpn_db.VPNPluginDb. + """ + supported_extension_aliases = ["vpnaas", "service-type"] + + +class VPNDriverPlugin(VPNPlugin, vpn_db.VPNPluginRpcDbMixin): + """VpnPlugin which supports VPN Service Drivers.""" + #TODO(nati) handle ikepolicy and ipsecpolicy update usecase + def __init__(self): + super(VPNDriverPlugin, self).__init__() + # Load the service driver from neutron.conf. 
+ drivers, default_provider = service_base.load_drivers( + constants.VPN, self) + LOG.info(_("VPN plugin using service driver: %s"), default_provider) + self.ipsec_driver = drivers[default_provider] + + def _get_driver_for_vpnservice(self, vpnservice): + return self.ipsec_driver + + def _get_driver_for_ipsec_site_connection(self, context, + ipsec_site_connection): + #TODO(nati) get vpnservice when we support service type framework + vpnservice = None + return self._get_driver_for_vpnservice(vpnservice) + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + ipsec_site_connection = super( + VPNDriverPlugin, self).create_ipsec_site_connection( + context, ipsec_site_connection) + driver = self._get_driver_for_ipsec_site_connection( + context, ipsec_site_connection) + driver.create_ipsec_site_connection(context, ipsec_site_connection) + return ipsec_site_connection + + def delete_ipsec_site_connection(self, context, ipsec_conn_id): + ipsec_site_connection = self.get_ipsec_site_connection( + context, ipsec_conn_id) + super(VPNDriverPlugin, self).delete_ipsec_site_connection( + context, ipsec_conn_id) + driver = self._get_driver_for_ipsec_site_connection( + context, ipsec_site_connection) + driver.delete_ipsec_site_connection(context, ipsec_site_connection) + + def update_ipsec_site_connection( + self, context, + ipsec_conn_id, ipsec_site_connection): + old_ipsec_site_connection = self.get_ipsec_site_connection( + context, ipsec_conn_id) + ipsec_site_connection = super( + VPNDriverPlugin, self).update_ipsec_site_connection( + context, + ipsec_conn_id, + ipsec_site_connection) + driver = self._get_driver_for_ipsec_site_connection( + context, ipsec_site_connection) + driver.update_ipsec_site_connection( + context, old_ipsec_site_connection, ipsec_site_connection) + return ipsec_site_connection + + def update_vpnservice(self, context, vpnservice_id, vpnservice): + old_vpn_service = self.get_vpnservice(context, vpnservice_id) + new_vpn_service = super( + VPNDriverPlugin, self).update_vpnservice(context, vpnservice_id, + vpnservice) + driver = self._get_driver_for_vpnservice(old_vpn_service) + driver.update_vpnservice(context, old_vpn_service, new_vpn_service) + return new_vpn_service + + def delete_vpnservice(self, context, vpnservice_id): + vpnservice = self._get_vpnservice(context, vpnservice_id) + super(VPNDriverPlugin, self).delete_vpnservice(context, vpnservice_id) + driver = self._get_driver_for_vpnservice(vpnservice) + driver.delete_vpnservice(context, vpnservice) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/__init__.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/__init__.py new file mode 100644 index 00000000..17db3338 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/__init__.py @@ -0,0 +1,90 @@ +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
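The pattern above is worth noting: VPNDriverPlugin lets the vpn_db mixin validate and persist each resource first, and only then hands the result to whichever service driver service_base.load_drivers() selected. The following is a minimal, self-contained sketch of that persist-then-delegate flow; the class and variable names are illustrative and not part of the patch.

class FakeIPsecDriver(object):
    """Stands in for a loaded VPN service driver."""
    def create_ipsec_site_connection(self, context, conn):
        print('driver handles connection %s' % conn['id'])


class FakeVPNPlugin(object):
    def __init__(self, driver):
        self.ipsec_driver = driver

    def create_ipsec_site_connection(self, context, conn):
        # 1. The vpn_db mixin would validate and store the row here.
        # 2. Only then is the service driver asked to act on it,
        #    mirroring VPNDriverPlugin above.
        self.ipsec_driver.create_ipsec_site_connection(context, conn)
        return conn


FakeVPNPlugin(FakeIPsecDriver()).create_ipsec_site_connection(
    None, {'id': 'conn-1'})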
+ +import abc + +import six + +from neutron.common import rpc as n_rpc +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class VpnDriver(object): + + def __init__(self, service_plugin): + self.service_plugin = service_plugin + + @property + def service_type(self): + pass + + @abc.abstractmethod + def create_vpnservice(self, context, vpnservice): + pass + + @abc.abstractmethod + def update_vpnservice( + self, context, old_vpnservice, vpnservice): + pass + + @abc.abstractmethod + def delete_vpnservice(self, context, vpnservice): + pass + + +class BaseIPsecVpnAgentApi(n_rpc.RpcProxy): + """Base class for IPSec API to agent.""" + + def __init__(self, to_agent_topic, topic, default_version): + self.to_agent_topic = to_agent_topic + super(BaseIPsecVpnAgentApi, self).__init__(topic, default_version) + + def _agent_notification(self, context, method, router_id, + version=None, **kwargs): + """Notify update for the agent. + + This method will find where is the router, and + dispatch notification for the agent. + """ + admin_context = context.is_admin and context or context.elevated() + plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + if not version: + version = self.RPC_API_VERSION + l3_agents = plugin.get_l3_agents_hosting_routers( + admin_context, [router_id], + admin_state_up=True, + active=True) + for l3_agent in l3_agents: + LOG.debug(_('Notify agent at %(topic)s.%(host)s the message ' + '%(method)s %(args)s'), + {'topic': self.to_agent_topic, + 'host': l3_agent.host, + 'method': method, + 'args': kwargs}) + self.cast( + context, self.make_msg(method, **kwargs), + version=version, + topic='%s.%s' % (self.to_agent_topic, l3_agent.host)) + + def vpnservice_updated(self, context, router_id, **kwargs): + """Send update event of vpnservices.""" + self._agent_notification(context, 'vpnservice_updated', router_id, + **kwargs) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/cisco_csr_db.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/cisco_csr_db.py new file mode 100644 index 00000000..e98fb820 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/cisco_csr_db.py @@ -0,0 +1,239 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Paul Michali, Cisco Systems, Inc. 
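Before the Cisco mapping code begins, note how BaseIPsecVpnAgentApi above targets agents: it looks up the L3 agents hosting the router and casts to a host-scoped topic for each one. A rough sketch of that topic construction follows; the host names and topic string are invented for illustration.

hosts = ['net-node-1', 'net-node-2']   # assumed L3 agent hosts
to_agent_topic = 'ipsec_agent'         # stands in for topics.IPSEC_AGENT_TOPIC
for host in hosts:
    per_host_topic = '%s.%s' % (to_agent_topic, host)
    # The real proxy would do something like:
    #   self.cast(context, self.make_msg('vpnservice_updated', **kwargs),
    #             topic=per_host_topic)
    print('would cast vpnservice_updated on %s' % per_host_topic)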
+ +from oslo.db import exception as db_exc +import sqlalchemy as sa +from sqlalchemy.orm import exc as sql_exc + +from neutron.common import exceptions +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.db.vpn import vpn_db +from neutron.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +# Note: Artificially limit these to reduce mapping table size and performance +# Tunnel can be 0..7FFFFFFF, IKE policy can be 1..10000, IPSec policy can be +# 1..31 characters long. +MAX_CSR_TUNNELS = 10000 +MAX_CSR_IKE_POLICIES = 2000 +MAX_CSR_IPSEC_POLICIES = 2000 + +TUNNEL = 'Tunnel' +IKE_POLICY = 'IKE Policy' +IPSEC_POLICY = 'IPSec Policy' + +MAPPING_LIMITS = {TUNNEL: (0, MAX_CSR_TUNNELS), + IKE_POLICY: (1, MAX_CSR_IKE_POLICIES), + IPSEC_POLICY: (1, MAX_CSR_IPSEC_POLICIES)} + + +class CsrInternalError(exceptions.NeutronException): + message = _("Fatal - %(reason)s") + + +class IdentifierMap(model_base.BASEV2, models_v2.HasTenant): + + """Maps OpenStack IDs to compatible numbers for Cisco CSR.""" + + __tablename__ = 'cisco_csr_identifier_map' + + ipsec_site_conn_id = sa.Column(sa.String(64), + sa.ForeignKey('ipsec_site_connections.id', + ondelete="CASCADE"), + primary_key=True) + csr_tunnel_id = sa.Column(sa.Integer, nullable=False) + csr_ike_policy_id = sa.Column(sa.Integer, nullable=False) + csr_ipsec_policy_id = sa.Column(sa.Integer, nullable=False) + + +def get_next_available_id(session, table_field, id_type): + """Find first unused id for the specified field in IdentifierMap table. + + As entries are removed, find the first "hole" and return that as the + next available ID. To improve performance, artificially limit + the number of entries to a smaller range. Currently, these IDs are + globally unique. Could enhance in the future to be unique per router + (CSR). 
+ """ + min_value = MAPPING_LIMITS[id_type][0] + max_value = MAPPING_LIMITS[id_type][1] + rows = session.query(table_field).order_by(table_field) + used_ids = set([row[0] for row in rows]) + all_ids = set(range(min_value, max_value + min_value)) + available_ids = all_ids - used_ids + if not available_ids: + msg = _("No available Cisco CSR %(type)s IDs from " + "%(min)d..%(max)d") % {'type': id_type, + 'min': min_value, + 'max': max_value} + LOG.error(msg) + raise IndexError(msg) + return available_ids.pop() + + +def get_next_available_tunnel_id(session): + """Find first available tunnel ID from 0..MAX_CSR_TUNNELS-1.""" + return get_next_available_id(session, IdentifierMap.csr_tunnel_id, + TUNNEL) + + +def get_next_available_ike_policy_id(session): + """Find first available IKE Policy ID from 1..MAX_CSR_IKE_POLICIES.""" + return get_next_available_id(session, IdentifierMap.csr_ike_policy_id, + IKE_POLICY) + + +def get_next_available_ipsec_policy_id(session): + """Find first available IPSec Policy ID from 1..MAX_CSR_IKE_POLICIES.""" + return get_next_available_id(session, IdentifierMap.csr_ipsec_policy_id, + IPSEC_POLICY) + + +def find_conn_with_policy(policy_field, policy_id, conn_id, session): + """Return ID of another conneciton (if any) that uses same policy ID.""" + qry = session.query(vpn_db.IPsecSiteConnection.id) + match = qry.filter_request( + policy_field == policy_id, + vpn_db.IPsecSiteConnection.id != conn_id).first() + if match: + return match[0] + + +def find_connection_using_ike_policy(ike_policy_id, conn_id, session): + """Return ID of another connection that uses same IKE policy ID.""" + return find_conn_with_policy(vpn_db.IPsecSiteConnection.ikepolicy_id, + ike_policy_id, conn_id, session) + + +def find_connection_using_ipsec_policy(ipsec_policy_id, conn_id, session): + """Return ID of another connection that uses same IPSec policy ID.""" + return find_conn_with_policy(vpn_db.IPsecSiteConnection.ipsecpolicy_id, + ipsec_policy_id, conn_id, session) + + +def lookup_policy(policy_type, policy_field, conn_id, session): + """Obtain specified policy's mapping from other connection.""" + try: + return session.query(policy_field).filter_by( + ipsec_site_conn_id=conn_id).one()[0] + except sql_exc.NoResultFound: + msg = _("Database inconsistency between IPSec connection and " + "Cisco CSR mapping table (%s)") % policy_type + raise CsrInternalError(reason=msg) + + +def lookup_ike_policy_id_for(conn_id, session): + """Obtain existing Cisco CSR IKE policy ID from another connection.""" + return lookup_policy(IKE_POLICY, IdentifierMap.csr_ike_policy_id, + conn_id, session) + + +def lookup_ipsec_policy_id_for(conn_id, session): + """Obtain existing Cisco CSR IPSec policy ID from another connection.""" + return lookup_policy(IPSEC_POLICY, IdentifierMap.csr_ipsec_policy_id, + conn_id, session) + + +def determine_csr_policy_id(policy_type, conn_policy_field, map_policy_field, + policy_id, conn_id, session): + """Use existing or reserve a new policy ID for Cisco CSR use. + + TODO(pcm) FUTURE: Once device driver adds support for IKE/IPSec policy + ID sharing, add call to find_conn_with_policy() to find used ID and + then call lookup_policy() to find the current mapping for that ID. 
+ """ + csr_id = get_next_available_id(session, map_policy_field, policy_type) + LOG.debug(_("Reserved new CSR ID %(csr_id)d for %(policy)s " + "ID %(policy_id)s"), {'csr_id': csr_id, + 'policy': policy_type, + 'policy_id': policy_id}) + return csr_id + + +def determine_csr_ike_policy_id(ike_policy_id, conn_id, session): + """Use existing, or reserve a new IKE policy ID for Cisco CSR.""" + return determine_csr_policy_id(IKE_POLICY, + vpn_db.IPsecSiteConnection.ikepolicy_id, + IdentifierMap.csr_ike_policy_id, + ike_policy_id, conn_id, session) + + +def determine_csr_ipsec_policy_id(ipsec_policy_id, conn_id, session): + """Use existing, or reserve a new IPSec policy ID for Cisco CSR.""" + return determine_csr_policy_id(IPSEC_POLICY, + vpn_db.IPsecSiteConnection.ipsecpolicy_id, + IdentifierMap.csr_ipsec_policy_id, + ipsec_policy_id, conn_id, session) + + +def get_tunnel_mapping_for(conn_id, session): + try: + entry = session.query(IdentifierMap).filter_by( + ipsec_site_conn_id=conn_id).one() + LOG.debug(_("Mappings for IPSec connection %(conn)s - " + "tunnel=%(tunnel)s ike_policy=%(csr_ike)d " + "ipsec_policy=%(csr_ipsec)d"), + {'conn': conn_id, 'tunnel': entry.csr_tunnel_id, + 'csr_ike': entry.csr_ike_policy_id, + 'csr_ipsec': entry.csr_ipsec_policy_id}) + return (entry.csr_tunnel_id, entry.csr_ike_policy_id, + entry.csr_ipsec_policy_id) + except sql_exc.NoResultFound: + msg = _("Existing entry for IPSec connection %s not found in Cisco " + "CSR mapping table") % conn_id + raise CsrInternalError(reason=msg) + + +def create_tunnel_mapping(context, conn_info): + """Create Cisco CSR IDs, using mapping table and OpenStack UUIDs.""" + conn_id = conn_info['id'] + ike_policy_id = conn_info['ikepolicy_id'] + ipsec_policy_id = conn_info['ipsecpolicy_id'] + tenant_id = conn_info['tenant_id'] + with context.session.begin(): + csr_tunnel_id = get_next_available_tunnel_id(context.session) + csr_ike_id = determine_csr_ike_policy_id(ike_policy_id, conn_id, + context.session) + csr_ipsec_id = determine_csr_ipsec_policy_id(ipsec_policy_id, conn_id, + context.session) + map_entry = IdentifierMap(tenant_id=tenant_id, + ipsec_site_conn_id=conn_id, + csr_tunnel_id=csr_tunnel_id, + csr_ike_policy_id=csr_ike_id, + csr_ipsec_policy_id=csr_ipsec_id) + try: + context.session.add(map_entry) + # Force committing to database + context.session.flush() + except db_exc.DBDuplicateEntry: + msg = _("Attempt to create duplicate entry in Cisco CSR " + "mapping table for connection %s") % conn_id + raise CsrInternalError(reason=msg) + LOG.info(_("Mapped connection %(conn_id)s to Tunnel%(tunnel_id)d " + "using IKE policy ID %(ike_id)d and IPSec policy " + "ID %(ipsec_id)d"), + {'conn_id': conn_id, 'tunnel_id': csr_tunnel_id, + 'ike_id': csr_ike_id, 'ipsec_id': csr_ipsec_id}) + + +def delete_tunnel_mapping(context, conn_info): + conn_id = conn_info['id'] + with context.session.begin(): + sess_qry = context.session.query(IdentifierMap) + sess_qry.filter_by(ipsec_site_conn_id=conn_id).delete() + LOG.info(_("Removed mapping for connection %s"), conn_id) diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/cisco_ipsec.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/cisco_ipsec.py new file mode 100644 index 00000000..93ce7923 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/cisco_ipsec.py @@ -0,0 +1,245 @@ +# Copyright 2014 Cisco Systems, Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from netaddr import core as net_exc + +from neutron.common import exceptions +from neutron.common import rpc as n_rpc +from neutron.openstack.common import excutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants +from neutron.services.vpn.common import topics +from neutron.services.vpn import service_drivers +from neutron.services.vpn.service_drivers import cisco_csr_db as csr_id_map + + +LOG = logging.getLogger(__name__) + +IPSEC = 'ipsec' +BASE_IPSEC_VERSION = '1.0' +LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400}, + 'IPSec Policy': {'min': 120, 'max': 2592000}} +MIN_CSR_MTU = 1500 +MAX_CSR_MTU = 9192 + + +class CsrValidationFailure(exceptions.BadRequest): + message = _("Cisco CSR does not support %(resource)s attribute %(key)s " + "with value '%(value)s'") + + +class CiscoCsrIPsecVpnDriverCallBack(n_rpc.RpcCallback): + + """Handler for agent to plugin RPC messaging.""" + + # history + # 1.0 Initial version + + RPC_API_VERSION = BASE_IPSEC_VERSION + + def __init__(self, driver): + super(CiscoCsrIPsecVpnDriverCallBack, self).__init__() + self.driver = driver + + def get_vpn_services_on_host(self, context, host=None): + """Retuns info on the vpnservices on the host.""" + plugin = self.driver.service_plugin + vpnservices = plugin._get_agent_hosting_vpn_services( + context, host) + return [self.driver._make_vpnservice_dict(vpnservice, context) + for vpnservice in vpnservices] + + def update_status(self, context, status): + """Update status of all vpnservices.""" + plugin = self.driver.service_plugin + plugin.update_status_by_agent(context, status) + + +class CiscoCsrIPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi, + n_rpc.RpcCallback): + + """API and handler for Cisco IPSec plugin to agent RPC messaging.""" + + RPC_API_VERSION = BASE_IPSEC_VERSION + + def __init__(self, topic, default_version): + super(CiscoCsrIPsecVpnAgentApi, self).__init__( + topics.CISCO_IPSEC_AGENT_TOPIC, topic, default_version) + + +class CiscoCsrIPsecVPNDriver(service_drivers.VpnDriver): + + """Cisco CSR VPN Service Driver class for IPsec.""" + + def __init__(self, service_plugin): + super(CiscoCsrIPsecVPNDriver, self).__init__(service_plugin) + self.endpoints = [CiscoCsrIPsecVpnDriverCallBack(self)] + self.conn = n_rpc.create_connection(new=True) + self.conn.create_consumer( + topics.CISCO_IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False) + self.conn.consume_in_threads() + self.agent_rpc = CiscoCsrIPsecVpnAgentApi( + topics.CISCO_IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION) + + @property + def service_type(self): + return IPSEC + + def validate_lifetime(self, for_policy, policy_info): + """Ensure lifetime in secs and value is supported, based on policy.""" + units = policy_info['lifetime']['units'] + if units != 'seconds': + raise CsrValidationFailure(resource=for_policy, + key='lifetime:units', + value=units) + value = policy_info['lifetime']['value'] + if (value < 
LIFETIME_LIMITS[for_policy]['min'] or + value > LIFETIME_LIMITS[for_policy]['max']): + raise CsrValidationFailure(resource=for_policy, + key='lifetime:value', + value=value) + + def validate_ike_version(self, policy_info): + """Ensure IKE policy is v1 for current REST API.""" + version = policy_info['ike_version'] + if version != 'v1': + raise CsrValidationFailure(resource='IKE Policy', + key='ike_version', + value=version) + + def validate_mtu(self, conn_info): + """Ensure the MTU value is supported.""" + mtu = conn_info['mtu'] + if mtu < MIN_CSR_MTU or mtu > MAX_CSR_MTU: + raise CsrValidationFailure(resource='IPSec Connection', + key='mtu', + value=mtu) + + def validate_public_ip_present(self, vpn_service): + """Ensure there is one gateway IP specified for the router used.""" + gw_port = vpn_service.router.gw_port + if not gw_port or len(gw_port.fixed_ips) != 1: + raise CsrValidationFailure(resource='IPSec Connection', + key='router:gw_port:ip_address', + value='missing') + + def validate_peer_id(self, ipsec_conn): + """Ensure that an IP address is specified for peer ID.""" + # TODO(pcm) Should we check peer_address too? + peer_id = ipsec_conn['peer_id'] + try: + netaddr.IPAddress(peer_id) + except net_exc.AddrFormatError: + raise CsrValidationFailure(resource='IPSec Connection', + key='peer_id', value=peer_id) + + def validate_ipsec_connection(self, context, ipsec_conn, vpn_service): + """Validate attributes w.r.t. Cisco CSR capabilities.""" + ike_policy = self.service_plugin.get_ikepolicy( + context, ipsec_conn['ikepolicy_id']) + ipsec_policy = self.service_plugin.get_ipsecpolicy( + context, ipsec_conn['ipsecpolicy_id']) + self.validate_lifetime('IKE Policy', ike_policy) + self.validate_lifetime('IPSec Policy', ipsec_policy) + self.validate_ike_version(ike_policy) + self.validate_mtu(ipsec_conn) + self.validate_public_ip_present(vpn_service) + self.validate_peer_id(ipsec_conn) + LOG.debug(_("IPSec connection %s validated for Cisco CSR"), + ipsec_conn['id']) + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + try: + self.validate_ipsec_connection(context, ipsec_site_connection, + vpnservice) + except CsrValidationFailure: + with excutils.save_and_reraise_exception(): + self.service_plugin.update_ipsec_site_conn_status( + context, ipsec_site_connection['id'], constants.ERROR) + csr_id_map.create_tunnel_mapping(context, ipsec_site_connection) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'], + reason='ipsec-conn-create') + + def update_ipsec_site_connection( + self, context, old_ipsec_site_connection, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated( + context, vpnservice['router_id'], + reason='ipsec-conn-update') + + def delete_ipsec_site_connection(self, context, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'], + reason='ipsec-conn-delete') + + def create_ikepolicy(self, context, ikepolicy): + pass + + def delete_ikepolicy(self, context, ikepolicy): + pass + + def update_ikepolicy(self, context, old_ikepolicy, ikepolicy): + pass + + def create_ipsecpolicy(self, context, ipsecpolicy): + pass + + def delete_ipsecpolicy(self, context, ipsecpolicy): + pass + + def 
update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy): + pass + + def create_vpnservice(self, context, vpnservice): + pass + + def update_vpnservice(self, context, old_vpnservice, vpnservice): + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'], + reason='vpn-service-update') + + def delete_vpnservice(self, context, vpnservice): + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'], + reason='vpn-service-delete') + + def get_cisco_connection_mappings(self, conn_id, context): + """Obtain persisted mappings for IDs related to connection.""" + tunnel_id, ike_id, ipsec_id = csr_id_map.get_tunnel_mapping_for( + conn_id, context.session) + return {'site_conn_id': u'Tunnel%d' % tunnel_id, + 'ike_policy_id': u'%d' % ike_id, + 'ipsec_policy_id': u'%s' % ipsec_id} + + def _make_vpnservice_dict(self, vpnservice, context): + """Collect all info on service, including Cisco info per IPSec conn.""" + vpnservice_dict = dict(vpnservice) + vpnservice_dict['ipsec_conns'] = [] + vpnservice_dict['subnet'] = dict( + vpnservice.subnet) + vpnservice_dict['external_ip'] = vpnservice.router.gw_port[ + 'fixed_ips'][0]['ip_address'] + for ipsec_conn in vpnservice.ipsec_site_connections: + ipsec_conn_dict = dict(ipsec_conn) + ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy) + ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy) + ipsec_conn_dict['peer_cidrs'] = [ + peer_cidr.cidr for peer_cidr in ipsec_conn.peer_cidrs] + ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings( + ipsec_conn['id'], context) + vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict) + return vpnservice_dict diff --git a/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/ipsec.py b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/ipsec.py new file mode 100644 index 00000000..e3c2ee23 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/services/vpn/service_drivers/ipsec.py @@ -0,0 +1,154 @@ +# Copyright 2013, Nachi Ueno, NTT I3, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
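The Cisco driver above rejects configurations the CSR cannot program before any RPC is sent. Below is a standalone sketch of the same style of pre-check; the limits are copied from the driver's constants, while the helper name and sample data are invented for illustration.

LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
                   'IPSec Policy': {'min': 120, 'max': 2592000}}
MIN_MTU, MAX_MTU = 1500, 9192

def check_policy_lifetime(for_policy, lifetime):
    # The CSR only accepts lifetimes expressed in seconds,
    # and only within the per-policy bounds above.
    if lifetime['units'] != 'seconds':
        return False
    limits = LIFETIME_LIMITS[for_policy]
    return limits['min'] <= lifetime['value'] <= limits['max']

def check_mtu(mtu):
    # MTU must sit inside the CSR-supported window.
    return MIN_MTU <= mtu <= MAX_MTU

print(check_policy_lifetime('IKE Policy',
                            {'units': 'seconds', 'value': 3600}))      # True
print(check_policy_lifetime('IPSec Policy',
                            {'units': 'kilobytes', 'value': 1000}))    # False
print(check_mtu(1400))                                                 # False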
+import netaddr + +from neutron.common import rpc as n_rpc +from neutron.openstack.common import log as logging +from neutron.services.vpn.common import topics +from neutron.services.vpn import service_drivers + + +LOG = logging.getLogger(__name__) + +IPSEC = 'ipsec' +BASE_IPSEC_VERSION = '1.0' + + +class IPsecVpnDriverCallBack(n_rpc.RpcCallback): + """Callback for IPSecVpnDriver rpc.""" + + # history + # 1.0 Initial version + + RPC_API_VERSION = BASE_IPSEC_VERSION + + def __init__(self, driver): + super(IPsecVpnDriverCallBack, self).__init__() + self.driver = driver + + def get_vpn_services_on_host(self, context, host=None): + """Returns the vpnservices on the host.""" + plugin = self.driver.service_plugin + vpnservices = plugin._get_agent_hosting_vpn_services( + context, host) + return [self.driver._make_vpnservice_dict(vpnservice) + for vpnservice in vpnservices] + + def update_status(self, context, status): + """Update status of vpnservices.""" + plugin = self.driver.service_plugin + plugin.update_status_by_agent(context, status) + + +class IPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi, + n_rpc.RpcCallback): + """Agent RPC API for IPsecVPNAgent.""" + + RPC_API_VERSION = BASE_IPSEC_VERSION + + def __init__(self, topic, default_version): + super(IPsecVpnAgentApi, self).__init__( + topics.IPSEC_AGENT_TOPIC, topic, default_version) + + +class IPsecVPNDriver(service_drivers.VpnDriver): + """VPN Service Driver class for IPsec.""" + + def __init__(self, service_plugin): + super(IPsecVPNDriver, self).__init__(service_plugin) + self.endpoints = [IPsecVpnDriverCallBack(self)] + self.conn = n_rpc.create_connection(new=True) + self.conn.create_consumer( + topics.IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False) + self.conn.consume_in_threads() + self.agent_rpc = IPsecVpnAgentApi( + topics.IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION) + + @property + def service_type(self): + return IPSEC + + def create_ipsec_site_connection(self, context, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def update_ipsec_site_connection( + self, context, old_ipsec_site_connection, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def delete_ipsec_site_connection(self, context, ipsec_site_connection): + vpnservice = self.service_plugin._get_vpnservice( + context, ipsec_site_connection['vpnservice_id']) + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def create_ikepolicy(self, context, ikepolicy): + pass + + def delete_ikepolicy(self, context, ikepolicy): + pass + + def update_ikepolicy(self, context, old_ikepolicy, ikepolicy): + pass + + def create_ipsecpolicy(self, context, ipsecpolicy): + pass + + def delete_ipsecpolicy(self, context, ipsecpolicy): + pass + + def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy): + pass + + def create_vpnservice(self, context, vpnservice): + pass + + def update_vpnservice(self, context, old_vpnservice, vpnservice): + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def delete_vpnservice(self, context, vpnservice): + self.agent_rpc.vpnservice_updated(context, vpnservice['router_id']) + + def _make_vpnservice_dict(self, vpnservice): + """Convert vpnservice information for vpn agent. 
+ + also converting parameter name for vpn agent driver + """ + vpnservice_dict = dict(vpnservice) + vpnservice_dict['ipsec_site_connections'] = [] + vpnservice_dict['subnet'] = dict( + vpnservice.subnet) + vpnservice_dict['external_ip'] = vpnservice.router.gw_port[ + 'fixed_ips'][0]['ip_address'] + for ipsec_site_connection in vpnservice.ipsec_site_connections: + ipsec_site_connection_dict = dict(ipsec_site_connection) + try: + netaddr.IPAddress(ipsec_site_connection['peer_id']) + except netaddr.core.AddrFormatError: + ipsec_site_connection['peer_id'] = ( + '@' + ipsec_site_connection['peer_id']) + ipsec_site_connection_dict['ikepolicy'] = dict( + ipsec_site_connection.ikepolicy) + ipsec_site_connection_dict['ipsecpolicy'] = dict( + ipsec_site_connection.ipsecpolicy) + vpnservice_dict['ipsec_site_connections'].append( + ipsec_site_connection_dict) + peer_cidrs = [ + peer_cidr.cidr + for peer_cidr in ipsec_site_connection.peer_cidrs] + ipsec_site_connection_dict['peer_cidrs'] = peer_cidrs + return vpnservice_dict diff --git a/icehouse-patches/neutron/dvr-patch/neutron/version.py b/icehouse-patches/neutron/dvr-patch/neutron/version.py new file mode 100644 index 00000000..2dbf54df --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/version.py @@ -0,0 +1,17 @@ +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + +version_info = pbr.version.VersionInfo('neutron') diff --git a/icehouse-patches/neutron/dvr-patch/neutron/wsgi.py b/icehouse-patches/neutron/dvr-patch/neutron/wsgi.py new file mode 100644 index 00000000..a98e2226 --- /dev/null +++ b/icehouse-patches/neutron/dvr-patch/neutron/wsgi.py @@ -0,0 +1,1301 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
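One detail of IPsecVPNDriver._make_vpnservice_dict above deserves a note: when peer_id is not an IP address it is prefixed with '@' so the ipsec device driver treats it as an identifier rather than an address. A small sketch of that normalization follows; the function name is illustrative, not from the patch.

import netaddr

def normalize_peer_id(peer_id):
    try:
        netaddr.IPAddress(peer_id)
        return peer_id            # already an address, left untouched
    except netaddr.core.AddrFormatError:
        return '@' + peer_id      # identifier form, as the driver emits

print(normalize_peer_id('198.51.100.7'))     # 198.51.100.7
print(normalize_peer_id('vpn.example.com'))  # @vpn.example.com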
+ +""" +Utility methods for working with WSGI servers +""" +from __future__ import print_function + +import errno +import os +import socket +import ssl +import sys +import time +from xml.etree import ElementTree as etree +from xml.parsers import expat + +import eventlet.wsgi +eventlet.patcher.monkey_patch(all=False, socket=True, thread=True) +from oslo.config import cfg +import routes.middleware +import webob.dec +import webob.exc + +from neutron.common import constants +from neutron.common import exceptions as exception +from neutron import context +from neutron.db import api +from neutron.openstack.common import excutils +from neutron.openstack.common import gettextutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import service as common_service +from neutron.openstack.common import systemd + +socket_opts = [ + cfg.IntOpt('backlog', + default=4096, + help=_("Number of backlog requests to configure " + "the socket with")), + cfg.IntOpt('tcp_keepidle', + default=600, + help=_("Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. Not supported on OS X.")), + cfg.IntOpt('retry_until_window', + default=30, + help=_("Number of seconds to keep retrying to listen")), + cfg.IntOpt('max_header_line', + default=16384, + help=_("Max header line to accommodate large tokens")), + cfg.BoolOpt('use_ssl', + default=False, + help=_('Enable SSL on the API server')), + cfg.StrOpt('ssl_ca_file', + help=_("CA certificate file to use to verify " + "connecting clients")), + cfg.StrOpt('ssl_cert_file', + help=_("Certificate file to use when starting " + "the server securely")), + cfg.StrOpt('ssl_key_file', + help=_("Private key file to use when starting " + "the server securely")), +] + +CONF = cfg.CONF +CONF.register_opts(socket_opts) + +LOG = logging.getLogger(__name__) + + +class WorkerService(object): + """Wraps a worker to be handled by ProcessLauncher""" + def __init__(self, service, application): + self._service = service + self._application = application + self._server = None + + def start(self): + # We may have just forked from parent process. A quick disposal of the + # existing sql connections avoids producting 500 errors later when they + # are discovered to be broken. + api.get_engine().pool.dispose() + self._server = self._service.pool.spawn(self._service._run, + self._application, + self._service._socket) + + def wait(self): + self._service.pool.waitall() + + def stop(self): + if isinstance(self._server, eventlet.greenthread.GreenThread): + self._server.kill() + self._server = None + + +class Server(object): + """Server class to manage multiple WSGI sockets and applications.""" + + def __init__(self, name, threads=1000): + # Raise the default from 8192 to accommodate large tokens + eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line + self.pool = eventlet.GreenPool(threads) + self.name = name + self._launcher = None + self._server = None + + def _get_socket(self, host, port, backlog): + bind_addr = (host, port) + # TODO(dims): eventlet's green dns/socket module does not actually + # support IPv6 in getaddrinfo(). 
We need to get around this in the + # future or monitor upstream for a fix + try: + info = socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0] + family = info[0] + bind_addr = info[-1] + except Exception: + LOG.exception(_("Unable to listen on %(host)s:%(port)s"), + {'host': host, 'port': port}) + sys.exit(1) + + if CONF.use_ssl: + if not os.path.exists(CONF.ssl_cert_file): + raise RuntimeError(_("Unable to find ssl_cert_file " + ": %s") % CONF.ssl_cert_file) + + # ssl_key_file is optional because the key may be embedded in the + # certificate file + if CONF.ssl_key_file and not os.path.exists(CONF.ssl_key_file): + raise RuntimeError(_("Unable to find " + "ssl_key_file : %s") % CONF.ssl_key_file) + + # ssl_ca_file is optional + if CONF.ssl_ca_file and not os.path.exists(CONF.ssl_ca_file): + raise RuntimeError(_("Unable to find ssl_ca_file " + ": %s") % CONF.ssl_ca_file) + + def wrap_ssl(sock): + ssl_kwargs = { + 'server_side': True, + 'certfile': CONF.ssl_cert_file, + 'keyfile': CONF.ssl_key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = CONF.ssl_ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) + + sock = None + retry_until = time.time() + CONF.retry_until_window + while not sock and time.time() < retry_until: + try: + sock = eventlet.listen(bind_addr, + backlog=backlog, + family=family) + if CONF.use_ssl: + sock = wrap_ssl(sock) + + except socket.error as err: + with excutils.save_and_reraise_exception() as ctxt: + if err.errno == errno.EADDRINUSE: + ctxt.reraise = False + eventlet.sleep(0.1) + if not sock: + raise RuntimeError(_("Could not bind to %(host)s:%(port)s " + "after trying for %(time)d seconds") % + {'host': host, + 'port': port, + 'time': CONF.retry_until_window}) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + return sock + + def start(self, application, port, host='0.0.0.0', workers=0): + """Run a WSGI server with the given application.""" + self._host = host + self._port = port + backlog = CONF.backlog + + self._socket = self._get_socket(self._host, + self._port, + backlog=backlog) + if workers < 1: + # For the case where only one process is required. + self._server = self.pool.spawn(self._run, application, + self._socket) + systemd.notify_once() + else: + # Minimize the cost of checking for child exit by extending the + # wait interval past the default of 0.01s. + self._launcher = common_service.ProcessLauncher(wait_interval=1.0) + self._server = WorkerService(self, application) + self._launcher.launch_service(self._server, workers=workers) + + @property + def host(self): + return self._socket.getsockname()[0] if self._socket else self._host + + @property + def port(self): + return self._socket.getsockname()[1] if self._socket else self._port + + def stop(self): + if self._launcher: + # The process launcher does not support stop or kill. 
+ self._launcher.running = False + else: + self._server.kill() + + def wait(self): + """Wait until all servers have completed running.""" + try: + if self._launcher: + self._launcher.wait() + else: + self.pool.waitall() + except KeyboardInterrupt: + pass + + def _run(self, application, socket): + """Start a WSGI server in a new green thread.""" + eventlet.wsgi.server(socket, application, custom_pool=self.pool, + log=logging.WritableLogger(LOG)) + + +class Middleware(object): + """Base WSGI middleware wrapper. + + These classes require an application to be initialized that will be called + next. By default the middleware will simply call its wrapped app, or you + can override __call__ to customize its behavior. + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = nova.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + import nova.api.analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) + + +class Request(webob.Request): + + def best_match_content_type(self): + """Determine the most acceptable content-type. + + Based on: + 1) URI extension (.json/.xml) + 2) Content-type header + 3) Accept* headers + """ + # First lookup http request path + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + _format = parts[1] + if _format in ['json', 'xml']: + return 'application/{0}'.format(_format) + + #Then look up content header + type_from_header = self.get_content_type() + if type_from_header: + return type_from_header + ctypes = ['application/json', 'application/xml'] + + #Finally search in Accept-* headers + bm = self.accept.best_match(ctypes) + return bm or 'application/json' + + def get_content_type(self): + allowed_types = ("application/xml", "application/json") + if "Content-Type" not in self.headers: + LOG.debug(_("Missing Content-Type")) + return None + _type = self.content_type + if _type in allowed_types: + return _type + return None + + def best_match_language(self): + """Determines best available locale from the Accept-Language header. + + :returns: the best language match or None if the 'Accept-Language' + header was not available in the request. 
+ """ + if not self.accept_language: + return None + all_languages = gettextutils.get_available_languages('neutron') + return self.accept_language.best_match(all_languages) + + @property + def context(self): + if 'neutron.context' not in self.environ: + self.environ['neutron.context'] = context.get_admin_context() + return self.environ['neutron.context'] + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class DictSerializer(ActionDispatcher): + """Default request body serialization.""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization.""" + + def default(self, data): + def sanitizer(obj): + return unicode(obj) + return jsonutils.dumps(data, default=sanitizer) + + +class XMLDictSerializer(DictSerializer): + + def __init__(self, metadata=None, xmlns=None): + """Object initialization. + + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLDictSerializer, self).__init__() + self.metadata = metadata or {} + if not xmlns: + xmlns = self.metadata.get('xmlns') + if not xmlns: + xmlns = constants.XML_NS_V20 + self.xmlns = xmlns + + def default(self, data): + """Return data as XML string. + + :param data: expect data to contain a single key as XML root, or + contain another '*_links' key as atom links. Other + case will use 'VIRTUAL_ROOT_KEY' as XML root. 
+ """ + try: + links = None + has_atom = False + if data is None: + root_key = constants.VIRTUAL_ROOT_KEY + root_value = None + else: + link_keys = [k for k in data.iterkeys() or [] + if k.endswith('_links')] + if link_keys: + links = data.pop(link_keys[0], None) + has_atom = True + root_key = (len(data) == 1 and + data.keys()[0] or constants.VIRTUAL_ROOT_KEY) + root_value = data.get(root_key, data) + doc = etree.Element("_temp_root") + used_prefixes = [] + self._to_xml_node(doc, self.metadata, root_key, + root_value, used_prefixes) + if links: + self._create_link_nodes(list(doc)[0], links) + return self.to_xml_string(list(doc)[0], used_prefixes, has_atom) + except AttributeError as e: + LOG.exception(str(e)) + return '' + + def __call__(self, data): + # Provides a migration path to a cleaner WSGI layer, this + # "default" stuff and extreme extensibility isn't being used + # like originally intended + return self.default(data) + + def to_xml_string(self, node, used_prefixes, has_atom=False): + self._add_xmlns(node, used_prefixes, has_atom) + return etree.tostring(node, encoding='UTF-8') + + #NOTE (ameade): the has_atom should be removed after all of the + # xml serializers and view builders have been updated to the current + # spec that required all responses include the xmlns:atom, the has_atom + # flag is to prevent current tests from breaking + def _add_xmlns(self, node, used_prefixes, has_atom=False): + node.set('xmlns', self.xmlns) + node.set(constants.TYPE_XMLNS, self.xmlns) + if has_atom: + node.set(constants.ATOM_XMLNS, constants.ATOM_NAMESPACE) + node.set(constants.XSI_NIL_ATTR, constants.XSI_NAMESPACE) + ext_ns = self.metadata.get(constants.EXT_NS, {}) + ext_ns_bc = self.metadata.get(constants.EXT_NS_COMP, {}) + for prefix in used_prefixes: + if prefix in ext_ns: + node.set('xmlns:' + prefix, ext_ns[prefix]) + if prefix in ext_ns_bc: + node.set('xmlns:' + prefix, ext_ns_bc[prefix]) + + def _to_xml_node(self, parent, metadata, nodename, data, used_prefixes): + """Recursive method to convert data members to XML nodes.""" + result = etree.SubElement(parent, nodename) + if ":" in nodename: + used_prefixes.append(nodename.split(":", 1)[0]) + #TODO(bcwaldon): accomplish this without a type-check + if isinstance(data, list): + if not data: + result.set( + constants.TYPE_ATTR, + constants.TYPE_LIST) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + self._to_xml_node(result, metadata, singular, item, + used_prefixes) + #TODO(bcwaldon): accomplish this without a type-check + elif isinstance(data, dict): + if not data: + result.set( + constants.TYPE_ATTR, + constants.TYPE_DICT) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.set(k, str(v)) + else: + self._to_xml_node(result, metadata, k, v, + used_prefixes) + elif data is None: + result.set(constants.XSI_ATTR, 'true') + else: + if isinstance(data, bool): + result.set( + constants.TYPE_ATTR, + constants.TYPE_BOOL) + elif isinstance(data, int): + result.set( + constants.TYPE_ATTR, + constants.TYPE_INT) + elif isinstance(data, long): + result.set( + constants.TYPE_ATTR, + constants.TYPE_LONG) + elif isinstance(data, float): + result.set( + constants.TYPE_ATTR, + constants.TYPE_FLOAT) + LOG.debug(_("Data %(data)s type is %(type)s"), + {'data': data, + 'type': type(data)}) + if isinstance(data, str): + 
result.text = unicode(data, 'utf-8') + else: + result.text = unicode(data) + return result + + def _create_link_nodes(self, xml_doc, links): + for link in links: + link_node = etree.SubElement(xml_doc, 'atom:link') + link_node.set('rel', link['rel']) + link_node.set('href', link['href']) + + +class ResponseHeaderSerializer(ActionDispatcher): + """Default response headers serialization.""" + + def serialize(self, response, data, action): + self.dispatch(response, data, action=action) + + def default(self, response, data): + response.status_int = 200 + + +class ResponseSerializer(object): + """Encode the necessary pieces into a response object.""" + + def __init__(self, body_serializers=None, headers_serializer=None): + self.body_serializers = { + 'application/xml': XMLDictSerializer(), + 'application/json': JSONDictSerializer(), + } + self.body_serializers.update(body_serializers or {}) + + self.headers_serializer = (headers_serializer or + ResponseHeaderSerializer()) + + def serialize(self, response_data, content_type, action='default'): + """Serialize a dict into a string and wrap in a wsgi.Request object. + + :param response_data: dict produced by the Controller + :param content_type: expected mimetype of serialized response body + + """ + response = webob.Response() + self.serialize_headers(response, response_data, action) + self.serialize_body(response, response_data, content_type, action) + return response + + def serialize_headers(self, response, data, action): + self.headers_serializer.serialize(response, data, action) + + def serialize_body(self, response, data, content_type, action): + response.headers['Content-Type'] = content_type + if data is not None: + serializer = self.get_body_serializer(content_type) + response.body = serializer.serialize(data, action) + + def get_body_serializer(self, content_type): + try: + return self.body_serializers[content_type] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + +class TextDeserializer(ActionDispatcher): + """Default request body deserialization.""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) + + def default(self, datastring): + return {} + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): + try: + return jsonutils.loads(datastring) + except ValueError: + msg = _("Cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} + + +class ProtectedXMLParser(etree.XMLParser): + def __init__(self, *args, **kwargs): + etree.XMLParser.__init__(self, *args, **kwargs) + self._parser.StartDoctypeDeclHandler = self.start_doctype_decl + + def start_doctype_decl(self, name, sysid, pubid, internal): + raise ValueError(_("Inline DTD forbidden")) + + def doctype(self, name, pubid, system): + raise ValueError(_("Inline DTD forbidden")) + + +class XMLDeserializer(TextDeserializer): + + def __init__(self, metadata=None): + """Object initialization. + + :param metadata: information needed to deserialize xml into + a dictionary. 
+ """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + xmlns = self.metadata.get('xmlns') + if not xmlns: + xmlns = constants.XML_NS_V20 + self.xmlns = xmlns + + def _get_key(self, tag): + tags = tag.split("}", 1) + if len(tags) == 2: + ns = tags[0][1:] + bare_tag = tags[1] + ext_ns = self.metadata.get(constants.EXT_NS, {}) + if ns == self.xmlns: + return bare_tag + for prefix, _ns in ext_ns.items(): + if ns == _ns: + return prefix + ":" + bare_tag + ext_ns_bc = self.metadata.get(constants.EXT_NS_COMP, {}) + for prefix, _ns in ext_ns_bc.items(): + if ns == _ns: + return prefix + ":" + bare_tag + else: + return tag + + def _get_links(self, root_tag, node): + link_nodes = node.findall(constants.ATOM_LINK_NOTATION) + root_tag = self._get_key(node.tag) + link_key = "%s_links" % root_tag + link_list = [] + for link in link_nodes: + link_list.append({'rel': link.get('rel'), + 'href': link.get('href')}) + # Remove link node in order to avoid link node process as + # an item in _from_xml_node + node.remove(link) + return link_list and {link_key: link_list} or {} + + def _parseXML(self, text): + parser = ProtectedXMLParser() + parser.feed(text) + return parser.close() + + def _from_xml(self, datastring): + if datastring is None: + return None + plurals = set(self.metadata.get('plurals', {})) + try: + node = self._parseXML(datastring) + root_tag = self._get_key(node.tag) + # Deserialize link node was needed by unit test for verifying + # the request's response + links = self._get_links(root_tag, node) + result = self._from_xml_node(node, plurals) + # root_tag = constants.VIRTUAL_ROOT_KEY and links is not None + # is not possible because of the way data are serialized. + if root_tag == constants.VIRTUAL_ROOT_KEY: + return result + return dict({root_tag: result}, **links) + except Exception as e: + with excutils.save_and_reraise_exception(): + parseError = False + # Python2.7 + if (hasattr(etree, 'ParseError') and + isinstance(e, getattr(etree, 'ParseError'))): + parseError = True + # Python2.6 + elif isinstance(e, expat.ExpatError): + parseError = True + if parseError: + msg = _("Cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. + + :param listnames: list of XML node names whose subnodes should + be considered list items. 
+ + """ + attrNil = node.get(str(etree.QName(constants.XSI_NAMESPACE, "nil"))) + attrType = node.get(str(etree.QName( + self.metadata.get('xmlns'), "type"))) + if (attrNil and attrNil.lower() == 'true'): + return None + elif not len(node) and not node.text: + if (attrType and attrType == constants.TYPE_DICT): + return {} + elif (attrType and attrType == constants.TYPE_LIST): + return [] + else: + return '' + elif (len(node) == 0 and node.text): + converters = {constants.TYPE_BOOL: + lambda x: x.lower() == 'true', + constants.TYPE_INT: + lambda x: int(x), + constants.TYPE_LONG: + lambda x: long(x), + constants.TYPE_FLOAT: + lambda x: float(x)} + if attrType and attrType in converters: + return converters[attrType](node.text) + else: + return node.text + elif self._get_key(node.tag) in listnames: + return [self._from_xml_node(n, listnames) for n in node] + else: + result = dict() + for attr in node.keys(): + if (attr == 'xmlns' or + attr.startswith('xmlns:') or + attr == constants.XSI_ATTR or + attr == constants.TYPE_ATTR): + continue + result[self._get_key(attr)] = node.get(attr) + children = list(node) + for child in children: + result[self._get_key(child.tag)] = self._from_xml_node( + child, listnames) + return result + + def default(self, datastring): + return {'body': self._from_xml(datastring)} + + def __call__(self, datastring): + # Adding a migration path to allow us to remove unncessary classes + return self.default(datastring) + + +class RequestHeadersDeserializer(ActionDispatcher): + """Default request headers deserializer.""" + + def deserialize(self, request, action): + return self.dispatch(request, action=action) + + def default(self, request): + return {} + + +class RequestDeserializer(object): + """Break up a Request object into more useful pieces.""" + + def __init__(self, body_deserializers=None, headers_deserializer=None): + self.body_deserializers = { + 'application/xml': XMLDeserializer(), + 'application/json': JSONDeserializer(), + } + self.body_deserializers.update(body_deserializers or {}) + + self.headers_deserializer = (headers_deserializer or + RequestHeadersDeserializer()) + + def deserialize(self, request): + """Extract necessary pieces of the request. 
+ + :param request: Request object + :returns tuple of expected controller action name, dictionary of + keyword arguments to pass to the controller, the expected + content type of the response + + """ + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + + action_args.update(self.deserialize_headers(request, action)) + action_args.update(self.deserialize_body(request, action)) + + accept = self.get_expected_content_type(request) + + return (action, action_args, accept) + + def deserialize_headers(self, request, action): + return self.headers_deserializer.deserialize(request, action) + + def deserialize_body(self, request, action): + try: + content_type = request.best_match_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return {} + + if content_type is None: + LOG.debug(_("No Content-Type provided in request")) + return {} + + if not len(request.body) > 0: + LOG.debug(_("Empty body provided in request")) + return {} + + try: + deserializer = self.get_body_deserializer(content_type) + except exception.InvalidContentType: + with excutils.save_and_reraise_exception(): + LOG.debug(_("Unable to deserialize body as provided " + "Content-Type")) + + return deserializer.deserialize(request.body, action) + + def get_body_deserializer(self, content_type): + try: + return self.body_deserializers[content_type] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def get_expected_content_type(self, request): + return request.best_match_content_type() + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except Exception: + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + +class Application(object): + """Base WSGI application wrapper. Subclasses need to implement __call__.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = nova.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + import neutron.api.fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. 
+ + """ + return cls(**local_config) + + def __call__(self, environ, start_response): + r"""Subclasses will probably want to implement __call__ like this: + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + # Any of the following objects work as responses: + + # Option 1: simple string + res = 'message\n' + + # Option 2: a nicely formatted HTTP exception page + res = exc.HTTPForbidden(explanation='Nice try') + + # Option 3: a webob Response object (in case you need to play with + # headers, or you want to be treated like an iterable, or or or) + res = Response(); + res.app_iter = open('somefile') + + # Option 4: any wsgi app to be run next + res = self.application + + # Option 5: you can get a Response object for a wsgi app, too, to + # play with headers etc + res = req.get_response(self.application) + + # You can then just return your response... + return res + # ... or set req.response and return None. + req.response = res + + See the end of http://pythonpaste.org/webob/modules/dec.html + for more info. + + """ + raise NotImplementedError(_('You must implement __call__')) + + +class Debug(Middleware): + """Middleware for debugging. + + Helper class that can be inserted into any WSGI application chain + to get information about the request and response. + """ + + @webob.dec.wsgify + def __call__(self, req): + print(("*" * 40) + " REQUEST ENVIRON") + for key, value in req.environ.items(): + print(key, "=", value) + print() + resp = req.get_response(self.application) + + print(("*" * 40) + " RESPONSE HEADERS") + for (key, value) in resp.headers.iteritems(): + print(key, "=", value) + print() + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """Print contents of a wrapper string iterator when iterated.""" + print(("*" * 40) + " BODY") + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print() + + +class Router(object): + """WSGI middleware that maps incoming requests to WSGI apps.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Return an instance of the WSGI Router class.""" + return cls() + + def __init__(self, mapper): + """Create a router for the given routes.Mapper. + + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be a wsgi.Controller, who will route + the request to the action method. + + Examples: + mapper = routes.Mapper() + sc = ServerController() + + # Explicit mapping of one route to a controller+action + mapper.connect(None, "/svrlist", controller=sc, action="list") + + # Actions are all implicitly defined + mapper.resource("network", "networks", controller=nc) + + # Pointing to an arbitrary WSGI app. You can specify the + # {path_info:.*} parameter so the target app can be handed just that + # section of the URL. + mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) + """ + self.map = mapper + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + self.map) + + @webob.dec.wsgify + def __call__(self, req): + """Route the incoming request to a controller based on self.map. + + If no match, return a 404. + """ + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=Request) + def _dispatch(req): + """Dispatch a Request. + + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. 
Either returns 404 + or the routed WSGI app's response. + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + language = req.best_match_language() + msg = _('The resource could not be found.') + msg = gettextutils.translate(msg, language) + return webob.exc.HTTPNotFound(explanation=msg) + app = match['controller'] + return app + + +class Resource(Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). + They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + """ + + def __init__(self, controller, fault_body_function, + deserializer=None, serializer=None): + """Object initialization. + + :param controller: object that implement methods created by routes lib + :param deserializer: object that can serialize the output of a + controller into a webob response + :param serializer: object that can deserialize a webob request + into necessary pieces + :param fault_body_function: a function that will build the response + body for HTTP errors raised by operations + on this resource object + + """ + self.controller = controller + self.deserializer = deserializer or RequestDeserializer() + self.serializer = serializer or ResponseSerializer() + self._fault_body_function = fault_body_function + # use serializer's xmlns for populating Fault generator xmlns + xml_serializer = self.serializer.body_serializers['application/xml'] + if hasattr(xml_serializer, 'xmlns'): + self._xmlns = xml_serializer.xmlns + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + LOG.info(_("%(method)s %(url)s"), {"method": request.method, + "url": request.url}) + + try: + action, args, accept = self.deserializer.deserialize(request) + except exception.InvalidContentType: + msg = _("Unsupported Content-Type") + LOG.exception(_("InvalidContentType: %s"), msg) + return Fault(webob.exc.HTTPBadRequest(explanation=msg), + self._xmlns) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + LOG.exception(_("MalformedRequestBody: %s"), msg) + return Fault(webob.exc.HTTPBadRequest(explanation=msg), + self._xmlns) + + try: + action_result = self.dispatch(request, action, args) + except webob.exc.HTTPException as ex: + LOG.info(_("HTTP exception thrown: %s"), unicode(ex)) + action_result = Fault(ex, + self._xmlns, + self._fault_body_function) + except Exception: + LOG.exception(_("Internal error")) + # Do not include the traceback to avoid returning it to clients. 
+ action_result = Fault(webob.exc.HTTPServerError(), + self._xmlns, + self._fault_body_function) + + if isinstance(action_result, dict) or action_result is None: + response = self.serializer.serialize(action_result, + accept, + action=action) + else: + response = action_result + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError as e: + msg_dict = dict(url=request.url, exception=e) + msg = _("%(url)s returned a fault: %(exception)s") % msg_dict + + LOG.info(msg) + + return response + + def dispatch(self, request, action, action_args): + """Find action-spefic method on controller and call it.""" + + controller_method = getattr(self.controller, action) + try: + #NOTE(salvatore-orlando): the controller method must have + # an argument whose name is 'request' + return controller_method(request=request, **action_args) + except TypeError as exc: + LOG.exception(exc) + return Fault(webob.exc.HTTPBadRequest(), + self._xmlns) + + +def _default_body_function(wrapped_exc): + code = wrapped_exc.status_int + fault_data = { + 'Error': { + 'code': code, + 'message': wrapped_exc.explanation}} + # 'code' is an attribute on the fault tag itself + metadata = {'attributes': {'Error': 'code'}} + return fault_data, metadata + + +class Fault(webob.exc.HTTPException): + """Generates an HTTP response from a webob HTTP exception.""" + + def __init__(self, exception, xmlns=None, body_function=None): + """Creates a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + self.status_int = self.wrapped_exc.status_int + self._xmlns = xmlns + self._body_function = body_function or _default_body_function + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + # Replace the body with fault details. + fault_data, metadata = self._body_function(self.wrapped_exc) + xml_serializer = XMLDictSerializer(metadata, self._xmlns) + content_type = req.best_match_content_type() + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) + self.wrapped_exc.content_type = content_type + return self.wrapped_exc + + +# NOTE(salvatore-orlando): this class will go once the +# extension API framework is updated +class Controller(object): + """WSGI app that dispatched to methods. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming wsgi.Request. They raise a webob.exc exception, + or return a dict which will be serialized by requested content type. 
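+
+    For example, a minimal action method could look like this (illustrative
+    only; the method name and the returned payload are placeholders):
+
+        def index(self, request, **kwargs):
+            return {'networks': []}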
+ + """ + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Call the method specified in req.environ by RoutesMiddleware.""" + arg_dict = req.environ['wsgiorg.routing_args'][1] + action = arg_dict['action'] + method = getattr(self, action) + del arg_dict['controller'] + del arg_dict['action'] + if 'format' in arg_dict: + del arg_dict['format'] + arg_dict['request'] = req + result = method(**arg_dict) + + if isinstance(result, dict) or result is None: + if result is None: + status = 204 + content_type = '' + body = None + else: + status = 200 + content_type = req.best_match_content_type() + default_xmlns = self.get_default_xmlns(req) + body = self._serialize(result, content_type, default_xmlns) + + response = webob.Response(status=status, + content_type=content_type, + body=body) + msg_dict = dict(url=req.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + LOG.debug(msg) + return response + else: + return result + + def _serialize(self, data, content_type, default_xmlns): + """Serialize the given dict to the provided content_type. + + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + + """ + _metadata = getattr(type(self), '_serialization_metadata', {}) + + serializer = Serializer(_metadata, default_xmlns) + try: + return serializer.serialize(data, content_type) + except exception.InvalidContentType: + msg = _('The requested content type %s is invalid.') % content_type + raise webob.exc.HTTPNotAcceptable(msg) + + def _deserialize(self, data, content_type): + """Deserialize the request body to the specefied content type. + + Uses self._serialization_metadata if it exists, which is a dict mapping + MIME types to information needed to serialize to that type. + + """ + _metadata = getattr(type(self), '_serialization_metadata', {}) + serializer = Serializer(_metadata) + return serializer.deserialize(data, content_type)['body'] + + def get_default_xmlns(self, req): + """Provide the XML namespace to use if none is otherwise specified.""" + return None + + +# NOTE(salvatore-orlando): this class will go once the +# extension API framework is updated +class Serializer(object): + """Serializes and deserializes dictionaries to certain MIME types.""" + + def __init__(self, metadata=None, default_xmlns=None): + """Create a serializer based on the given WSGI environment. + + 'metadata' is an optional dict mapping MIME types to information + needed to serialize a dictionary to that type. + + """ + self.metadata = metadata or {} + self.default_xmlns = default_xmlns + + def _get_serialize_handler(self, content_type): + handlers = { + 'application/json': JSONDictSerializer(), + 'application/xml': XMLDictSerializer(self.metadata), + } + + try: + return handlers[content_type] + except Exception: + raise exception.InvalidContentType(content_type=content_type) + + def serialize(self, data, content_type): + """Serialize a dictionary into the specified content type.""" + return self._get_serialize_handler(content_type).serialize(data) + + def deserialize(self, datastring, content_type): + """Deserialize a string to a dictionary. + + The string must be in the format of a supported MIME type. 
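+
+        For example (illustrative values, assuming the JSON deserializer
+        defined in this module):
+
+            Serializer().deserialize('{"net": {"id": "n1"}}',
+                                     'application/json')
+            # => {'body': {'net': {'id': 'n1'}}}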
+
+        """
+        try:
+            return self.get_deserialize_handler(content_type).deserialize(
+                datastring)
+        except Exception:
+            raise webob.exc.HTTPBadRequest(_("Could not deserialize data"))
+
+    def get_deserialize_handler(self, content_type):
+        handlers = {
+            'application/json': JSONDeserializer(),
+            'application/xml': XMLDeserializer(self.metadata),
+        }
+
+        try:
+            return handlers[content_type]
+        except Exception:
+            raise exception.InvalidContentType(content_type=content_type)
diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/README.md b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/README.md
new file mode 100644
index 00000000..eab9a958
--- /dev/null
+++ b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/README.md
@@ -0,0 +1,65 @@
+Neutron ml2-mech-driver-cascaded-patch
+===============================
+
+  The cascaded Neutron server plays the same role as the standard Neutron server, but in the cascaded OpenStack layer.
+  It inherits from the Neutron server; only the l2pop module is modified, because the cascaded layer introduces the concept of a remote port, and some code is added to handle remote ports in the mechanism driver.
+
+Key modules
+-----------
+
+* The cascaded Neutron server inherits from the Neutron server; only the plugins/ml2/drivers/l2pop module is modified, because the cascaded layer introduces the concept of a remote port, and some code is added to handle remote ports in the mechanism driver:
+
+    neutron/plugins/ml2/drivers/l2pop/db.py
+    neutron/plugins/ml2/drivers/l2pop/mech_driver.py
+    neutron/plugins/ml2/drivers/l2pop/config.py
+
+Requirements
+------------
+* openstack-neutron-2014.1-1.1 has been installed and the DVR patch has been applied.
+
+Installation
+------------
+
+We provide two ways to install the cascaded neutron-server code. In this section, we will guide you through installing the cascaded neutron-server with the minimum configuration.
+
+* **Note:**
+
+    - Make sure you have an existing installation of **OpenStack Icehouse**.
+    - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified:
+      $NEUTRON_PARENT_DIR/neutron
+      (replace the $... with actual directory names.)
+
+* **Manual Installation**
+
+    - Navigate to the local repository and copy the contents of the 'neutron' sub-directory to the corresponding places in the existing neutron installation, e.g.
+      ```
+      cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR
+      ```
+      (replace the $... with the actual directory name.)
+
+    - Restart the neutron server.
+      ```service openstack-neutron restart```
+
+    - Done. The cascaded neutron server should now be working with the ML2 plugin.
+
+* **Automatic Installation**
+
+    - Navigate to the installation directory and run the installation script.
+      ```
+      cd $LOCAL_REPOSITORY_DIR/installation
+      sudo bash ./install.sh
+      ```
+      (replace the $... with the actual directory name.)
+
+    - Done. The installation script modifies the neutron code without modifying the configuration.
+
+* **Troubleshooting**
+
+    In case the automatic installation process does not complete, please check the following:
+
+    - Make sure your OpenStack version is Icehouse and the DVR patch has been applied.
+
+    - Check the variables at the beginning of the install.sh script. Your installation directories may differ from the default values we provide.
+
+    - The installation script automatically copies the modified code into $NEUTRON_PARENT_DIR/neutron; it does not change any configuration files.
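+
+    - The l2pop driver in this patch identifies remote ports by their binding
+      profile (see mech_driver.py). If remote ports are not handled as
+      expected, check that ports created from the cascading layer carry a
+      profile roughly of the following form (illustrative values):
+      ```
+      "binding:profile": {"port_key": "remote_port", "host_ip": "10.0.0.10"}
+      ```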
+ + - In case the automatic installation does not work, try to install manually. diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/installation/install.sh b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/installation/install.sh new file mode 100644 index 00000000..ce9903df --- /dev/null +++ b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/installation/install.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_NEUTRON_CONF_DIR="/etc/neutron" +_NEUTRON_CONF_FILE='neutron.conf' +_NEUTRON_INSTALL="/usr/lib64/python2.6/site-packages" +_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../neutron/" +_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascaded-server-installation-backup" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/neutron-cascaded-server/installation/${_SCRIPT_NAME}.log" + +if [[ ${EUID} -ne 0 ]]; then + echo "Please run as root." + exit 1 +fi + +##Redirecting output to logfile as well as stdout +#exec > >(tee -a ${_SCRIPT_LOGFILE}) +#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) + +cd `dirname $0` + +echo "checking installation directories..." +if [ ! -d "${_NEUTRON_DIR}" ] ; then + echo "Could not find the neutron installation. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi +if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then + echo "Could not find neutron config file. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi + +echo "checking previous installation..." +if [ -d "${_BACKUP_DIR}/neutron" ] ; then + echo "It seems neutron-server-cascaded has already been installed!" + echo "Please check README for solution if this is not true." + exit 1 +fi + +echo "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}" +cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/neutron" + echo "Error in code backup, aborted." + exit 1 +fi + +echo "copying in new files..." +cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` +if [ $? -ne 0 ] ; then + echo "Error in copying, aborted." + echo "Recovering original files..." + cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" + if [ $? -ne 0 ] ; then + echo "Recovering failed! Please install manually." + fi + exit 1 +fi + +echo "restarting cascaded neutron server..." +service openstack-neutron restart +if [ $? -ne 0 ] ; then + echo "There was an error in restarting the service, please restart cascaded neutron server manually." + exit 1 +fi + +echo "Completed." +echo "See README to get started." 
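+
+# Note: the copied l2pop driver registers new options in the [l2pop] section;
+# the defaults below are shown for reference only and can be overridden in the
+# neutron configuration if needed:
+#   [l2pop]
+#   agent_boot_time = 180
+#   cascaded_gateway = no_gateway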
+
+exit 0
+
+
diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/config.py b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/config.py
new file mode 100644
index 00000000..236c3b17
--- /dev/null
+++ b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/config.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2013 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Haojie Jia, Huawei
+# @author: Sylvain Afchain, eNovance SAS
+# @author: Francois Eleouet, Orange
+# @author: Mathieu Rohon, Orange
+
+from oslo.config import cfg
+
+
+l2_population_options = [
+    cfg.IntOpt('agent_boot_time', default=180,
+               help=_('Delay within which the agent is expected to update '
+                      'existing ports when it restarts')),
+    cfg.StrOpt('cascaded_gateway', default='no_gateway',
+               help=_('Set to no_gateway when there is no gateway host; '
+                      'otherwise set to admin_gateway or population_opt')),
+]
+
+cfg.CONF.register_opts(l2_population_options, "l2pop")
diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/db.py b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/db.py
new file mode 100644
index 00000000..3e5a5f8a
--- /dev/null
+++ b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/db.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2013 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# +# @author: Haojie Jia, Huawei +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from sqlalchemy import sql + +from neutron.common import constants as const +from neutron.db import agents_db +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import models_v2 +from neutron.openstack.common import jsonutils +from neutron.openstack.common import timeutils +from neutron.plugins.ml2.drivers.l2pop import constants as l2_const +from neutron.plugins.ml2 import models as ml2_models + + +class L2populationDbMixin(base_db.CommonDbMixin): + + def get_agent_ip_by_host(self, session, agent_host): + agent = self.get_agent_by_host(session, agent_host) + if agent: + return self.get_agent_ip(agent) + + def get_agent_ip(self, agent): + configuration = jsonutils.loads(agent.configurations) + return configuration.get('tunneling_ip') + + def get_agent_uptime(self, agent): + return timeutils.delta_seconds(agent.started_at, + agent.heartbeat_timestamp) + + def get_agent_tunnel_types(self, agent): + configuration = jsonutils.loads(agent.configurations) + return configuration.get('tunnel_types') + + def get_agent_by_host(self, session, agent_host): + with session.begin(subtransactions=True): + query = session.query(agents_db.Agent) + query = query.filter(agents_db.Agent.host == agent_host, + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query.first() + + def get_network_ports(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.PortBinding, + agents_db.Agent) + query = query.join(agents_db.Agent, + agents_db.Agent.host == + ml2_models.PortBinding.host) + query = query.join(models_v2.Port) + query = query.filter(models_v2.Port.network_id == network_id, + models_v2.Port.admin_state_up == sql.true(), + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query + + def get_nondvr_network_ports(self, session, network_id): + query = self.get_network_ports(session, network_id) + return query.filter(models_v2.Port.device_owner != + const.DEVICE_OWNER_DVR_INTERFACE) + + def get_dvr_network_ports(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.DVRPortBinding, + agents_db.Agent) + query = query.join(agents_db.Agent, + agents_db.Agent.host == + ml2_models.DVRPortBinding.host) + query = query.join(models_v2.Port) + query = query.filter(models_v2.Port.network_id == network_id, + models_v2.Port.admin_state_up == sql.true(), + models_v2.Port.device_owner == + const.DEVICE_OWNER_DVR_INTERFACE, + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query + + def get_agent_network_active_port_count(self, session, agent_host, + network_id): + with session.begin(subtransactions=True): + query = session.query(models_v2.Port) + query1 = query.join(ml2_models.PortBinding) + query1 = query1.filter(models_v2.Port.network_id == network_id, + models_v2.Port.status == + const.PORT_STATUS_ACTIVE, + models_v2.Port.device_owner != + const.DEVICE_OWNER_DVR_INTERFACE, + ml2_models.PortBinding.host == agent_host) + query2 = query.join(ml2_models.DVRPortBinding) + query2 = query2.filter(models_v2.Port.network_id == network_id, + ml2_models.DVRPortBinding.status == + const.PORT_STATUS_ACTIVE, + models_v2.Port.device_owner == + const.DEVICE_OWNER_DVR_INTERFACE, + ml2_models.DVRPortBinding.host == + agent_host) + return (query1.count() + query2.count()) + + def 
get_host_ip_from_binding_profile(self, profile): + if(not profile): + return + profile = jsonutils.loads(profile) + return profile.get('host_ip') + + def get_segment_by_network_id(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.NetworkSegment) + query = query.filter( + ml2_models.NetworkSegment.network_id == network_id, + ml2_models.NetworkSegment.network_type == 'vxlan') + return query.first() + + def get_remote_ports(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.PortBinding) + query = query.join(models_v2.Port) + query = query.filter( + models_v2.Port.network_id == network_id, + ml2_models.PortBinding.profile.contains('"port_key": "remote_port"')) + return query diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py new file mode 100644 index 00000000..cc24ec9e --- /dev/null +++ b/icehouse-patches/neutron/ml2-mech-driver-cascaded-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py @@ -0,0 +1,385 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Haojie Jia, Huawei +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from oslo.config import cfg + +from neutron.common import constants as const +from neutron import context as n_context +from neutron.db import api as db_api +from neutron.openstack.common import log as logging +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.l2pop import config # noqa +from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db +from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc + +LOG = logging.getLogger(__name__) + + +class L2populationMechanismDriver(api.MechanismDriver, + l2pop_db.L2populationDbMixin): + + def __init__(self): + super(L2populationMechanismDriver, self).__init__() + self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() + + def initialize(self): + LOG.debug(_("Experimental L2 population driver")) + self.rpc_ctx = n_context.get_admin_context_without_session() + self.migrated_ports = {} + self.remove_fdb_entries = {} + self.remove_remote_ports_fdb = {} + + def _get_port_fdb_entries(self, port): + return [[port['mac_address'], port['device_owner'], + ip['ip_address']] for ip in port['fixed_ips']] + + def _is_remote_port(self, port): + return port['binding:profile'].get('port_key') == 'remote_port' + + def create_port_postcommit(self, context): + """ + if port is "remote_port", + then notify all l2-agent or only l2-gateway-agent + else do nothing + """ + port_context = context.current + if(self._is_remote_port(port_context)): + other_fdb_entries = self.get_remote_port_fdb(port_context) + if(not other_fdb_entries): + return + if(cfg.CONF.l2pop.cascaded_gateway == 'no_gateway'): + # notify all l2-agent + self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, + other_fdb_entries) + else: + # only notify to l2-gateway-agent + pass + + def get_remote_port_fdb(self, port_context): + port_id = port_context['id'] + network_id = port_context['network_id'] + + session = db_api.get_session() + segment = self.get_segment_by_network_id(session, network_id) + if not segment: + LOG.warning(_("Network %(network_id)s has no " + " vxlan provider, so cannot get segment"), + {'network_id': network_id}) + return + ip = port_context['binding:profile'].get('host_ip') + if not ip: + LOG.debug(_("Unable to retrieve the ip from remote port, " + "check the remote port %(port_id)."), + {'port_id': port_id}) + return + other_fdb_entries = {network_id: + {'segment_id': segment.segmentation_id, + 'network_type': segment.network_type, + 'ports': {}}} + ports = other_fdb_entries[network_id]['ports'] + agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) + agent_ports += self._get_port_fdb_entries(port_context) + ports[ip] = agent_ports + return other_fdb_entries + + def _get_agent_host(self, context, port): + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + agent_host = context.binding.host + else: + agent_host = port['binding:host_id'] + return agent_host + + def delete_port_precommit(self, context): + # TODO(matrohon): revisit once the original bound segment will be + # available in delete_port_postcommit. 
in delete_port_postcommit + # agent_active_ports will be equal to 0, and the _update_port_down + # won't need agent_active_ports_count_for_flooding anymore + port = context.current + if(self._is_remote_port(port)): + fdb_entry = self.get_remote_port_fdb(port) + self.remove_remote_ports_fdb[port['id']] = fdb_entry + agent_host = self._get_agent_host(context, port) + + if port['id'] not in self.remove_fdb_entries: + self.remove_fdb_entries[port['id']] = {} + + self.remove_fdb_entries[port['id']][agent_host] = ( + self._update_port_down(context, port, 1)) + + def delete_port_postcommit(self, context): + port = context.current + agent_host = self._get_agent_host(context, port) + + if port['id'] in self.remove_fdb_entries: + for agent_host in list(self.remove_fdb_entries[port['id']]): + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, + self.remove_fdb_entries[port['id']][agent_host]) + self.remove_fdb_entries[port['id']].pop(agent_host, 0) + self.remove_fdb_entries.pop(port['id'], 0) + + remote_port_fdb = self.remove_remote_ports_fdb.pop( + context.current['id'], + None) + if(remote_port_fdb): + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, remote_port_fdb) + + def _get_diff_ips(self, orig, port): + orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) + port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) + + # check if an ip has been added or removed + orig_chg_ips = orig_ips.difference(port_ips) + port_chg_ips = port_ips.difference(orig_ips) + + if orig_chg_ips or port_chg_ips: + return orig_chg_ips, port_chg_ips + + def _fixed_ips_changed(self, context, orig, port, diff_ips): + orig_ips, port_ips = diff_ips + + port_infos = self._get_port_infos(context, orig) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + orig_mac_ip = [[port['mac_address'], port['device_owner'], ip] + for ip in orig_ips] + port_mac_ip = [[port['mac_address'], port['device_owner'], ip] + for ip in port_ips] + + upd_fdb_entries = {port['network_id']: {agent_ip: {}}} + + ports = upd_fdb_entries[port['network_id']][agent_ip] + if orig_mac_ip: + ports['before'] = orig_mac_ip + + if port_mac_ip: + ports['after'] = port_mac_ip + + self.L2populationAgentNotify.update_fdb_entries( + self.rpc_ctx, {'chg_ip': upd_fdb_entries}) + + return True + + def update_port_postcommit(self, context): + port = context.current + orig = context.original + + diff_ips = self._get_diff_ips(orig, port) + if diff_ips: + self._fixed_ips_changed(context, orig, port, diff_ips) + # TODO(vivek): DVR may need more robust handling of binding:host_id key + if (port.get('binding:host_id') != orig.get('binding:host_id') + and port['status'] == const.PORT_STATUS_ACTIVE + and not self.migrated_ports.get(orig['id'])): + # The port has been migrated. 
We have to store the original + # binding to send appropriate fdb once the port will be set + # on the destination host + self.migrated_ports[orig['id']] = orig + elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + binding = context.binding + if binding.status == const.PORT_STATUS_ACTIVE: + self._update_port_up(context) + if binding.status == const.PORT_STATUS_DOWN: + agent_host = binding.host + fdb_entries = {agent_host: + self._update_port_down(context, port)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + elif port['status'] != orig['status']: + agent_host = port['binding:host_id'] + if port['status'] == const.PORT_STATUS_ACTIVE: + self._update_port_up(context) + elif port['status'] == const.PORT_STATUS_DOWN: + fdb_entries = {agent_host: self._update_port_down(context, + port)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + elif port['status'] == const.PORT_STATUS_BUILD: + orig = self.migrated_ports.pop(port['id'], None) + if orig: + # this port has been migrated : remove its entries from fdb + fdb_entries = {agent_host: self._update_port_down(context, + orig)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + + def _get_port_infos(self, context, port): + agent_host = self._get_agent_host(context, port) + if not agent_host: + return + + session = db_api.get_session() + agent = self.get_agent_by_host(session, agent_host) + if not agent: + return + + agent_ip = self.get_agent_ip(agent) + if not agent_ip: + LOG.warning(_("Unable to retrieve the agent ip, check the agent " + "configuration.")) + return + + segment = context.bound_segment + if not segment: + LOG.warning(_("Port %(port)s updated by agent %(agent)s " + "isn't bound to any segment"), + {'port': port['id'], 'agent': agent}) + return + + tunnel_types = self.get_agent_tunnel_types(agent) + if segment['network_type'] not in tunnel_types: + return + + fdb_entries = self._get_port_fdb_entries(port) + + return agent, agent_host, agent_ip, segment, fdb_entries + + def _update_port_up(self, context): + port_context = context.current + port_infos = self._get_port_infos(context, port_context) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + network_id = port_context['network_id'] + + session = db_api.get_session() + agent_active_ports = self.get_agent_network_active_port_count( + session, agent_host, network_id) + + other_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {agent_ip: []}}} + + if agent_active_ports == 1 or ( + self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time): + # First port activated on current agent in this network, + # we have to provide it with the whole list of fdb entries + agent_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {}}} + ports = agent_fdb_entries[network_id]['ports'] + + nondvr_network_ports = self.get_nondvr_network_ports(session, + network_id) + for network_port in nondvr_network_ports: + binding, agent = network_port + if agent.host == agent_host: + continue + + ip = self.get_agent_ip(agent) + if not ip: + LOG.debug(_("Unable to retrieve the agent ip, check " + "the agent %(agent_host)s configuration."), + {'agent_host': agent.host}) + continue + + agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) + agent_ports += 
self._get_port_fdb_entries(binding.port) + ports[ip] = agent_ports + + if cfg.CONF.l2pop.cascaded_gateway == 'no_gateway': + remote_ports = self.get_remote_ports(session, network_id) + else: + remote_ports = {} +# elif cfg.CONF.cascaded_gateway == 'admin_gateway' or +# cfg.CONF.cascaded_gateway == 'population_opt': +# if self.is_proxy_port(port_context): +# remote_ports = self.get_remote_ports(session, network_id) +# else: + for binding in remote_ports: + profile = binding['profile'] + ip = self.get_host_ip_from_binding_profile(profile) + if not ip: + LOG.debug(_("Unable to retrieve the agent ip, check " + "the agent %(agent_host)s configuration."), + {'agent_host': agent.host}) + continue + + agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) + agent_ports += self._get_port_fdb_entries(binding.port) + ports[ip] = agent_ports + + dvr_network_ports = self.get_dvr_network_ports(session, network_id) + for network_port in dvr_network_ports: + binding, agent = network_port + if agent.host == agent_host: + continue + + ip = self.get_agent_ip(agent) + if not ip: + LOG.debug(_("Unable to retrieve the agent ip, check " + "the agent %(agent_host)s configuration."), + {'agent_host': agent.host}) + continue + + agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) + ports[ip] = agent_ports + + # And notify other agents to add flooding entry + other_fdb_entries[network_id]['ports'][agent_ip].append( + const.FLOODING_ENTRY) + + if ports.keys(): + self.L2populationAgentNotify.add_fdb_entries( + self.rpc_ctx, agent_fdb_entries, agent_host) + + # Notify other agents to add fdb rule for current port + if port_context['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: + other_fdb_entries[network_id]['ports'][agent_ip] += ( + port_fdb_entries) + + self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, + other_fdb_entries) + + def _update_port_down(self, context, port_context, + agent_active_ports_count_for_flooding=0): + port_infos = self._get_port_infos(context, port_context) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + network_id = port_context['network_id'] + + session = db_api.get_session() + agent_active_ports = self.get_agent_network_active_port_count( + session, agent_host, network_id) + + other_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {agent_ip: []}}} + if agent_active_ports == agent_active_ports_count_for_flooding: + # Agent is removing its last activated port in this network, + # other agents needs to be notified to delete their flooding entry. + other_fdb_entries[network_id]['ports'][agent_ip].append( + const.FLOODING_ENTRY) + + # Notify other agents to remove fdb rule for current port + if port_context['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: + fdb_entries = self._get_port_fdb_entries(port_context) + other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries + + return other_fdb_entries diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/README.md b/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/README.md new file mode 100644 index 00000000..63a87f69 --- /dev/null +++ b/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/README.md @@ -0,0 +1,66 @@ +Neutron ml2-mech-driver-cascading-patch +=============================== + + Cascading Neutron Server acts as the same role of Neutron Server in cascading OpenStack Layer. 
+  The cascading Neutron server inherits from the Neutron server; only the l2pop module is modified, because the cascading Neutron server retrieves the host IP from the binding:profile of a port, which differs from the standard Neutron server.
+
+
+Key modules
+-----------
+
+* The cascading Neutron server inherits from the Neutron server; only the plugins/ml2/drivers/l2pop module is modified, because the cascading Neutron server retrieves the host IP from the binding:profile of a port, which differs from the standard Neutron server:
+
+    neutron/plugins/ml2/drivers/l2pop/db.py
+    neutron/plugins/ml2/drivers/l2pop/mech_driver.py
+
+
+Requirements
+------------
+* openstack-neutron-2014.1-1.1 has been installed and the DVR patch has been applied.
+
+Installation
+------------
+
+We provide two ways to install the cascading neutron-server code. In this section, we will guide you through installing the cascading neutron-server with the minimum configuration.
+
+* **Note:**
+
+    - Make sure you have an existing installation of **OpenStack Icehouse**.
+    - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified:
+      $NEUTRON_CONFIG_PARENT_DIR/neutron.conf
+      (replace the $... with actual directory names.)
+
+* **Manual Installation**
+
+    - Navigate to the local repository and copy the contents of the 'neutron' sub-directory to the corresponding places in the existing neutron installation, e.g.
+      ```
+      cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR
+      ```
+      (replace the $... with the actual directory name.)
+
+    - Restart the neutron server.
+      ```service openstack-neutron restart```
+
+    - Done. The cascading neutron server should now be working with the ML2 plugin.
+
+* **Automatic Installation**
+
+    - Navigate to the installation directory and run the installation script.
+      ```
+      cd $LOCAL_REPOSITORY_DIR/installation
+      sudo bash ./install.sh
+      ```
+      (replace the $... with the actual directory name.)
+
+    - Done. The installation script modifies the neutron code without modifying the configuration.
+
+* **Troubleshooting**
+
+    In case the automatic installation process does not complete, please check the following:
+
+    - Make sure your OpenStack version is Icehouse and the DVR patch has been applied.
+
+    - Check the variables at the beginning of the install.sh script. Your installation directories may differ from the default values we provide.
+
+    - The installation script automatically copies the modified code into $NEUTRON_PARENT_DIR/neutron; it does not change any configuration files.
+
+    - In case the automatic installation does not work, try to install manually.
diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/installation/install.sh b/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/installation/install.sh
new file mode 100644
index 00000000..dd1e44f8
--- /dev/null
+++ b/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/installation/install.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# Copyright (c) 2014 Huawei Technologies.
+ +_NEUTRON_CONF_DIR="/etc/neutron" +_NEUTRON_CONF_FILE='neutron.conf' +_NEUTRON_INSTALL="/usr/lib64/python2.6/site-packages" +_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../neutron/" +_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascading-server-installation-backup" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/neutron-server-cascading/installation/${_SCRIPT_NAME}.log" + +if [[ ${EUID} -ne 0 ]]; then + echo "Please run as root." + exit 1 +fi + +##Redirecting output to logfile as well as stdout +#exec > >(tee -a ${_SCRIPT_LOGFILE}) +#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) + +cd `dirname $0` + +echo "checking installation directories..." +if [ ! -d "${_NEUTRON_DIR}" ] ; then + echo "Could not find the neutron installation. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi +if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then + echo "Could not find neutron config file. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi + +echo "checking previous installation..." +if [ -d "${_BACKUP_DIR}/neutron" ] ; then + echo "It seems neutron-server-cascading has already been installed!" + echo "Please check README for solution if this is not true." + exit 1 +fi + +echo "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}" +cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/neutron" + echo "Error in code backup, aborted." + exit 1 +fi + +echo "copying in new files..." +cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` +if [ $? -ne 0 ] ; then + echo "Error in copying, aborted." + echo "Recovering original files..." + cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" + if [ $? -ne 0 ] ; then + echo "Recovering failed! Please install manually." + fi + exit 1 +fi + +echo "restarting cascading neutron server..." +service openstack-neutron restart +if [ $? -ne 0 ] ; then + echo "There was an error in restarting the service, please restart cascading neutron server manually." + exit 1 +fi + +echo "Completed." +echo "See README to get started." + +exit 0 + + + diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/neutron/plugins/ml2/drivers/l2pop/db.py b/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/neutron/plugins/ml2/drivers/l2pop/db.py new file mode 100644 index 00000000..91bd83cc --- /dev/null +++ b/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/neutron/plugins/ml2/drivers/l2pop/db.py @@ -0,0 +1,124 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# @author: Haojie Jia, Huawei +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from sqlalchemy import sql + +from neutron.common import constants as const +from neutron.db import agents_db +from neutron.db import db_base_plugin_v2 as base_db +from neutron.db import models_v2 +from neutron.openstack.common import jsonutils +from neutron.openstack.common import timeutils +from neutron.plugins.ml2.drivers.l2pop import constants as l2_const +from neutron.plugins.ml2 import models as ml2_models + + +class L2populationDbMixin(base_db.CommonDbMixin): + + def get_agent_ip_by_host(self, session, agent_host): + agent = self.get_agent_by_host(session, agent_host) + if agent: + return self.get_agent_ip(agent) + + def get_agent_ip(self, agent): + configuration = jsonutils.loads(agent.configurations) + return configuration.get('tunneling_ip') + + def get_host_ip_from_binding_profile(self, port): + ip = port['binding:profile'].get('host_ip') + return ip + + def get_host_ip_from_binding_profile_str(self, profile): + if(not profile): + return + profile = jsonutils.loads(profile) + return profile.get('host_ip') + + def get_agent_uptime(self, agent): + return timeutils.delta_seconds(agent.started_at, + agent.heartbeat_timestamp) + + def get_agent_tunnel_types(self, agent): + configuration = jsonutils.loads(agent.configurations) + return configuration.get('tunnel_types') + + def get_agent_by_host(self, session, agent_host): + with session.begin(subtransactions=True): + query = session.query(agents_db.Agent) + query = query.filter(agents_db.Agent.host == agent_host, + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query.first() + + def get_network_ports(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.PortBinding, + agents_db.Agent) + query = query.join(agents_db.Agent, + agents_db.Agent.host == + ml2_models.PortBinding.host) + query = query.join(models_v2.Port) + query = query.filter(models_v2.Port.network_id == network_id, + models_v2.Port.admin_state_up == sql.true(), + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query + + def get_nondvr_network_ports(self, session, network_id): + query = self.get_network_ports(session, network_id) + return query.filter(models_v2.Port.device_owner != + const.DEVICE_OWNER_DVR_INTERFACE) + + def get_dvr_network_ports(self, session, network_id): + with session.begin(subtransactions=True): + query = session.query(ml2_models.DVRPortBinding, + agents_db.Agent) + query = query.join(agents_db.Agent, + agents_db.Agent.host == + ml2_models.DVRPortBinding.host) + query = query.join(models_v2.Port) + query = query.filter(models_v2.Port.network_id == network_id, + models_v2.Port.admin_state_up == sql.true(), + models_v2.Port.device_owner == + const.DEVICE_OWNER_DVR_INTERFACE, + agents_db.Agent.agent_type.in_( + l2_const.SUPPORTED_AGENT_TYPES)) + return query + + def get_agent_network_active_port_count(self, session, agent_host, + network_id): + with session.begin(subtransactions=True): + query = session.query(models_v2.Port) + query1 = query.join(ml2_models.PortBinding) + query1 = query1.filter(models_v2.Port.network_id == network_id, + models_v2.Port.status == + const.PORT_STATUS_ACTIVE, + models_v2.Port.device_owner != + const.DEVICE_OWNER_DVR_INTERFACE, + ml2_models.PortBinding.host == agent_host) + query2 = query.join(ml2_models.DVRPortBinding) + query2 = query2.filter(models_v2.Port.network_id 
== network_id, + ml2_models.DVRPortBinding.status == + const.PORT_STATUS_ACTIVE, + models_v2.Port.device_owner == + const.DEVICE_OWNER_DVR_INTERFACE, + ml2_models.DVRPortBinding.host == + agent_host) + return (query1.count() + query2.count()) diff --git a/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py new file mode 100644 index 00000000..8fb1ee73 --- /dev/null +++ b/icehouse-patches/neutron/ml2-mech-driver-cascading-patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py @@ -0,0 +1,307 @@ +# Copyright (c) 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Sylvain Afchain, eNovance SAS +# @author: Francois Eleouet, Orange +# @author: Mathieu Rohon, Orange + +from oslo.config import cfg + +from neutron.common import constants as const +from neutron import context as n_context +from neutron.db import api as db_api +from neutron.openstack.common import log as logging +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers.l2pop import config # noqa +from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db +from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc + +LOG = logging.getLogger(__name__) + + +class L2populationMechanismDriver(api.MechanismDriver, + l2pop_db.L2populationDbMixin): + + def __init__(self): + super(L2populationMechanismDriver, self).__init__() + self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() + + def initialize(self): + LOG.debug(_("Experimental L2 population driver")) + self.rpc_ctx = n_context.get_admin_context_without_session() + self.migrated_ports = {} + self.remove_fdb_entries = {} + + def _get_port_fdb_entries(self, port): + return [[port['mac_address'], port['device_owner'], + ip['ip_address']] for ip in port['fixed_ips']] + + def _get_agent_host(self, context, port): + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + agent_host = context.binding.host + else: + agent_host = port['binding:host_id'] + return agent_host + + def delete_port_precommit(self, context): + # TODO(matrohon): revisit once the original bound segment will be + # available in delete_port_postcommit. 
in delete_port_postcommit + # agent_active_ports will be equal to 0, and the _update_port_down + # won't need agent_active_ports_count_for_flooding anymore + port = context.current + agent_host = self._get_agent_host(context, port) + + if port['id'] not in self.remove_fdb_entries: + self.remove_fdb_entries[port['id']] = {} + + self.remove_fdb_entries[port['id']][agent_host] = ( + self._update_port_down(context, port, 1)) + + def delete_port_postcommit(self, context): + port = context.current + agent_host = self._get_agent_host(context, port) + + if port['id'] in self.remove_fdb_entries: + for agent_host in list(self.remove_fdb_entries[port['id']]): + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, + self.remove_fdb_entries[port['id']][agent_host]) + self.remove_fdb_entries[port['id']].pop(agent_host, 0) + self.remove_fdb_entries.pop(port['id'], 0) + + def _get_diff_ips(self, orig, port): + orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) + port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) + + # check if an ip has been added or removed + orig_chg_ips = orig_ips.difference(port_ips) + port_chg_ips = port_ips.difference(orig_ips) + + if orig_chg_ips or port_chg_ips: + return orig_chg_ips, port_chg_ips + + def _fixed_ips_changed(self, context, orig, port, diff_ips): + orig_ips, port_ips = diff_ips + + port_infos = self._get_port_infos(context, orig) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + orig_mac_ip = [[port['mac_address'], port['device_owner'], ip] + for ip in orig_ips] + port_mac_ip = [[port['mac_address'], port['device_owner'], ip] + for ip in port_ips] + + upd_fdb_entries = {port['network_id']: {agent_ip: {}}} + + ports = upd_fdb_entries[port['network_id']][agent_ip] + if orig_mac_ip: + ports['before'] = orig_mac_ip + + if port_mac_ip: + ports['after'] = port_mac_ip + + self.L2populationAgentNotify.update_fdb_entries( + self.rpc_ctx, {'chg_ip': upd_fdb_entries}) + + return True + + def update_port_postcommit(self, context): + port = context.current + orig = context.original + + diff_ips = self._get_diff_ips(orig, port) + if diff_ips: + self._fixed_ips_changed(context, orig, port, diff_ips) + # TODO(vivek): DVR may need more robust handling of binding:host_id key + if (port.get('binding:host_id') != orig.get('binding:host_id') + and port['status'] == const.PORT_STATUS_ACTIVE + and not self.migrated_ports.get(orig['id'])): + # The port has been migrated. 
We have to store the original + # binding to send appropriate fdb once the port will be set + # on the destination host + self.migrated_ports[orig['id']] = orig + elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + binding = context.binding + if binding.status == const.PORT_STATUS_ACTIVE: + self._update_port_up(context) + if binding.status == const.PORT_STATUS_DOWN: + agent_host = binding.host + fdb_entries = {agent_host: + self._update_port_down(context, port)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + elif port['status'] != orig['status']: + agent_host = port['binding:host_id'] + if port['status'] == const.PORT_STATUS_ACTIVE: + self._update_port_up(context) + elif port['status'] == const.PORT_STATUS_DOWN: + fdb_entries = {agent_host: self._update_port_down(context, + port)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + elif port['status'] == const.PORT_STATUS_BUILD: + orig = self.migrated_ports.pop(port['id'], None) + if orig: + # this port has been migrated : remove its entries from fdb + fdb_entries = {agent_host: self._update_port_down(context, + orig)} + self.L2populationAgentNotify.remove_fdb_entries( + self.rpc_ctx, fdb_entries[agent_host]) + + def _get_port_infos(self, context, port): + agent_host = self._get_agent_host(context, port) + if not agent_host: + return + + session = db_api.get_session() + agent = self.get_agent_by_host(session, agent_host) + if not agent: + return + + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + agent_ip = self.get_agent_ip(agent) + else: + agent_ip = self.get_host_ip_from_binding_profile(port) + if not agent_ip: + LOG.warning(_("Unable to retrieve the agent ip, check the agent " + "configuration.")) + return + + segment = context.bound_segment + if not segment: + LOG.warning(_("Port %(port)s updated by agent %(agent)s " + "isn't bound to any segment"), + {'port': port['id'], 'agent': agent}) + return + + tunnel_types = self.get_agent_tunnel_types(agent) + if segment['network_type'] not in tunnel_types: + return + + fdb_entries = self._get_port_fdb_entries(port) + + return agent, agent_host, agent_ip, segment, fdb_entries + + def _update_port_up(self, context): + port_context = context.current + port_infos = self._get_port_infos(context, port_context) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + network_id = port_context['network_id'] + + session = db_api.get_session() + agent_active_ports = self.get_agent_network_active_port_count( + session, agent_host, network_id) + + other_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {agent_ip: []}}} + + if agent_active_ports == 1 or ( + self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time): + # First port activated on current agent in this network, + # we have to provide it with the whole list of fdb entries + agent_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {}}} + ports = agent_fdb_entries[network_id]['ports'] + + nondvr_network_ports = self.get_nondvr_network_ports(session, + network_id) + for network_port in nondvr_network_ports: + binding, agent = network_port + if agent.host == agent_host: + continue + + #ip = self.get_agent_ip(agent) + profile = binding['profile'] + ip = self.get_host_ip_from_binding_profile_str(profile) + if not ip: + 
LOG.debug(_("Unable to retrieve the agent ip, check " + "the agent %(agent_host)s configuration."), + {'agent_host': agent.host}) + continue + + agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) + agent_ports += self._get_port_fdb_entries(binding.port) + ports[ip] = agent_ports + # comment by j00209498 +# dvr_network_ports = self.get_dvr_network_ports(session, network_id) +# for network_port in dvr_network_ports: +# binding, agent = network_port +# if agent.host == agent_host: +# continue +# +# ip = self.get_agent_ip(agent) +# if not ip: +# LOG.debug(_("Unable to retrieve the agent ip, check " +# "the agent %(agent_host)s configuration."), +# {'agent_host': agent.host}) +# continue +# +# agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) +# ports[ip] = agent_ports + + # And notify other agents to add flooding entry + other_fdb_entries[network_id]['ports'][agent_ip].append( + const.FLOODING_ENTRY) + + if ports.keys(): + self.L2populationAgentNotify.add_fdb_entries( + self.rpc_ctx, agent_fdb_entries, agent_host) + + # Notify other agents to add fdb rule for current port + if port_context['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: + other_fdb_entries[network_id]['ports'][agent_ip] += ( + port_fdb_entries) + + self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, + other_fdb_entries) + + def _update_port_down(self, context, port_context, + agent_active_ports_count_for_flooding=0): + port_infos = self._get_port_infos(context, port_context) + if not port_infos: + return + agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos + + network_id = port_context['network_id'] + + session = db_api.get_session() + agent_active_ports = self.get_agent_network_active_port_count( + session, agent_host, network_id) + + other_fdb_entries = {network_id: + {'segment_id': segment['segmentation_id'], + 'network_type': segment['network_type'], + 'ports': {agent_ip: []}}} + if agent_active_ports == agent_active_ports_count_for_flooding: + # Agent is removing its last activated port in this network, + # other agents needs to be notified to delete their flooding entry. + other_fdb_entries[network_id]['ports'][agent_ip].append( + const.FLOODING_ENTRY) + + # Notify other agents to remove fdb rule for current port + if port_context['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: + fdb_entries = self._get_port_fdb_entries(port_context) + other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries + + return other_fdb_entries diff --git a/icehouse-patches/neutron/openvswitch-agent-patch/README.md b/icehouse-patches/neutron/openvswitch-agent-patch/README.md new file mode 100644 index 00000000..6f4126d1 --- /dev/null +++ b/icehouse-patches/neutron/openvswitch-agent-patch/README.md @@ -0,0 +1,65 @@ +Openstack Neutron-OpenvSwitch-agent +=============================== + + Neutron-OpenvSwitch-agent in cascaded OpenStack acts as the same role as the non-cascaded OpenStack. + Neutron-OpenvSwitch-agent is still Neutron-OpenvSwitch-agent, we modifed only one module ovs_dvr_neutron_agent. Because it is difficult to get dvr mac crossing openstack ,we processed dvr mac specially in cascaded openstack by modifying some code. + + +Key modules +----------- + +* Neutron-OpenvSwitch-agent is still Neutron-OpenvSwitch-agent, we modifed only one module ovs_dvr_neutron_agent. 
Because it is difficult to obtain DVR MAC addresses across OpenStack instances, DVR MACs are handled specially in the cascaded OpenStack; the change is confined to the following file: + + neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py + + +Requirements +------------ +* openstack-neutron-2014.1-1.1 has been installed and the DVR patch has been applied. + +Installation +------------ + +We provide two ways to install the Neutron-OpenvSwitch-agent patch. In this section, we will guide you through installing the Neutron-OpenvSwitch-agent patch without modifying the configuration. + +* **Note:** + + - Make sure you have an existing installation of **OpenStack Icehouse**. + - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified: + $NEUTRON_PARENT_DIR/neutron + (replace the $... with actual directory names.) + +* **Manual Installation** + + - Navigate to the local repository and copy the contents of the 'neutron' sub-directory to the corresponding places in the existing neutron installation, e.g. + ```cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR``` + (replace the $... with actual directory name.) + + - Restart the neutron openvswitch-agent. + ```service openstack-neutron-openvswitch-agent restart``` + + - Done. + +* **Automatic Installation** + + - Navigate to the installation directory and run the installation script. + ``` + cd $LOCAL_REPOSITORY_DIR/installation + sudo bash ./install.sh + ``` + (replace the $... with actual directory name.) + + - Done. The installation script should modify the neutron code without modifying the configuration. + +* **Troubleshooting** + + In case the automatic installation process is not complete, please check the following: + + - Make sure your OpenStack version is Icehouse and the DVR patch has been applied. + + - Check the variables at the beginning of the install.sh script. Your installation directories may be different from the default values we provide. + + - The installation script automatically copies the related code to $NEUTRON_PARENT_DIR/neutron and does not modify the related configuration. + + - In case the automatic installation does not work, try to install manually. diff --git a/icehouse-patches/neutron/openvswitch-agent-patch/installation/install.sh b/icehouse-patches/neutron/openvswitch-agent-patch/installation/install.sh new file mode 100644 index 00000000..a7fad17e --- /dev/null +++ b/icehouse-patches/neutron/openvswitch-agent-patch/installation/install.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_NEUTRON_CONF_DIR="/etc/neutron" +_NEUTRON_CONF_FILE='neutron.conf' +_NEUTRON_INSTALL="/usr/lib64/python2.6/site-packages" +_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" + +# if you did not make changes to the installation files, +# please do not edit the following directories.
+_CODE_DIR="../neutron/" +_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascaded-openvswitch-agent-installation-backup" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/neutron-cascaded-openvswitch-agent/installation/${_SCRIPT_NAME}.log" + +if [[ ${EUID} -ne 0 ]]; then + echo "Please run as root." + exit 1 +fi + +##Redirecting output to logfile as well as stdout +#exec > >(tee -a ${_SCRIPT_LOGFILE}) +#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) + +cd `dirname $0` + +echo "checking installation directories..." +if [ ! -d "${_NEUTRON_DIR}" ] ; then + echo "Could not find the neutron installation. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi +if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then + echo "Could not find neutron config file. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi + +echo "checking previous installation..." +if [ -d "${_BACKUP_DIR}/neutron" ] ; then + echo "It seems neutron-cascaded-openvswitch-agent has already been installed!" + echo "Please check README for solution if this is not true." + exit 1 +fi + +echo "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}" +cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/neutron" + echo "Error in code backup, aborted." + exit 1 +fi + +echo "copying in new files..." +cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` +if [ $? -ne 0 ] ; then + echo "Error in copying, aborted." + echo "Recovering original files..." + cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" + if [ $? -ne 0 ] ; then + echo "Recovering failed! Please install manually." + fi + exit 1 +fi + +echo "restarting cascaded neutron openvswitch agent..." +service openstack-neutron-openvswitch-agent restart +if [ $? -ne 0 ] ; then + echo "There was an error in restarting the service, please restart cascade neutron openvswitch-agent manually." + exit 1 +fi + +echo "Completed." +echo "See README to get started." + +exit 0 + + diff --git a/icehouse-patches/neutron/openvswitch-agent-patch/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py b/icehouse-patches/neutron/openvswitch-agent-patch/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py new file mode 100644 index 00000000..65895654 --- /dev/null +++ b/icehouse-patches/neutron/openvswitch-agent-patch/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -0,0 +1,764 @@ +# Copyright 2014, Hewlett Packard, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Vivekanandan Narasimhan, Hewlett-Packard Inc +# @author: Haojie Jia, Huawei + + +from neutron.api.rpc import dvr_rpc +from neutron.common import constants as q_const +from neutron.openstack.common import log as logging +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + + +# A class to represent a DVR-hosted subnet including vif_ports resident on +# that subnet +class LocalDVRSubnetMapping: + + def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID): + # set of commpute ports on on this dvr subnet + self.compute_ports = {} + self.subnet = subnet + self.csnat_ofport = csnat_ofport + self.dvr_owned = False + + def __str__(self): + return ("subnet = %s compute_ports = %s csnat_port = %s" + " is_dvr_owned = %s" % + (self.subnet, self.get_compute_ofports(), + self.get_csnat_ofport(), self.is_dvr_owned())) + + def get_subnet_info(self): + return self.subnet + + def set_dvr_owned(self, owned): + self.dvr_owned = owned + + def is_dvr_owned(self): + return self.dvr_owned + + def add_compute_ofport(self, vif_id, ofport): + self.compute_ports[vif_id] = ofport + + def remove_compute_ofport(self, vif_id): + self.compute_ports.pop(vif_id, 0) + + def remove_all_compute_ofports(self): + self.compute_ports.clear() + + def get_compute_ofports(self): + return self.compute_ports + + def set_csnat_ofport(self, ofport): + self.csnat_ofport = ofport + + def get_csnat_ofport(self): + return self.csnat_ofport + + +class OVSPort: + + def __init__(self, id, ofport, mac, device_owner): + self.id = id + self.mac = mac + self.ofport = ofport + self.subnets = set() + self.device_owner = device_owner + + def __str__(self): + return ("OVSPort: id = %s, ofport = %s, mac = %s," + "device_owner = %s, subnets = %s" % + (self.id, self.ofport, self.mac, + self.device_owner, self.subnets)) + + def add_subnet(self, subnet_id): + self.subnets.add(subnet_id) + + def remove_subnet(self, subnet_id): + self.subnets.remove(subnet_id) + + def remove_all_subnets(self): + self.subnets.clear() + + def get_subnets(self): + return self.subnets + + def get_device_owner(self): + return self.device_owner + + def get_mac(self): + return self.mac + + def get_ofport(self): + return self.ofport + + +class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): + + '''Implements OVS-based DVR(Distributed Virtual Router), for + overlay networks. + + ''' + + # history + # 1.0 Initial version + + def __init__(self, context, plugin_rpc, integ_br, tun_br, + patch_int_ofport=constants.OFPORT_INVALID, + patch_tun_ofport=constants.OFPORT_INVALID, + host=None, enable_tunneling=False, + enable_distributed_routing=False): + self.context = context + self.plugin_rpc = plugin_rpc + self.int_br = integ_br + self.tun_br = tun_br + self.patch_int_ofport = patch_int_ofport + self.patch_tun_ofport = patch_tun_ofport + self.host = host + self.enable_tunneling = enable_tunneling + self.enable_distributed_routing = enable_distributed_routing + + def reset_ovs_parameters(self, integ_br, tun_br, + patch_int_ofport, patch_tun_ofport): + '''Reset the openvswitch parameters + ''' + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + self.int_br = integ_br + self.tun_br = tun_br + self.patch_int_ofport = patch_int_ofport + self.patch_tun_ofport = patch_tun_ofport + + def setup_dvr_flows_on_integ_tun_br(self): + '''Setup up initial dvr flows into integration bridge and tunnel + bridge. 
+ ''' + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + # get the local DVR MAC Address + try: + LOG.debug("L2 Agent operating in DVR Mode") + self.dvr_mac_address = None + self.local_dvr_map = {} + self.local_csnat_map = {} + self.local_ports = {} + self.registered_dvr_macs = set() + details = self.plugin_rpc.\ + get_dvr_mac_address_by_host(self.context, self.host) + LOG.debug("L2 Agent DVR: Received response for " + "get_dvr_mac_address_by_host() from " + "plugin: %r", details) + self.dvr_mac_address = details['mac_address'] + except Exception: + LOG.exception(_("DVR: Failed to obtain local DVR Mac address")) + self.enable_distributed_routing = False + # switch all traffic using L2 learning + self.int_br.add_flow(priority=1, actions="normal") + return + + # Remove existing flows in integration bridge + # self.int_br.remove_all_flows() + + # Insert 'drop' action as the default for Table 2 + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=1, + actions="drop") + + # Insert 'normal' action as the default for Table 1 + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=1, + actions="normal") + +# add by j00209498 + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=2, + in_port=self.patch_tun_ofport, + dl_src='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="resubmit(,%s)" % + constants.DVR_TO_SRC_MAC) + + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=1, + dl_src='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="output:%s" % self.patch_int_ofport) +# comment by j00209498 +# dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context) +# LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs) +# for mac in dvr_macs: +# if mac['mac_address'] == self.dvr_mac_address: +# continue +# Table 0 (default) will now sort DVR traffic from other +# traffic depending on in_port +# self.int_br.add_flow(table=constants.LOCAL_SWITCHING, +# priority=2, +# in_port=self.patch_tun_ofport, +# dl_src=mac['mac_address'], +# actions="resubmit(,%s)" % +# constants.DVR_TO_SRC_MAC) +# Table DVR_NOT_LEARN ensures unique dvr macs in the cloud +# are not learnt, as they may +# result in flow explosions +# self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, +# priority=1, +# dl_src=mac['mac_address'], +# actions="output:%s" % self.patch_int_ofport) +# +# self.registered_dvr_macs.add(mac['mac_address']) + + self.tun_br.add_flow(priority=1, + in_port=self.patch_int_ofport, + actions="resubmit(,%s)" % + constants.DVR_PROCESS) + # table-miss should be sent to learning table + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=0, + actions="resubmit(,%s)" % + constants.LEARN_FROM_TUN) + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=0, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + + def dvr_mac_address_update(self, dvr_macs): + pass + # comment by j00209498 +# if not self.enable_tunneling: +# return +# +# if not self.enable_distributed_routing: +# return +# +# LOG.debug("DVR Mac address update with host-mac: %s", dvr_macs) +# +# if not self.dvr_mac_address: +# LOG.debug("Self mac unknown, ignoring this" +# " dvr_mac_address_update() ") +# return +# +# dvr_host_macs = set() +# for entry in dvr_macs: +# if entry['mac_address'] == self.dvr_mac_address: +# continue +# dvr_host_macs.add(entry['mac_address']) +# +# if dvr_host_macs == self.registered_dvr_macs: +# LOG.debug("DVR Mac address already up to date") +# return +# +# dvr_macs_added = dvr_host_macs - 
self.registered_dvr_macs +# dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs +# +# for oldmac in dvr_macs_removed: +# self.int_br.delete_flows(table=constants.LOCAL_SWITCHING, +# in_port=self.patch_tun_ofport, +# dl_src=oldmac) +# self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN, +# dl_src=oldmac) +# LOG.debug("Removed DVR MAC flow for %s", oldmac) +# self.registered_dvr_macs.remove(oldmac) +# +# for newmac in dvr_macs_added: +# self.int_br.add_flow(table=constants.LOCAL_SWITCHING, +# priority=2, +# in_port=self.patch_tun_ofport, +# dl_src=newmac, +# actions="resubmit(,%s)" % +# constants.DVR_TO_SRC_MAC) +# self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, +# priority=1, +# dl_src=newmac, +# actions="output:%s" % self.patch_int_ofport) +# LOG.debug("Added DVR MAC flow for %s", newmac) +# self.registered_dvr_macs.add(newmac) + + def is_dvr_router_interface(self, device_owner): + return (device_owner == q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED) + + def process_tunneled_network(self, network_type, lvid, segmentation_id): + if not self.enable_tunneling: + return + if not self.enable_distributed_routing: + return + self.tun_br.add_flow(table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + actions="mod_vlan_vid:%s," + "resubmit(,%s)" % + (lvid, constants.DVR_NOT_LEARN)) + + def _bind_distributed_router_interface_port(self, port, fixed_ips, + device_owner, local_vlan): + # since router port must have only one fixed IP, directly + # use fixed_ips[0] + subnet_uuid = fixed_ips[0]['subnet_id'] + csnat_ofport = constants.OFPORT_INVALID + ldm = None + if subnet_uuid in self.local_dvr_map: + ldm = self.local_dvr_map[subnet_uuid] + csnat_ofport = ldm.get_csnat_ofport() + if csnat_ofport == constants.OFPORT_INVALID: + LOG.error(_("DVR: Duplicate DVR router interface detected " + "for subnet %s"), subnet_uuid) + return + else: + # set up LocalDVRSubnetMapping available for this subnet + subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, + subnet_uuid) + if not subnet_info: + LOG.error(_("DVR: Unable to retrieve subnet information" + " for subnet_id %s"), subnet_uuid) + return + LOG.debug("get_subnet_for_dvr for subnet %s returned with %s" % + (subnet_uuid, subnet_info)) + ldm = LocalDVRSubnetMapping(subnet_info) + self.local_dvr_map[subnet_uuid] = ldm + + # DVR takes over + ldm.set_dvr_owned(True) + + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + local_compute_ports = self.plugin_rpc.\ + get_compute_ports_on_host_by_subnet(self.context, + self.host, + subnet_uuid) + LOG.debug("DVR: List of ports received from " + "get_compute_ports_on_host_by_subnet %r", + local_compute_ports) + for prt in local_compute_ports: + vif = self.int_br.get_vif_port_by_id(prt['id']) + if not vif: + continue + ldm.add_compute_ofport(vif.vif_id, vif.ofport) + if vif.vif_id in self.local_ports: + # ensure if a compute port is already on + # a different dvr routed subnet + # if yes, queue this subnet to that port + ovsport = self.local_ports[vif.vif_id] + ovsport.add_subnet(subnet_uuid) + else: + # the compute port is discovered first here that its on + # a dvr routed subnet queue this subnet to that port + ovsport = OVSPort(vif.vif_id, vif.ofport, + vif.vif_mac, prt['device_owner']) + + ovsport.add_subnet(subnet_uuid) + self.local_ports[vif.vif_id] = ovsport + + # create rule for just this vm port + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + 
actions="strip_vlan,mod_dl_src:%s," + "output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + + # create rule in Table LOCAL_SWITCHING to forward + # broadcast/multicast frames from dvr router interface to + # appropriate local tenant ports + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + if csnat_ofport != constants.OFPORT_INVALID: + ofports = str(csnat_ofport) + ',' + ofports + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s, " + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=3, + dl_vlan=local_vlan, + proto='arp', + nw_dst=subnet_info['gateway_ip'], + actions="drop") + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=2, + dl_vlan=local_vlan, + dl_dst=port.vif_mac, + actions="drop") + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=1, + dl_vlan=local_vlan, + dl_src=port.vif_mac, + actions="mod_dl_src:%s,resubmit(,%s)" % + (self.dvr_mac_address, + constants.PATCH_LV_TO_TUN)) + + # the dvr router interface is itself a port, so capture it + # queue this subnet to that port. A subnet appears only once as + # a router interface on any given router + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + + def _bind_compute_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): + # Handle new compute port added use-case + subnet_uuid = None + for ips in fixed_ips: + if ips['subnet_id'] not in self.local_dvr_map: + continue + subnet_uuid = ips['subnet_id'] + ldm = self.local_dvr_map[subnet_uuid] + if not ldm.is_dvr_owned(): + # well this is csnat stuff, let dvr come in + # and do plumbing for this vm later + continue + + # This confirms that this compute port belongs + # to a dvr hosted subnet. 
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Plumbing compute port %s", port.vif_id) + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + csnat_ofport = ldm.get_csnat_ofport() + ldm.add_compute_ofport(port.vif_id, port.ofport) + if port.vif_id in self.local_ports: + # ensure if a compute port is already on a different + # dvr routed subnet + # if yes, queue this subnet to that port + ovsport = self.local_ports[port.vif_id] + ovsport.add_subnet(subnet_uuid) + else: + # the compute port is discovered first here that its + # on a dvr routed subnet, queue this subnet to that port + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + # create a rule for this vm port + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + "output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + + if csnat_ofport != constants.OFPORT_INVALID: + ofports = str(csnat_ofport) + ',' + ofports + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): + if port.vif_id in self.local_ports: + # throw an error if CSNAT port is already on a different + # dvr routed subnet + ovsport = self.local_ports[port.vif_id] + subs = list(ovsport.get_subnets()) + LOG.error(_("Centralized-SNAT port %s already seen on "), + port.vif_id) + LOG.error(_("a different subnet %s"), subs[0]) + return + # since centralized-SNAT (CSNAT) port must have only one fixed + # IP, directly use fixed_ips[0] + subnet_uuid = fixed_ips[0]['subnet_id'] + ldm = None + subnet_info = None + if subnet_uuid not in self.local_dvr_map: + # no csnat ports seen on this subnet - create csnat state + # for this subnet + subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, + subnet_uuid) + ldm = LocalDVRSubnetMapping(subnet_info, port.ofport) + self.local_dvr_map[subnet_uuid] = ldm + else: + ldm = self.local_dvr_map[subnet_uuid] + subnet_info = ldm.get_subnet_info() + # Store csnat OF Port in the existing DVRSubnetMap + ldm.set_csnat_ofport(port.ofport) + + # create ovsPort footprint for csnat port + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + ofports = str(ldm.get_csnat_ofport()) + ',' + ofports + ip_subnet = subnet_info['cidr'] + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + def bind_port_to_dvr(self, port, network_type, fixed_ips, + device_owner, local_vlan_id): + # a port coming up as distributed router interface + if not self.enable_tunneling: + return + + if not 
self.enable_distributed_routing: + return + + if network_type not in constants.TUNNEL_NETWORK_TYPES: + return + + if device_owner == q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED: + self._bind_distributed_router_interface_port(port, fixed_ips, + device_owner, + local_vlan_id) + + if 'compute' in device_owner: + self._bind_compute_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) + + if device_owner == q_const.DEVICE_OWNER_ROUTER_SNAT: + self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) + + def _unbind_distributed_router_interface_port(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + + # removal of distributed router interface + subnet_ids = ovsport.get_subnets() + subnet_set = set(subnet_ids) + # ensure we process for all the subnets laid on this removed port + for sub_uuid in subnet_set: + if sub_uuid not in self.local_dvr_map: + continue + + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + + # DVR is no more owner + ldm.set_dvr_owned(False) + + # remove all vm rules for this dvr subnet + # clear of compute_ports altogether + compute_ports = ldm.get_compute_ofports() + for vif_id in compute_ports: + ovsport = self.local_ports[vif_id] + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + ldm.remove_all_compute_ofports() + + if ldm.get_csnat_ofport() != -1: + # If there is a csnat port on this agent, preserve + # the local_dvr_map state + ofports = str(ldm.get_csnat_ofport()) + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + # removed port is a distributed router interface + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', dl_vlan=local_vlan, + nw_dst=ip_subnet) + # remove subnet from local_dvr_map as no dvr (or) csnat + # ports available on this agent anymore + self.local_dvr_map.pop(sub_uuid, None) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + proto='arp', + nw_dst=subnet_info['gateway_ip']) + ovsport.remove_subnet(sub_uuid) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + dl_dst=port.vif_mac) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + dl_src=port.vif_mac) + # release port state + self.local_ports.pop(port.vif_id, None) + + def _unbind_compute_port_on_dvr_subnet(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + # This confirms that this compute port being removed belonged + # to a dvr hosted subnet. 
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Removing plumbing for compute port %s", port) + subnet_ids = ovsport.get_subnets() + # ensure we process for all the subnets laid on this port + for sub_uuid in subnet_ids: + if sub_uuid not in self.local_dvr_map: + continue + + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ldm.remove_compute_ofport(port.vif_id) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + ip_subnet = subnet_info['cidr'] + + # first remove this vm port rule + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + if ldm.get_csnat_ofport() != -1: + # If there is a csnat port on this agent, preserve + # the local_dvr_map state + ofports = str(ldm.get_csnat_ofport()) + ',' + ofports + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], + ofports)) + else: + # remove the flow altogether, as no ports (both csnat/ + # compute) are available on this subnet in this + # agent + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet) + # release port state + self.local_ports.pop(port.vif_id, None) + + def _unbind_centralized_snat_port_on_dvr_subnet(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + # This comfirms that this compute port being removed belonged + # to a dvr hosted subnet. 
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Removing plumbing for csnat port %s", port) + sub_uuid = list(ovsport.get_subnets())[0] + # ensure we process for all the subnets laid on this port + if sub_uuid not in self.local_dvr_map: + return + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + ldm.set_csnat_ofport(constants.OFPORT_INVALID) + # then remove csnat port rule + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet) + if not ldm.is_dvr_owned(): + # if not owned by DVR (only used for csnat), remove this + # subnet state altogether + self.local_dvr_map.pop(sub_uuid, None) + + # release port state + self.local_ports.pop(port.vif_id, None) + + def unbind_port_from_dvr(self, vif_port, local_vlan_id): + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + if not vif_port: + LOG.debug("DVR: VIF Port not available for delete %s", vif_port) + return + + # Handle port removed use-case + if vif_port.vif_id not in self.local_ports: + LOG.debug("DVR: Non distributed port, ignoring %s", vif_port) + return + + ovsport = self.local_ports[vif_port.vif_id] + + if ovsport.get_device_owner() == \ + q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED: + self._unbind_distributed_router_interface_port(vif_port, + local_vlan_id) + + if 'compute' in ovsport.get_device_owner(): + self._unbind_compute_port_on_dvr_subnet(vif_port, + local_vlan_id) + + if ovsport.get_device_owner() == q_const.DEVICE_OWNER_ROUTER_SNAT: + self._unbind_centralized_snat_port_on_dvr_subnet(vif_port, + local_vlan_id) diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/agent/l3_agent.py b/icehouse-patches/neutron/vlan2vlan/neutron/agent/l3_agent.py new file mode 100644 index 00000000..29498e0f --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/agent/l3_agent.py @@ -0,0 +1,1697 @@ +# Copyright 2012 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import sys +import os + +import eventlet +eventlet.monkey_patch() + +import netaddr +from six import moves +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import iptables_manager +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as l3_constants +from neutron.common import rpc as rpc_compat +from neutron.common import topics +from neutron.common import utils as common_utils +from neutron import context +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import periodic_task +from neutron.openstack.common import processutils +from neutron.openstack.common import service +from neutron.openstack.common import jsonutils +from neutron import service as neutron_service +from neutron.services.firewall.agents.l3reference import firewall_l3_agent + +LOG = logging.getLogger(__name__) +NS_PREFIX = 'qrouter-' +INTERNAL_DEV_PREFIX = 'qr-' +EXTERNAL_DEV_PREFIX = 'qg-' +SNAT_INT_DEV_PREFIX = 'sg-' +FIP_NS_PREFIX = 'fip-' +SNAT_NS_PREFIX = 'snat-' +FIP_2_RTR_DEV_PREFIX = 'fpr-' +RTR_2_FIP_DEV_PREFIX = 'rfp-' +FIP_EXT_DEV_PREFIX = 'fg-' +FIP_LL_PREFIX = '169.254.30.' +# Route Table index for FIPs +FIP_RT_TBL = 16 +# Rule priority range for FIPs +FIP_PR_ST = 32768 +FIP_PR_END = FIP_PR_ST + 40000 +RPC_LOOP_INTERVAL = 1 +FLOATING_IP_CIDR_SUFFIX = '/32' + + +class L3PluginApi(rpc_compat.RpcProxy): + + """Agent side of the l3 agent RPC API. + + API version history: + 1.0 - Initial version. + 1.1 - Floating IP operational status updates + 1.2 - DVR support + + """ + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic, host): + super(L3PluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.host = host + + def get_routers(self, context, router_ids=None): + """Make a remote process call to retrieve the sync data for routers.""" + return self.call(context, + self.make_msg('sync_routers', host=self.host, + router_ids=router_ids), + topic=self.topic) + + def get_external_network_id(self, context): + """Make a remote process call to retrieve the external network id. 
+ + @raise rpc_compat.RemoteError: with TooManyExternalNetworks + as exc_type if there are + more than one external network + """ + return self.call(context, + self.make_msg('get_external_network_id', + host=self.host), + topic=self.topic) + + def update_floatingip_statuses(self, context, router_id, fip_statuses): + """Call the plugin update floating IPs's operational status.""" + return self.call(context, + self.make_msg('update_floatingip_statuses', + router_id=router_id, + fip_statuses=fip_statuses), + topic=self.topic, + version='1.1') + + def get_ports_by_subnet(self, context, subnet_id): + """Retrieve ports by subnet id.""" + return self.call(context, + self.make_msg('get_ports_by_subnet', host=self.host, + subnet_id=subnet_id), + topic=self.topic, + version='1.2') + + def get_agent_gateway_port(self, context, fip_net): + """Get or create a agent_gateway_port.""" + return self.call(context, + self.make_msg('get_agent_gateway_port', + network_id=fip_net, host=self.host), + topic=self.topic, + version='1.2') + + +class NextHopMACPriority(object): + + def __init__(self, nexthop, dvr_mac, priority): + self.nexthop = nexthop + self.dvr_mac = dvr_mac + self.priority = priority + + +class RouterInfo(object): + + def __init__(self, router_id, root_helper, use_namespaces, router): + self.router_id = router_id + self.ex_gw_port = None + self._snat_enabled = None + self._snat_action = None + self.internal_ports = [] + self.snat_ports = [] + self.floating_ips = set() + # TODO(mrsmith): DVR merge cleanup + self.floating_ips_dict = {} + self.root_helper = root_helper + self.use_namespaces = use_namespaces + # Invoke the setter for establishing initial SNAT action + self.router = router + self.ns_name = NS_PREFIX + router_id if use_namespaces else None + self.iptables_manager = iptables_manager.IptablesManager( + root_helper=root_helper, + # FIXME(danwent): use_ipv6=True, + namespace=self.ns_name) + self.routes = [] + # added by j00209498 ----begin + self.next_hop_ip_mac_map = {} + self.extern_ip_interface_name = None + self.available_ip_rule_priority = set(moves.xrange(32, 35535)) + # added by j00209498 ----end + # DVR Data + # Linklocal router to floating IP addr + self.rtr_2_fip = None + # Linklocal floating to router IP addr + self.fip_2_rtr = None + self.dist_fip_count = 0 + + @property + def router(self): + return self._router + + @router.setter + def router(self, value): + self._router = value + if not self._router: + return + # enable_snat by default if it wasn't specified by plugin + self._snat_enabled = self._router.get('enable_snat', True) + # Set a SNAT action for the router + if self._router.get('gw_port'): + self._snat_action = ('add_rules' if self._snat_enabled + else 'remove_rules') + elif self.ex_gw_port: + # Gateway port was removed, remove rules + self._snat_action = 'remove_rules' + + def perform_snat_action(self, snat_callback, *args): + # Process SNAT rules for attached subnets + if self._snat_action: + snat_callback(self, self._router.get('gw_port'), + *args, action=self._snat_action) + self._snat_action = None + + +class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, manager.Manager): + + """Manager for L3NatAgent + + API version history: + 1.0 initial Version + 1.1 changed the type of the routers parameter + to the routers_updated method. + It was previously a list of routers in dict format. + It is now a list of router IDs only. + Per rpc versioning rules, it is backwards compatible. 
+ """ + RPC_API_VERSION = '1.1' + + OPTS = [ + cfg.BoolOpt('distributed_agent', default=False, + help=_("Enables distributed router agent function.")), + cfg.BoolOpt('centralized_snat', default=False, + help=_("Enables centralized SNAT in dvr mode.")), + cfg.BoolOpt('centralized_router', default=True, + help=_("Enables centralized router in dvr mode.")), + cfg.StrOpt('external_network_bridge', default='br-ex', + help=_("Name of bridge used for external network " + "traffic.")), + cfg.IntOpt('metadata_port', + default=9697, + help=_("TCP Port used by Neutron metadata namespace " + "proxy.")), + cfg.IntOpt('send_arp_for_ha', + default=3, + help=_("Send this many gratuitous ARPs for HA setup, if " + "less than or equal to 0, the feature is disabled")), + cfg.StrOpt('router_id', default='', + help=_("If namespaces is disabled, the l3 agent can only" + " configure a router that has the matching router " + "ID.")), + cfg.BoolOpt('handle_internal_only_routers', + default=True, + help=_("Agent should implement routers with no gateway")), + cfg.StrOpt('gateway_external_network_id', default='', + help=_("UUID of external network for routers implemented " + "by the agents.")), + cfg.BoolOpt('enable_metadata_proxy', default=True, + help=_("Allow running metadata proxy.")), + cfg.BoolOpt('router_delete_namespaces', default=False, + help=_("Delete namespace after removing a router.")), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + # added by j00209498 ---begin + cfg.StrOpt('notify_l2_file_path', + default='/var/', + help=_('Location of notify_l2_file_path')), + cfg.StrOpt('notify_l2_file_name', + default='notify_l2_agent_info.json', + help=_('File name of notify_l2_file')), + # added by j00209498 ---end + ] + + def __init__(self, host, conf=None): + if conf: + self.conf = conf + else: + self.conf = cfg.CONF + self.root_helper = config.get_root_helper(self.conf) + self.router_info = {} + + self._check_config_params() + + try: + self.driver = importutils.import_object( + self.conf.interface_driver, + self.conf + ) + except Exception: + msg = _("Error importing interface driver " + "'%s'") % self.conf.interface_driver + LOG.error(msg) + raise SystemExit(1) + + self.context = context.get_admin_context_without_session() + self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) + self.fullsync = True + self.updated_routers = set() + self.removed_routers = set() + self.sync_progress = False + + # TODO(mrsmith): remove once agent restart with + # stale namespaces is supported for DVR + root_ip = ip_lib.IPWrapper(self.root_helper) + host_namespaces = root_ip.get_namespaces(self.root_helper) + snat_namespaces = set(ns for ns in host_namespaces + if ns.startswith(SNAT_NS_PREFIX)) + self._destroy_stale_router_namespaces(snat_namespaces) + fip_namespaces = set(ns for ns in host_namespaces + if ns.startswith(FIP_NS_PREFIX)) + self._destroy_stale_router_namespaces(fip_namespaces) + + self._clean_stale_namespaces = self.conf.use_namespaces + + self.notify_l2_agent_info = {} + + # dvr data + self.agent_gateway_port = None + self.agent_fip_count = 0 + self.local_ips = set(xrange(2, 251)) + self.fip_priorities = set(xrange(FIP_PR_ST, FIP_PR_END)) + + self.rpc_loop = loopingcall.FixedIntervalLoopingCall( + self._rpc_loop) + self.rpc_loop.start(interval=RPC_LOOP_INTERVAL) + super(L3NATAgent, self).__init__(conf=self.conf) + + self.target_ex_net_id = None + + def _check_config_params(self): + """Check items in configuration files. 
+ + Check for required and invalid configuration items. + The actual values are not verified for correctness. + """ + if not self.conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + + if not self.conf.use_namespaces and not self.conf.router_id: + msg = _('Router id is required if not using namespaces.') + LOG.error(msg) + raise SystemExit(1) + + def _cleanup_namespaces(self, routers): + """Destroy stale router namespaces on host when L3 agent restarts + + This routine is called when self._clean_stale_namespaces is True. + + The argument routers is the list of routers that are recorded in + the database as being hosted on this node. + """ + try: + root_ip = ip_lib.IPWrapper(self.root_helper) + + host_namespaces = root_ip.get_namespaces(self.root_helper) + router_namespaces = set(ns for ns in host_namespaces + if ns.startswith(NS_PREFIX)) + ns_to_ignore = set(NS_PREFIX + r['id'] for r in routers) + # TODO(mrsmith): include DVR SNAT namespaces, FIP namespaces + ns_to_destroy = router_namespaces - ns_to_ignore + except RuntimeError: + LOG.exception(_('RuntimeError in obtaining router list ' + 'for namespace cleanup.')) + else: + self._destroy_stale_router_namespaces(ns_to_destroy) + + def _destroy_stale_router_namespaces(self, router_namespaces): + """Destroys the stale router namespaces + + The argumenet router_namespaces is a list of stale router namespaces + + As some stale router namespaces may not be able to be deleted, only + one attempt will be made to delete them. + """ + for ns in router_namespaces: + try: + self._destroy_namespace(ns) + except RuntimeError: + LOG.exception(_('Failed to destroy stale router namespace ' + '%s'), ns) + self._clean_stale_namespaces = False + + def _destroy_namespace(self, ns): + if ns.startswith(NS_PREFIX): + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns) + self._destroy_router_namespace(ns) + elif ns.startswith(FIP_NS_PREFIX): + self._destroy_fip_namespace(ns) + elif ns.startswith(SNAT_NS_PREFIX): + self._destroy_snat_namespace(ns) + + def _destroy_snat_namespace(self, ns_name): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + # delete internal interfaces + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(SNAT_INT_DEV_PREFIX): + LOG.debug('DVR: unplug: %s', d.name) + self.driver.unplug(d.name, namespace=ns_name, + prefix=SNAT_INT_DEV_PREFIX) + + # TODO(mrsmith): delete ext-gw-port + LOG.debug('DVR: destroy snat ns: %s', ns_name) + ns_ip.netns.delete(ns_name) + + def _destroy_fip_namespace(self, ns_name): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(FIP_2_RTR_DEV_PREFIX): + # internal link between IRs and FIP NS + # TODO(mrsmith): remove IR interfaces (IP pool?) 
+ pass + elif d.name.startswith(FIP_EXT_DEV_PREFIX): + # single port from FIP NS to br-ext + # TODO(mrsmith): remove br-ext interface + LOG.debug('DVR: unplug: %s', d.name) + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + LOG.debug('DVR: destroy fip ns: %s', ns_name) + # TODO(mrsmith): add LOG warn if fip count != 0 + ns_ip.netns.delete(ns_name) + self.agent_gateway_port = None + + def _destroy_router_namespace(self, namespace): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(INTERNAL_DEV_PREFIX): + # device is on default bridge + self.driver.unplug(d.name, namespace=namespace, + prefix=INTERNAL_DEV_PREFIX) + elif d.name.startswith(EXTERNAL_DEV_PREFIX): + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=namespace, + prefix=EXTERNAL_DEV_PREFIX) + + if self.conf.router_delete_namespaces: + try: + ns_ip.netns.delete(namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + LOG.exception(msg % namespace) + + def _create_namespace(self, name): + ip_wrapper_root = ip_lib.IPWrapper(self.root_helper) + ip_wrapper = ip_wrapper_root.ensure_namespace(name) + LOG.debug('DVR: ns-name: %s', name) + ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) + + def _create_router_namespace(self, ri): + self._create_namespace(ri.ns_name) + + def _fetch_external_net_id(self, force=False): + """Find UUID of single external network for this agent.""" + if self.conf.gateway_external_network_id: + return self.conf.gateway_external_network_id + + # L3 agent doesn't use external_network_bridge to handle external + # networks, so bridge_mappings with provider networks will be used + # and the L3 agent is able to handle any external networks. + if not self.conf.external_network_bridge: + return + + if not force and self.target_ex_net_id: + return self.target_ex_net_id + + try: + self.target_ex_net_id = self.plugin_rpc.get_external_network_id( + self.context) + return self.target_ex_net_id + except rpc_compat.RemoteError as e: + with excutils.save_and_reraise_exception() as ctx: + if e.exc_type == 'TooManyExternalNetworks': + ctx.reraise = False + msg = _( + "The 'gateway_external_network_id' option must be " + "configured for this agent as Neutron has more than " + "one external network.") + raise Exception(msg) + + def _router_added(self, router_id, router): + ri = RouterInfo(router_id, self.root_helper, + self.conf.use_namespaces, router) + self.router_info[router_id] = ri + if self.conf.use_namespaces: + self._create_router_namespace(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].add_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].add_rule(c, r) + ri.iptables_manager.apply() + super(L3NATAgent, self).process_router_add(ri) + if self.conf.enable_metadata_proxy: + self._spawn_metadata_proxy(ri.router_id, ri.ns_name) + + def _router_removed(self, router_id): + ri = self.router_info.get(router_id) + if ri is None: + LOG.warn(_("Info for router %s were not found. 
" + "Skipping router removal"), router_id) + return + ri.router['gw_port'] = None + ri.router[l3_constants.INTERFACE_KEY] = [] + ri.router[l3_constants.FLOATINGIP_KEY] = [] + self.process_router(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].remove_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].remove_rule(c, r) + ri.iptables_manager.apply() + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ri.router_id, ri.ns_name) + del self.router_info[router_id] + self._destroy_router_namespace(ri.ns_name) + + def _spawn_metadata_proxy(self, router_id, ns_name): + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + '--router_id=%s' % router_id, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%s' % self.conf.metadata_port] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % + router_id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.enable(callback) + + def _destroy_metadata_proxy(self, router_id, ns_name): + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.disable() + + def _set_subnet_arp_info(self, ri, port): + """Get ARP info from Plugin for existing ports for subnet.""" + if 'id' not in port['subnet'] or not ri.router['distributed']: + return + subnet_id = port['subnet']['id'] + subnet_ports = ( + self.plugin_rpc.get_ports_by_subnet(self.context, + subnet_id)) + + for p in subnet_ports: + if (p['device_owner'] not in ( + 'network:router_interface', + 'network:router_interface_distributed')): + for fixed_ip in p['fixed_ips']: + self._update_arp_entry(ri, fixed_ip['ip_address'], + p['mac_address'], + subnet_id, 'add') + + def _set_subnet_info(self, port): + ips = port['fixed_ips'] + if not ips: + raise Exception(_("Router port %s has no IP address") % port['id']) + if len(ips) > 1: + LOG.error(_("Ignoring multiple IPs on router port %s"), + port['id']) + prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen + port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) + + def _get_existing_devices(self, ri): + ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper, + namespace=ri.ns_name) + ip_devs = ip_wrapper.get_devices(exclude_loopback=True) + return [ip_dev.name for ip_dev in ip_devs] + + def process_router(self, ri): + # TODO(mrsmith) - we shouldn't need to check here + if 'distributed' not in ri.router: + ri.router['distributed'] = False + ri.iptables_manager.defer_apply_on() + ex_gw_port = self._get_ex_gw_port(ri) + internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + snat_ports = ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) + existing_port_ids = set([p['id'] for p in ri.internal_ports]) + current_port_ids = set([p['id'] for p in internal_ports + if p['admin_state_up']]) + new_ports = [p for p in internal_ports if + p['id'] in current_port_ids and + p['id'] not in existing_port_ids] + old_ports = [p for p in ri.internal_ports if + p['id'] not in current_port_ids] + for p in new_ports: + self._set_subnet_info(p) + self.internal_network_added(ri, p) + ri.internal_ports.append(p) + self._set_subnet_arp_info(ri, p) + + for p in old_ports: + self.internal_network_removed(ri, p) + ri.internal_ports.remove(p) + + existing_devices = 
self._get_existing_devices(ri) + current_internal_devs = set([n for n in existing_devices + if n.startswith(INTERNAL_DEV_PREFIX)]) + current_port_devs = set([self.get_internal_device_name(id) for + id in current_port_ids]) + stale_devs = current_internal_devs - current_port_devs + for stale_dev in stale_devs: + LOG.debug(_('Deleting stale internal router device: %s'), + stale_dev) + self.driver.unplug(stale_dev, + namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + # Get IPv4 only internal CIDRs + internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports + if netaddr.IPNetwork(p['ip_cidr']).version == 4] + # TODO(salv-orlando): RouterInfo would be a better place for + # this logic too + ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or + ri.ex_gw_port and ri.ex_gw_port['id']) + + interface_name = None + if ex_gw_port_id: + interface_name = self.get_external_device_name(ex_gw_port_id) + if ex_gw_port and ex_gw_port != ri.ex_gw_port: + self._set_subnet_info(ex_gw_port) + self.external_gateway_added(ri, ex_gw_port, + interface_name, internal_cidrs) + elif not ex_gw_port and ri.ex_gw_port: + self.external_gateway_removed(ri, ri.ex_gw_port, + interface_name, internal_cidrs) + + stale_devs = [dev for dev in existing_devices + if dev.startswith(EXTERNAL_DEV_PREFIX) + and dev != interface_name] + for stale_dev in stale_devs: + LOG.debug(_('Deleting stale external router device: %s'), + stale_dev) + self.driver.unplug(stale_dev, + bridge=self.conf.external_network_bridge, + namespace=ri.ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + # Process static routes for router + self.routes_updated(ri) + # Process SNAT rules for external gateway + if (not ri.router['distributed'] or + ex_gw_port and ri.router['gw_port_host'] == self.host): + pass +# ri.perform_snat_action(self._handle_router_snat_rules, +# internal_cidrs, interface_name) + + # Process SNAT/DNAT rules for floating IPs + fip_statuses = {} + try: + if ex_gw_port: + existing_floating_ips = ri.floating_ips + self.process_router_floating_ip_nat_rules(ri) + ri.iptables_manager.defer_apply_off() + # Once NAT rules for floating IPs are safely in place + # configure their addresses on the external gateway port + fip_statuses = self.process_router_floating_ip_addresses( + ri, ex_gw_port) + except Exception: + # TODO(salv-orlando): Less broad catching + # All floating IPs must be put in error state + for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): + fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR + + if ex_gw_port: + # Identify floating IPs which were disabled + ri.floating_ips = set(fip_statuses.keys()) + for fip_id in existing_floating_ips - ri.floating_ips: + fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN + # Update floating IP status on the neutron server + self.plugin_rpc.update_floatingip_statuses( + self.context, ri.router_id, fip_statuses) + + # Update ex_gw_port and enable_snat on the router info cache + ri.ex_gw_port = ex_gw_port + ri.snat_ports = snat_ports + ri.enable_snat = ri.router.get('enable_snat') + + def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, + interface_name, action): + # Remove all the rules + # This is safe because if use_namespaces is set as False + # then the agent can only configure one router, otherwise + # each router's SNAT rules will be in their own namespace + if ri.router['distributed']: + iptables_manager = ri.snat_iptables_manager + else: + iptables_manager = ri.iptables_manager + + iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') + 
iptables_manager.ipv4['nat'].empty_chain('snat')
+
+ if not ri.router['distributed']:
+ # Add back the jump to float-snat
+ iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
+
+ # And add them back if the action is add_rules
+ if action == 'add_rules' and ex_gw_port:
+ # ex_gw_port should not be None in this case
+ # NAT rules are added only if ex_gw_port has an IPv4 address
+ for ip_addr in ex_gw_port['fixed_ips']:
+ ex_gw_ip = ip_addr['ip_address']
+ if netaddr.IPAddress(ex_gw_ip).version == 4:
+ rules = self.external_gateway_nat_rules(ex_gw_ip,
+ internal_cidrs,
+ interface_name)
+ for rule in rules:
+ iptables_manager.ipv4['nat'].add_rule(*rule)
+ break
+ iptables_manager.apply()
+
+ def _handle_router_fip_nat_rules(self, ri, interface_name, action):
+ """Configures NAT rules for Floating IPs for DVR.
+
+ Remove all the rules. This is safe because if
+ use_namespaces is set as False then the agent can
+ only configure one router, otherwise each router's
+ NAT rules will be in their own namespace.
+ """
+ ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
+ ri.iptables_manager.ipv4['nat'].empty_chain('snat')
+
+ # Add back the jump to float-snat
+ ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
+
+ # And add them back if the action is add_rules
+ if action == 'add_rules' and interface_name:
+ rule = ('POSTROUTING', '! -i %(interface_name)s '
+ '! -o %(interface_name)s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' %
+ {'interface_name': interface_name})
+ ri.iptables_manager.ipv4['nat'].add_rule(*rule)
+ ri.iptables_manager.apply()
+
+ def process_router_floating_ip_nat_rules(self, ri):
+ """Configure NAT rules for the router's floating IPs.
+
+ Configures iptables rules for the floating IPs of the given router.
+ """
+ # Clear out all iptables rules for floating ips
+ ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
+
+ floating_ips = self.get_floating_ips(ri)
+ # Loop once to ensure that floating ips are configured.
+ for fip in floating_ips:
+ # Rebuild iptables rules for the floating ip.
+ fixed = fip['fixed_ip_address']
+ fip_ip = fip['floating_ip_address']
+ for chain, rule in self.floating_forward_rules(fip_ip, fixed):
+ ri.iptables_manager.ipv4['nat'].add_rule(chain, rule,
+ tag='floating_ip')
+
+ ri.iptables_manager.apply()
+
+ def process_router_floating_ip_addresses(self, ri, ex_gw_port):
+ """Configure IP addresses on router's external gateway interface.
+
+ Ensures addresses for existing floating IPs and cleans up
+ those that should no longer be configured.
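+
+ Returns a dict mapping floating IP id to status for the IPs
+ processed here.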
+ """ + fip_statuses = {} + + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + if ri.router['distributed']: + # filter out only FIPs for this host/agent + floating_ips = [i for i in floating_ips if i['host'] == self.host] + if floating_ips and self.agent_gateway_port is None: + self._create_agent_gateway_port(ri, floating_ips[0] + ['floating_network_id']) + + if self.agent_gateway_port: + if floating_ips and ri.dist_fip_count == 0: + self.create_rtr_2_fip_link(ri, floating_ips[0] + ['floating_network_id']) + interface_name = self.get_rtr_int_device_name(ri.router_id) + else: + # there are no fips or agent port, no work to do + return fip_statuses + else: + interface_name = self.get_external_device_name(ex_gw_port['id']) + + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + existing_cidrs = set([addr['cidr'] for addr in device.addr.list()]) + new_cidrs = set() + + # Loop once to ensure that floating ips are configured. + for fip in floating_ips: + fip_ip = fip['floating_ip_address'] + ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX + + new_cidrs.add(ip_cidr) + + if ip_cidr not in existing_cidrs: + net = netaddr.IPNetwork(ip_cidr) + try: + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError): + # any exception occurred here should cause the floating IP + # to be set in error state + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ERROR) + LOG.warn(_("Unable to configure IP address for " + "floating IP: %s"), fip['id']) + continue + if ri.router['distributed']: + # Special Handling for DVR - update FIP namespace + # and ri.namespace to handle DVR based FIP + self.floating_ip_added_dist(ri, fip) + else: + # As GARP is processed in a distinct thread the call below + # won't raise an exception to be handled. + self._send_gratuitous_arp_packet( + ri.ns_name, interface_name, fip_ip) + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ACTIVE) + + # Clean up addresses that no longer belong on the gateway interface. 
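+ # Only addresses carrying the /32 FLOATING_IP_CIDR_SUFFIX are removed
+ # below, so the gateway port's own address is left in place.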
+ for ip_cidr in existing_cidrs - new_cidrs: + if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX): + net = netaddr.IPNetwork(ip_cidr) + device.addr.delete(net.version, ip_cidr) + if ri.router['distributed']: + self.floating_ip_removed_dist(ri, ip_cidr) + return fip_statuses + + def _get_ex_gw_port(self, ri): + return ri.router.get('gw_port') + + def _arping(self, ns_name, interface_name, ip_address, dist=None): + if dist: + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ns_name) + ip_cidr = str(ip_address) + FLOATING_IP_CIDR_SUFFIX + net = netaddr.IPNetwork(ip_cidr) + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + + arping_cmd = ['arping', '-A', + '-I', interface_name, + '-c', self.conf.send_arp_for_ha, + ip_address] + try: + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ns_name) + ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) + except Exception as e: + LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) + if dist: + device.addr.delete(net.version, ip_cidr) + + def _send_gratuitous_arp_packet(self, ns_name, interface_name, ip_address, + dist=None): + if self.conf.send_arp_for_ha > 0: + eventlet.spawn_n(self._arping, ns_name, interface_name, ip_address, + dist) + + def get_internal_port(self, ri, subnet_id): + """Returns internal router port based on subnet_id.""" + router_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + for port in router_ports: + fips = port['fixed_ips'] + for f in fips: + if f['subnet_id'] == subnet_id: + return port + + def get_internal_device_name(self, port_id): + return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_external_device_name(self, port_id): + return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_ext_device_name(self, port_id): + return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_rtr_int_device_name(self, router_id): + return (RTR_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_int_device_name(self, router_id): + return (FIP_2_RTR_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] + + def get_snat_int_device_name(self, port_id): + return (SNAT_INT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_ns_name(self, ext_net_id): + return (FIP_NS_PREFIX + ext_net_id) + + def get_snat_ns_name(self, ext_gw_port_id): + return (SNAT_NS_PREFIX + ext_gw_port_id) + + def get_snat_interfaces(self, ri): + return ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) + + def get_floating_ips(self, ri): + """Filters Floating IPs for DVR to be hosted on this agent.""" + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + if ri.router['distributed']: + floating_ips = [i for i in floating_ips if i['host'] == self.host] + return floating_ips + + def _map_internal_interfaces(self, ri, int_port, snat_ports): + """Returns the SNAT port for the given internal interface port.""" + fixed_ip = int_port['fixed_ips'][0] + subnet_id = fixed_ip['subnet_id'] + match_port = [p for p in snat_ports if + p['fixed_ips'][0]['subnet_id'] == subnet_id] + if match_port: + return match_port[0] + else: + LOG.debug('DVR: no map match_port found!') + + def _create_dvr_gateway(self, ri, ex_gw_port, gw_interface_name, + internal_cidrs, snat_ports): + """Create SNAT namespace.""" + snat_ns_name = self.get_snat_ns_name(ex_gw_port['id']) + self._create_namespace(snat_ns_name) + # connect snat_ports to br_int from SNAT namespace + for port in snat_ports: + # create interface_name + self._set_subnet_info(port) + 
interface_name = self.get_snat_int_device_name(port['id']) + self._internal_network_added(snat_ns_name, port['network_id'], + port['id'], port['ip_cidr'], + port['mac_address'], interface_name, + SNAT_INT_DEV_PREFIX) + self._external_gateway_added(ri, ex_gw_port, gw_interface_name, + internal_cidrs, snat_ns_name, + preserve_ips=[]) + ri.snat_iptables_manager = ( + iptables_manager.IptablesManager( + root_helper=self.root_helper, namespace=snat_ns_name + ) + ) + + def external_gateway_added(self, ri, ex_gw_port, + interface_name, internal_cidrs): + if ri.router['distributed']: + snat_ports = self.get_snat_interfaces(ri) + for p in ri.internal_ports: + gateway = self._map_internal_interfaces(ri, p, snat_ports) + id_name = self.get_internal_device_name(p['id']) + self._snat_redirect_add(ri, gateway['fixed_ips'][0] + ['ip_address'], p, id_name) + + if self.conf.centralized_snat and ( + ri.router['gw_port_host'] == self.host): + if snat_ports: + self._create_dvr_gateway(ri, ex_gw_port, + interface_name, + internal_cidrs, snat_ports) + for port in snat_ports: + for ip in port['fixed_ips']: + self._update_arp_entry(ri, ip['ip_address'], + port['mac_address'], + ip['subnet_id'], 'add') + return + + # Compute a list of addresses this router is supposed to have. + # This avoids unnecessarily removing those addresses and + # causing a momentarily network outage. + floating_ips = self.get_floating_ips(ri) + preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX + for ip in floating_ips] + + self._external_gateway_added(ri, ex_gw_port, interface_name, + internal_cidrs, ri.ns_name, + preserve_ips) + + def _external_gateway_added(self, ri, ex_gw_port, interface_name, + internal_cidrs, ns_name, preserve_ips): + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=EXTERNAL_DEV_PREFIX) + ri.extern_ip_interface_name = interface_name + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ns_name, + gateway=ex_gw_port['subnet'].get('gateway_ip'), + extra_subnets=ex_gw_port.get('extra_subnets', []), + preserve_ips=preserve_ips) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ns_name, + interface_name, ip_address) + + def agent_gateway_added(self, ns_name, ex_gw_port, + interface_name): + """Adds Floating IP gateway port to FIP namespace.""" + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ns_name) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) + + gw_ip = ex_gw_port['subnet']['gateway_ip'] + if gw_ip: + ipd = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ns_name) + ipd.route.add_gateway(gw_ip) + + cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def internal_ns_interface_added(self, ip_cidr, + interface_name, ns_name): + ip_wrapper = 
ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + ip_wrapper.netns.execute(['ip', 'addr', 'add', + ip_cidr, 'dev', interface_name]) + + def external_gateway_removed(self, ri, ex_gw_port, + interface_name, internal_cidrs): + if ri.router['distributed']: + for p in ri.internal_ports: + internal_interface = self.get_internal_device_name(p['id']) + self._snat_redirect_remove(ri, p, internal_interface) + + if self.conf.centralized_snat and ( + ex_gw_port['binding:host_id'] == self.host): + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + else: + # not hosting agent - no work to do + LOG.debug('DVR: CSNAT not hosted: %s', ex_gw_port) + return + else: + ns_name = ri.ns_name + + self.driver.unplug(interface_name, + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=EXTERNAL_DEV_PREFIX) + if ri.router['distributed']: + self._destroy_snat_namespace(ns_name) + + def metadata_filter_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' + '-p tcp -m tcp --dport %s ' + '-j ACCEPT' % self.conf.metadata_port)) + return rules + + def metadata_nat_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j REDIRECT ' + '--to-port %s' % self.conf.metadata_port)) + return rules + + def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs, + interface_name): + rules = [('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name})] + for cidr in internal_cidrs: + rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr)) + return rules + + def _gen_snat_idx(self, cidr): + """Generate index based on cidr for SNAT entries.""" + ip = cidr.split('/')[0] + ip_str = ip.split('.') + ip_num = (((int(ip_str[0])) << 24) + ((int(ip_str[1])) << 16) + + ((int(ip_str[2])) << 8) + (int(ip_str[3]))) + return ip_num + + def _snat_redirect_add(self, ri, gateway, sn_port, sn_int): + """Adds rules and routes for SNAT redirection.""" + try: + snat_idx = self._gen_snat_idx(sn_port['ip_cidr']) + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, + namespace=ri.ns_name) + ns_ipd.route.add_gateway(gateway, table=snat_idx) + ns_ipr.add_rule_from(sn_port['ip_cidr'], snat_idx, snat_idx) + ns_ipr.netns.execute(['sysctl', '-w', + 'net.ipv4.conf.all.send_redirects=0']) + ns_ipr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 
+ 'send_redirects=0' % sn_int]) + except Exception: + LOG.exception(_('DVR: error adding redirection logic')) + + def _snat_redirect_remove(self, ri, sn_port, sn_int): + """Removes rules and routes for SNAT redirection.""" + try: + snat_idx = self._gen_snat_idx(sn_port['ip_cidr']) + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, + namespace=ri.ns_name) + ns_ipd.route.delete_gateway(table=snat_idx) + ns_ipr.delete_rule_priority(snat_idx) + except Exception: + LOG.exception(_('DVR: removed snat failed')) + + def _internal_network_added(self, ns_name, network_id, port_id, + internal_cidr, mac_address, + interface_name, prefix): + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(network_id, port_id, interface_name, mac_address, + namespace=ns_name, + prefix=prefix) + + self.driver.init_l3(interface_name, [internal_cidr], + namespace=ns_name) + ip_address = internal_cidr.split('/')[0] + self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) + + def internal_network_added(self, ri, port): + network_id = port['network_id'] + port_id = port['id'] + internal_cidr = port['ip_cidr'] + mac_address = port['mac_address'] + + interface_name = self.get_internal_device_name(port_id) + + self._internal_network_added(ri.ns_name, network_id, port_id, + internal_cidr, mac_address, + interface_name, INTERNAL_DEV_PREFIX) + + if ri.router['distributed'] and ri.ex_gw_port: + ex_gw_port = ri.ex_gw_port + snat_ports = self.get_snat_interfaces(ri) + snat_ip = self._map_internal_interfaces(ri, port, snat_ports) + self._snat_redirect_add(ri, snat_ip['fixed_ips'][0] + ['ip_address'], port, interface_name) + if self.conf.centralized_snat and ( + ri.router['gw_port_host'] == self.host): + for port in snat_ports: + self._set_subnet_info(port) + interface_name = self.get_snat_int_device_name(port['id']) + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + self._internal_network_added(ns_name, port['network_id'], + port['id'], internal_cidr, + port['mac_address'], + interface_name, + SNAT_INT_DEV_PREFIX) + + def internal_network_removed(self, ri, port): + port_id = port['id'] + interface_name = self.get_internal_device_name(port_id) + if ri.router['distributed'] and ri.ex_gw_port: + # DVR handling code for SNAT + ex_gw_port = ri.ex_gw_port + self._snat_redirect_remove(ri, port, interface_name) + if self.conf.centralized_snat and ( + ri.ex_gw_port['binding:host_id'] == self.host): + snat_port = self._map_internal_interfaces(ri, port, + ri.snat_ports) + snat_interface = ( + self.get_snat_int_device_name(snat_port['id']) + ) + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + prefix = SNAT_INT_DEV_PREFIX + if ip_lib.device_exists(snat_interface, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.unplug(snat_interface, namespace=ns_name, + prefix=prefix) + + if ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.unplug(interface_name, namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + def internal_network_nat_rules(self, ex_gw_ip, internal_cidr): + rules = [('snat', '-s %s -j SNAT --to-source %s' % + (internal_cidr, ex_gw_ip))] + return rules + + def _create_agent_gateway_port(self, ri, network_id): + """Creates Floating IP gateway port. + + Request port creation from Plugin then creates + Floating IP namespace and adds gateway port. 
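+
+ The resulting port is cached in self.agent_gateway_port and reused
+ for subsequent floating IPs.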
+ """ + # Port does not exist, request port from plugin + self.agent_gateway_port = ( + self.plugin_rpc.get_agent_gateway_port( + self.context, network_id)) + if 'subnet' not in self.agent_gateway_port: + LOG.error(_('Missing subnet/agent_gateway_port')) + return + self._set_subnet_info(self.agent_gateway_port) + + # add fip-namespace and agent_gateway_port + fip_ns_name = ( + self.get_fip_ns_name(str(network_id))) + self._create_namespace(fip_ns_name) + interface_name = ( + self.get_fip_ext_device_name(self.agent_gateway_port['id'])) + self.agent_gateway_added(fip_ns_name, self.agent_gateway_port, + interface_name) + + def create_rtr_2_fip_link(self, ri, network_id): + """Creates interface between router and Floating IP namespace.""" + rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + fip_ns_name = self.get_fip_ns_name(str(network_id)) + + # add link local IP to interface + if ri.rtr_2_fip is None: + ri.rtr_2_fip = FIP_LL_PREFIX + str(self.local_ips.pop()) + if ri.fip_2_rtr is None: + ri.fip_2_rtr = FIP_LL_PREFIX + str(self.local_ips.pop()) + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + int_dev = ip_wrapper.add_veth(rtr_2_fip_name, + fip_2_rtr_name, fip_ns_name) + self.internal_ns_interface_added(ri.rtr_2_fip + '/31', + rtr_2_fip_name, ri.ns_name) + self.internal_ns_interface_added(ri.fip_2_rtr + '/31', + fip_2_rtr_name, fip_ns_name) + int_dev[0].link.set_up() + int_dev[1].link.set_up() + # add default route for the link local interface + device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, + namespace=ri.ns_name) + device.route.add_gateway(ri.fip_2_rtr, table=FIP_RT_TBL) + # setup the NAT rules and chains + self._handle_router_fip_nat_rules(ri, rtr_2_fip_name, 'add_rules') + + def floating_ip_added_dist(self, ri, fip): + """Adds floating IP to FIP namespace.""" + floating_ip = fip['floating_ip_address'] + fixed_ip = fip['fixed_ip_address'] + rule_pr = self.fip_priorities.pop() + ri.floating_ips_dict[floating_ip] = rule_pr + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + ipRule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ipRule.add_rule_from(fixed_ip, FIP_RT_TBL, rule_pr) + + # Add routing rule in fip namespace + fip_cidr = str(floating_ip) + FLOATING_IP_CIDR_SUFFIX + fip_ns_name = self.get_fip_ns_name(str(fip['floating_network_id'])) + device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, + namespace=fip_ns_name) + device.route.add_route(fip_cidr, ri.rtr_2_fip) + interface_name = ( + self.get_fip_ext_device_name(self.agent_gateway_port['id'])) + self._send_gratuitous_arp_packet(fip_ns_name, + interface_name, floating_ip, + dist=True) + # update internal structures + self.agent_fip_count = self.agent_fip_count + 1 + ri.dist_fip_count = ri.dist_fip_count + 1 + + def floating_ip_removed_dist(self, ri, fip_cidr): + """Removes floating IP from FIP namespace.""" + floating_ip = fip_cidr.split('/')[0] + rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + fip_ns_name = self.get_fip_ns_name(str(self._fetch_external_net_id())) + ip_rule_rtr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + if floating_ip in ri.floating_ips_dict: + rule_pr = ri.floating_ips_dict[floating_ip] + # TODO(rajeev): Handle else case - exception/log? 
+ else: + rule_pr = None + + ip_rule_rtr.delete_rule_priority(rule_pr) + self.fip_priorities.add(rule_pr) + device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, + namespace=fip_ns_name) + + device.route.delete_route(fip_cidr, ri.rtr_2_fip) + # check if this is the last FIP for this router + ri.dist_fip_count = ri.dist_fip_count - 1 + if ri.dist_fip_count == 0: + # remove default route entry + device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, + namespace=ri.ns_name) + device.route.delete_gateway(ri.fip_2_rtr, table=FIP_RT_TBL) + self.local_ips.add(ri.rtr_2_fip.rsplit('.', 1)[1]) + ri.rtr_2_fip = None + self.local_ips.add(ri.fip_2_rtr.rsplit('.', 1)[1]) + ri.fip_2_rtr = None + # TODO(mrsmith): remove interface + # clean up fip-namespace if this is the last FIP + self.agent_fip_count = self.agent_fip_count - 1 + if self.agent_fip_count == 0: + self._destroy_fip_namespace(fip_ns_name) + + def floating_forward_rules(self, floating_ip, fixed_ip): + return [('PREROUTING', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('float-snat', '-s %s -j SNAT --to %s' % + (fixed_ip, floating_ip))] + + def router_deleted(self, context, router_id): + """Deal with router deletion RPC message.""" + LOG.debug(_('Got router deleted notification for %s'), router_id) + self.removed_routers.add(router_id) + + def _update_arp_entry(self, ri, ip, mac, subnet_id, operation): + """Add or delete arp entry into router namespace.""" + port = self.get_internal_port(ri, subnet_id) + if 'id' in port: + ip_cidr = str(ip) + '/32' + try: + # TODO(mrsmith): optimize the calls below for bulk calls + net = netaddr.IPNetwork(ip_cidr) + interface_name = self.get_internal_device_name(port['id']) + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + if operation == 'add': + device.neigh.add(net.version, ip, mac) + elif operation == 'delete': + device.neigh.delete(net.version, ip, mac) + except Exception: + LOG.exception(_("DVR: Failed updating arp entry")) + + def _update_static_arp_entry(self, ri, operation, ip, mac): + """Add or delete arp entry into router namespace.""" + if ri.extern_ip_interface_name: + ip_cidr = str(ip) + '/32' + try: + # TODO(mrsmith): optimize the calls below for bulk calls + net = netaddr.IPNetwork(ip_cidr) + interface_name = ri.extern_ip_interface_name + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + if operation == 'add': + device.neigh.add(net.version, ip, mac) + elif operation == 'delete': + device.neigh.delete(net.version, ip, mac) + except Exception: + LOG.exception(_("DVR: Failed updating arp entry")) + + def add_arp_entry(self, context, payload): + """Adds arp entry into router namespace from RPC.""" + arp_table = payload['arp_table'] + router_id = payload['router_id'] + ip = arp_table['ip_address'] + mac = arp_table['mac_address'] + subnet_id = arp_table['subnet_id'] + ri = self.router_info.get(router_id) + self._update_arp_entry(ri, ip, mac, subnet_id, 'add') + + def delete_arp_entry(self, context, payload): + """Deletes arp entry into router namespace from RPC.""" + arp_table = payload['arp_table'] + router_id = payload['router_id'] + ip = arp_table['ip_address'] + mac = arp_table['mac_address'] + subnet_id = arp_table['subnet_id'] + ri = self.router_info.get(router_id) + self._update_arp_entry(ri, ip, mac, subnet_id, 'delete') + + def routers_updated(self, context, routers): + """Deal with routers modification and creation RPC message.""" + 
LOG.debug(_('Got routers updated notification :%s'), routers) + if routers: + # This is needed for backward compatibility + if isinstance(routers[0], dict): + routers = [router['id'] for router in routers] + self.updated_routers.update(routers) + + def router_removed_from_agent(self, context, payload): + LOG.debug(_('Got router removed from agent :%r'), payload) + self.removed_routers.add(payload['router_id']) + + def router_added_to_agent(self, context, payload): + LOG.debug(_('Got router added to agent :%r'), payload) + self.routers_updated(context, payload) + + def _process_routers(self, routers, all_routers=False): + pool = eventlet.GreenPool() + if (self.conf.external_network_bridge and + not ip_lib.device_exists(self.conf.external_network_bridge)): + LOG.error(_("The external network bridge '%s' does not exist"), + self.conf.external_network_bridge) + return + + target_ex_net_id = self._fetch_external_net_id() + # if routers are all the routers we have (They are from router sync on + # starting or when error occurs during running), we seek the + # routers which should be removed. + # If routers are from server side notification, we seek them + # from subset of incoming routers and ones we have now. + if all_routers: + prev_router_ids = set(self.router_info) + else: + prev_router_ids = set(self.router_info) & set( + [router['id'] for router in routers]) + cur_router_ids = set() + for r in routers: + # If namespaces are disabled, only process the router associated + # with the configured agent id. + if (not self.conf.use_namespaces and + r['id'] != self.conf.router_id): + continue + ex_net_id = (r['external_gateway_info'] or {}).get('network_id') + if not ex_net_id and not self.conf.handle_internal_only_routers: + continue + if (target_ex_net_id and ex_net_id and + ex_net_id != target_ex_net_id): + # Double check that our single external_net_id has not changed + # by forcing a check by RPC. + if (ex_net_id != self._fetch_external_net_id(force=True)): + continue + cur_router_ids.add(r['id']) + if r['id'] not in self.router_info: + self._router_added(r['id'], r) + ri = self.router_info[r['id']] + ri.router = r + pool.spawn_n(self.process_router, ri) + # identify and remove routers that no longer exist + for router_id in prev_router_ids - cur_router_ids: + pool.spawn_n(self._router_removed, router_id) + pool.waitall() + + @lockutils.synchronized('l3-agent', 'neutron-') + def _rpc_loop(self): + # _rpc_loop and _sync_routers_task will not be + # executed in the same time because of lock. + # so we can clear the value of updated_routers + # and removed_routers, but they can be updated by + # updated_routers and removed_routers rpc call + try: + LOG.debug(_("Starting RPC loop for %d updated routers"), + len(self.updated_routers)) + if self.updated_routers: + # We're capturing and clearing the list, and will + # process the "captured" updates in this loop, + # and any updates that happen due to a context switch + # will be picked up on the next pass. 
+ updated_routers = set(self.updated_routers) + self.updated_routers.clear() + router_ids = list(updated_routers) + routers = self.plugin_rpc.get_routers( + self.context, router_ids) + # routers with admin_state_up=false will not be in the fetched + fetched = set([r['id'] for r in routers]) + self.removed_routers.update(updated_routers - fetched) + + self._process_routers(routers) + self._process_router_delete() + LOG.debug(_("RPC loop successfully completed")) + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + def _process_router_delete(self): + current_removed_routers = list(self.removed_routers) + for router_id in current_removed_routers: + self._router_removed(router_id) + self.removed_routers.remove(router_id) + + def _router_ids(self): + if not self.conf.use_namespaces: + return [self.conf.router_id] + + @periodic_task.periodic_task + @lockutils.synchronized('l3-agent', 'neutron-') + def _sync_routers_task(self, context): + if self.services_sync: + super(L3NATAgent, self).process_services_sync(context) + LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), + self.fullsync) + if not self.fullsync: + return + try: + router_ids = self._router_ids() + self.updated_routers.clear() + self.removed_routers.clear() + routers = self.plugin_rpc.get_routers( + context, router_ids) + + LOG.debug(_('Processing :%r'), routers) + self._process_routers(routers, all_routers=True) + self.fullsync = False + LOG.debug(_("_sync_routers_task successfully completed")) + except rpc_compat.RPCException: + LOG.exception(_("Failed synchronizing routers due to RPC error")) + self.fullsync = True + return + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + # Resync is not necessary for the cleanup of stale + # namespaces. 
+ if self._clean_stale_namespaces: + self._cleanup_namespaces(routers) + + def after_start(self): + LOG.info(_("L3 agent started")) + + def _update_routing_table(self, ri, operation, route): + cmd = ['ip', 'route', operation, 'to', route['destination'], + 'via', route['nexthop']] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def _update_ip_rule(self, ri, operation, dest_ip, priority): + ipRule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + table = priority + rule_pr = priority + if(operation == 'add'): + ipRule.add_rule_to(dest_ip, table, rule_pr) + else: + ipRule.delete_rule_priority(rule_pr) + + def _update_route_entry(self, ri, operation, gateway_ip, onlink, table): + device = ip_lib.IPDevice(ri.extern_ip_interface_name, + self.root_helper, + namespace=ri.ns_name) + if(operation == 'add'): + device.route.add_gateway_onlink(gateway_ip, 'onlink', table) + if(operation == 'delete'): + device.route.del_route_table(table) + + def _update_ip_neigh_and_ip_rule(self, ri, opration, route): + next_hop = route['nexthop'] + dest_cidr = route['destination'] + dvr_mac = route['nexthop_dvr_mac'] + nhp = ri.next_hop_ip_mac_map.get(next_hop) + if(opration == 'add'): + if(nhp): + self._update_ip_rule(ri, 'add', dest_cidr, nhp.priority) + else: + priority = ri.available_ip_rule_priority.pop() + nhp = NextHopMACPriority(next_hop, dvr_mac, priority) + ri.next_hop_ip_mac_map[next_hop] = nhp + self._update_static_arp_entry(ri, 'add', next_hop, dvr_mac) + self._update_route_entry(ri, 'add', next_hop, + 'onlink', priority) + self._update_ip_rule(ri, 'add', dest_cidr, priority) + if(opration == 'delete'): + self._update_route_entry(ri, 'delete', next_hop, '', nhp.priority) + self._update_static_arp_entry(ri, 'delete', next_hop, dvr_mac) + if(nhp): + self._update_ip_rule(ri, 'delete', dest_cidr, nhp.priority) + ri.next_hop_ip_mac_map.pop(next_hop, None) + + def write_to_file(self, path, contents): + with open(path, 'w') as f: + f.write(contents) + + def _notify_l2_agent(self, ri): + if not ri: + return + ext_inter_name = ri.extern_ip_interface_name + if(not ext_inter_name): + return + if(len(ri.internal_ports) <= 0): + #self.notify_l2_agent_info.pop(ext_inter_name, None) + self.notify_l2_agent_info[ext_inter_name] = {} + ri.next_hop_ip_mac_map = {} + else: + # ip_mac_map = self.notify_l2_agent_info.get(ext_inter_name, None) + # if(not ip_mac_map): + self.notify_l2_agent_info[ext_inter_name] = {} + for ip, nhp in ri.next_hop_ip_mac_map.items(): + self.notify_l2_agent_info[ext_inter_name][nhp.dvr_mac] = ip + path = os.path.join(self.conf.notify_l2_file_path, + self.conf.notify_l2_file_name) + self.write_to_file(path, jsonutils.dumps(self.notify_l2_agent_info)) + + def routes_updated(self, ri): + new_routes = ri.router['routes'] + old_routes = ri.routes + adds, removes = common_utils.diff_list_of_dict(old_routes, + new_routes) + for route in adds: + LOG.debug(_("Added route entry is '%s'"), route) + # remove replaced route from deleted route + for del_route in removes: + if route['destination'] == del_route['destination']: + removes.remove(del_route) + if(route.get('nexthop_dvr_mac') and route.get('onlink')): + self._update_ip_neigh_and_ip_rule(ri, 'add', route) + else: + # replace success even if there is no existing route + self._update_routing_table(ri, 'replace', route) + for route in removes: + LOG.debug(_("Removed route entry is '%s'"), route) + if(route.get('nexthop_dvr_mac') and route.get('onlink')): + 
self._update_ip_neigh_and_ip_rule(ri, 'delete', route) + else: + self._update_routing_table(ri, 'delete', route) + ri.routes = new_routes + self._notify_l2_agent(ri) + + +class L3NATAgentWithStateReport(L3NATAgent): + + def __init__(self, host, conf=None): + super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-l3-agent', + 'host': host, + 'topic': topics.L3_AGENT, + 'configurations': { + 'distributed_agent': self.conf.distributed_agent, + 'centralized_snat': self.conf.centralized_snat, + 'centralized_router': self.conf.centralized_router, + 'use_namespaces': self.conf.use_namespaces, + 'router_id': self.conf.router_id, + 'handle_internal_only_routers': + self.conf.handle_internal_only_routers, + 'external_network_bridge': self.conf.external_network_bridge, + 'gateway_external_network_id': + self.conf.gateway_external_network_id, + 'interface_driver': self.conf.interface_driver}, + 'start_flag': True, + 'agent_type': l3_constants.AGENT_TYPE_L3} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + LOG.debug(_("Report state task started")) + num_ex_gw_ports = 0 + num_interfaces = 0 + num_floating_ips = 0 + router_infos = self.router_info.values() + num_routers = len(router_infos) + for ri in router_infos: + ex_gw_port = self._get_ex_gw_port(ri) + if ex_gw_port: + num_ex_gw_ports += 1 + num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, + [])) + num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, + [])) + configurations = self.agent_state['configurations'] + configurations['routers'] = num_routers + configurations['ex_gw_ports'] = num_ex_gw_ports + configurations['interfaces'] = num_interfaces + configurations['floating_ips'] = num_floating_ips + try: + self.state_rpc.report_state(self.context, self.agent_state, + self.use_call) + self.agent_state.pop('start_flag', None) + self.use_call = False + LOG.debug(_("Report state task successfully completed")) + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." 
+ " State report for this agent will be disabled.")) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.fullsync = True + LOG.info(_("agent_updated by server side %s!"), payload) + + +def main(manager='neutron.agent.l3_agent.L3NATAgentWithStateReport'): + conf = cfg.CONF + conf.register_opts(L3NATAgent.OPTS) + config.register_interface_driver_opts_helper(conf) + config.register_use_namespaces_opts_helper(conf) + config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + conf.register_opts(interface.OPTS) + conf.register_opts(external_process.OPTS) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-l3-agent', + topic=topics.L3_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager=manager) + service.launch(server).wait() diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/agent/l3_proxy.py b/icehouse-patches/neutron/vlan2vlan/neutron/agent/l3_proxy.py new file mode 100644 index 00000000..ad9a2ccd --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/agent/l3_proxy.py @@ -0,0 +1,1992 @@ +''' +Created on 2014-5-23 + +@author: j00209498 +''' +import sys +import json +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import iptables_manager +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as l3_constants +from neutron.common import rpc as rpc_compat +from neutron.common import topics +from neutron.common import utils as common_utils +from neutron import context +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import periodic_task +from neutron.openstack.common import processutils +from neutron.openstack.common import service +from neutron import service as neutron_service +from neutron.services.firewall.agents.l3reference import firewall_l3_agent + +from neutron.plugins.l2_proxy.agent import neutron_proxy_context +from neutron.plugins.l2_proxy.agent import clients + +LOG = logging.getLogger(__name__) +NS_PREFIX = 'qrouter-' +INTERNAL_DEV_PREFIX = 'qr-' +EXTERNAL_DEV_PREFIX = 'qg-' +SNAT_INT_DEV_PREFIX = 'sg-' +FIP_NS_PREFIX = 'fip-' +SNAT_NS_PREFIX = 'snat-' +FIP_2_RTR_DEV_PREFIX = 'fpr-' +RTR_2_FIP_DEV_PREFIX = 'rfp-' +FIP_EXT_DEV_PREFIX = 'fg-' +FIP_LL_PREFIX = '169.254.30.' +# Route Table index for FIPs +FIP_RT_TBL = 16 +# Rule priority range for FIPs +FIP_PR_ST = 32768 +FIP_PR_END = FIP_PR_ST + 40000 +RPC_LOOP_INTERVAL = 1 +FLOATING_IP_CIDR_SUFFIX = '/32' + + +class L3PluginApi(rpc_compat.RpcProxy): + + """Agent side of the l3 agent RPC API. + + API version history: + 1.0 - Initial version. 
+ 1.1 - Floating IP operational status updates + 1.2 - DVR support + + """ + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic, host): + super(L3PluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.host = host + + def get_routers(self, context, router_ids=None): + """Make a remote process call to retrieve the sync data for routers.""" + return self.call(context, + self.make_msg('sync_routers', host=self.host, + router_ids=router_ids), + topic=self.topic) + + def get_external_network_id(self, context): + """Make a remote process call to retrieve the external network id. + + @raise rpc_compat.RemoteError: with TooManyExternalNetworks + as exc_type if there are + more than one external network + """ + return self.call(context, + self.make_msg('get_external_network_id', + host=self.host), + topic=self.topic) + + def update_floatingip_statuses(self, context, router_id, fip_statuses): + """Call the plugin update floating IPs's operational status.""" + return self.call(context, + self.make_msg('update_floatingip_statuses', + router_id=router_id, + fip_statuses=fip_statuses), + topic=self.topic, + version='1.1') + + def get_ports_by_subnet(self, context, subnet_id): + """Retrieve ports by subnet id.""" + return self.call(context, + self.make_msg('get_ports_by_subnet', host=self.host, + subnet_id=subnet_id), + topic=self.topic, + version='1.2') + + def get_agent_gateway_port(self, context, fip_net): + """Get or create a agent_gateway_port.""" + return self.call(context, + self.make_msg('get_agent_gateway_port', + network_id=fip_net, host=self.host), + topic=self.topic, + version='1.2') + + def update_router_extern_ip_map(self, context, router_id, gateway_ip): + """update router and extern ip mapping""" + return self.call(context, + self.make_msg('update_router_extern_ip_map', + router_id=router_id, host=self.host, + gateway_ip=gateway_ip), + topic=self.topic, + version='1.2') + + def get_extra_routes_by_subnet(self, context, router_id, subnet_id): + """get extra routes for router by subnet id""" + return self.call(context, + self.make_msg('get_extra_routes_by_subnet', + router_id=router_id, host=self.host, + subnet_id=subnet_id), + topic=self.topic, + version='1.2') + + +class RouterInfo(object): + + def __init__(self, router_id, root_helper, use_namespaces, router): + self.router_id = router_id + self.cascaded_router_id = None + self.extern_extra_routes = {} + self.extra_routes_is_update = False + self.local_internal_ports = [] + self.ex_gw_port = None + self._snat_enabled = None + self._snat_action = None + self.internal_ports = [] + self.snat_ports = [] + self.floating_ips = set() + # TODO(mrsmith): DVR merge cleanup + self.floating_ips_dict = {} + self.root_helper = root_helper + self.use_namespaces = use_namespaces + # Invoke the setter for establishing initial SNAT action + self.router = router + self.ns_name = NS_PREFIX + router_id if use_namespaces else None + self.iptables_manager = iptables_manager.IptablesManager( + root_helper=root_helper, + # FIXME(danwent): use_ipv6=True, + namespace=self.ns_name) + self.routes = [] + # DVR Data + # Linklocal router to floating IP addr + self.rtr_2_fip = None + # Linklocal floating to router IP addr + self.fip_2_rtr = None + self.dist_fip_count = 0 + + @property + def router(self): + return self._router + + @router.setter + def router(self, value): + self._router = value + if not self._router: + return + # enable_snat by default if it wasn't specified by plugin + self._snat_enabled = 
self._router.get('enable_snat', True) + # Set a SNAT action for the router + if self._router.get('gw_port'): + self._snat_action = ('add_rules' if self._snat_enabled + else 'remove_rules') + elif self.ex_gw_port: + # Gateway port was removed, remove rules + self._snat_action = 'remove_rules' + + def perform_snat_action(self, snat_callback, *args): + # Process SNAT rules for attached subnets + if self._snat_action: + snat_callback(self, self._router.get('gw_port'), + *args, action=self._snat_action) + self._snat_action = None + + +class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, manager.Manager): + + """Manager for L3NatAgent + + API version history: + 1.0 initial Version + 1.1 changed the type of the routers parameter + to the routers_updated method. + It was previously a list of routers in dict format. + It is now a list of router IDs only. + Per rpc versioning rules, it is backwards compatible. + """ + RPC_API_VERSION = '1.1' + + OPTS = [ + cfg.BoolOpt('distributed_agent', default=False, + help=_("Enables distributed router agent function.")), + cfg.BoolOpt('centralized_snat', default=False, + help=_("Enables centralized SNAT in dvr mode.")), + cfg.BoolOpt('centralized_router', default=True, + help=_("Enables centralized router in dvr mode.")), + cfg.StrOpt('external_network_bridge', default='br-ex', + help=_("Name of bridge used for external network " + "traffic.")), + cfg.IntOpt('metadata_port', + default=9697, + help=_("TCP Port used by Neutron metadata namespace " + "proxy.")), + cfg.IntOpt('send_arp_for_ha', + default=3, + help=_("Send this many gratuitous ARPs for HA setup, if " + "less than or equal to 0, the feature is disabled")), + cfg.StrOpt('router_id', default='', + help=_("If namespaces is disabled, the l3 agent can only" + " configure a router that has the matching router " + "ID.")), + cfg.BoolOpt('handle_internal_only_routers', + default=True, + help=_("Agent should implement routers with no gateway")), + cfg.StrOpt('gateway_external_network_id', default='', + help=_("UUID of external network for routers implemented " + "by the agents.")), + cfg.BoolOpt('enable_metadata_proxy', default=True, + help=_("Allow running metadata proxy.")), + cfg.BoolOpt('router_delete_namespaces', default=False, + help=_("Delete namespace after removing a router.")), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + # add by j00209498 + cfg.StrOpt('cascaded_os_region_name', default=None, + help=_("region name to use")), + cfg.StrOpt('cascaded_auth_url', + default='http://127.0.0.1:35357/v2.0', + help=_("keystone auth url to use")), + cfg.StrOpt('cascaded_admin_user_name', + help=_("access neutron user name to use")), + cfg.StrOpt('cascaded_admin_password', + help=_("access neutron password to use")), + cfg.StrOpt('cascaded_tenant_name', + help=_("access neutron tenant to use")), + cfg.StrOpt('cascaded_extern_subnet_cidr', + default='100.64.1.0/24', + help=_("cascaded_extern_subnet_cidr")), + cfg.StrOpt('cascaded_start_extern_ip', + default='100.64.1.2', + help=_("cascaded_start_extern_ip")), + cfg.StrOpt('cascaded_end_extern_ip', + default='100.64.1.254', + help=_("cascaded_end_extern_ip")), + cfg.StrOpt('cascaded_extern_network_type', + default='flat', + help=_("cascaded_extern_net_type")), + cfg.StrOpt('cascaded_extern_physical_network', + default='EXT', + help=_("cascaded_extern_physical_net")), + ] + + def __init__(self, host, conf=None): + if conf: + self.conf = conf + else: + 
self.conf = cfg.CONF + self.root_helper = config.get_root_helper(self.conf) + self.router_info = {} + + self._check_config_params() + + try: + self.driver = importutils.import_object( + self.conf.interface_driver, + self.conf + ) + except Exception: + msg = _("Error importing interface driver " + "'%s'") % self.conf.interface_driver + LOG.error(msg) + raise SystemExit(1) + + self.context = context.get_admin_context_without_session() + self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) + self.fullsync = True + self.updated_routers = set() + self.removed_routers = set() + self.sync_progress = False + + # TODO(mrsmith): remove once agent restart with + # stale namespaces is supported for DVR + root_ip = ip_lib.IPWrapper(self.root_helper) + host_namespaces = root_ip.get_namespaces(self.root_helper) + snat_namespaces = set(ns for ns in host_namespaces + if ns.startswith(SNAT_NS_PREFIX)) + self._destroy_stale_router_namespaces(snat_namespaces) + fip_namespaces = set(ns for ns in host_namespaces + if ns.startswith(FIP_NS_PREFIX)) + self._destroy_stale_router_namespaces(fip_namespaces) + + self._clean_stale_namespaces = self.conf.use_namespaces + + # added by j00209498 cascading data + self.network_map = {} + self.subnet_map = {} + self.cascaded_extern_net_id = None + + # dvr data + self.agent_gateway_port = None + self.agent_fip_count = 0 + self.local_ips = set(xrange(2, 251)) + self.fip_priorities = set(xrange(FIP_PR_ST, FIP_PR_END)) + + self.rpc_loop = loopingcall.FixedIntervalLoopingCall( + self._rpc_loop) + self.rpc_loop.start(interval=RPC_LOOP_INTERVAL) + super(L3NATAgent, self).__init__(conf=self.conf) + + self.target_ex_net_id = None + + def _check_config_params(self): + """Check items in configuration files. + + Check for required and invalid configuration items. + The actual values are not verified for correctness. + """ + if not self.conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + + if not self.conf.use_namespaces and not self.conf.router_id: + msg = _('Router id is required if not using namespaces.') + LOG.error(msg) + raise SystemExit(1) + + def _cleanup_namespaces(self, routers): + """Destroy stale router namespaces on host when L3 agent restarts + + This routine is called when self._clean_stale_namespaces is True. + + The argument routers is the list of routers that are recorded in + the database as being hosted on this node. + """ + try: + root_ip = ip_lib.IPWrapper(self.root_helper) + + host_namespaces = root_ip.get_namespaces(self.root_helper) + router_namespaces = set(ns for ns in host_namespaces + if ns.startswith(NS_PREFIX)) + ns_to_ignore = set(NS_PREFIX + r['id'] for r in routers) + # TODO(mrsmith): include DVR SNAT namespaces, FIP namespaces + ns_to_destroy = router_namespaces - ns_to_ignore + except RuntimeError: + LOG.exception(_('RuntimeError in obtaining router list ' + 'for namespace cleanup.')) + else: + self._destroy_stale_router_namespaces(ns_to_destroy) + + def _destroy_stale_router_namespaces(self, router_namespaces): + """Destroys the stale router namespaces + + The argumenet router_namespaces is a list of stale router namespaces + + As some stale router namespaces may not be able to be deleted, only + one attempt will be made to delete them. 
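+
+ Also clears the agent's _clean_stale_namespaces flag.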
+ """ + for ns in router_namespaces: + try: + self._destroy_namespace(ns) + except RuntimeError: + LOG.exception(_('Failed to destroy stale router namespace ' + '%s'), ns) + self._clean_stale_namespaces = False + + def _destroy_namespace(self, ns): + if ns.startswith(NS_PREFIX): + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns) + self._destroy_router_namespace(ns) + elif ns.startswith(FIP_NS_PREFIX): + self._destroy_fip_namespace(ns) + elif ns.startswith(SNAT_NS_PREFIX): + self._destroy_snat_namespace(ns) + + def _destroy_snat_namespace(self, ns_name): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + # delete internal interfaces + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(SNAT_INT_DEV_PREFIX): + LOG.debug('DVR: unplug: %s', d.name) + self.driver.unplug(d.name, namespace=ns_name, + prefix=SNAT_INT_DEV_PREFIX) + + # TODO(mrsmith): delete ext-gw-port + LOG.debug('DVR: destroy snat ns: %s', ns_name) + ns_ip.netns.delete(ns_name) + + def _destroy_fip_namespace(self, ns_name): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(FIP_2_RTR_DEV_PREFIX): + # internal link between IRs and FIP NS + # TODO(mrsmith): remove IR interfaces (IP pool?) + pass + elif d.name.startswith(FIP_EXT_DEV_PREFIX): + # single port from FIP NS to br-ext + # TODO(mrsmith): remove br-ext interface + LOG.debug('DVR: unplug: %s', d.name) + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + LOG.debug('DVR: destroy fip ns: %s', ns_name) + # TODO(mrsmith): add LOG warn if fip count != 0 + ns_ip.netns.delete(ns_name) + self.agent_gateway_port = None + + def _destroy_router_namespace(self, namespace): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(INTERNAL_DEV_PREFIX): + # device is on default bridge + self.driver.unplug(d.name, namespace=namespace, + prefix=INTERNAL_DEV_PREFIX) + elif d.name.startswith(EXTERNAL_DEV_PREFIX): + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=namespace, + prefix=EXTERNAL_DEV_PREFIX) + + if self.conf.router_delete_namespaces: + try: + ns_ip.netns.delete(namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + LOG.exception(msg % namespace) + + def _create_namespace(self, name): + ip_wrapper_root = ip_lib.IPWrapper(self.root_helper) + ip_wrapper = ip_wrapper_root.ensure_namespace(name) + LOG.debug('DVR: ns-name: %s', name) + ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) + + def _create_router_namespace(self, ri): + self._create_namespace(ri.ns_name) + + def _fetch_external_net_id(self, force=False): + """Find UUID of single external network for this agent.""" + if self.conf.gateway_external_network_id: + return self.conf.gateway_external_network_id + + # L3 agent doesn't use external_network_bridge to handle external + # networks, so bridge_mappings with provider networks will be used + # and the L3 agent is able to handle any external networks. 
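+ # A None return here means no single external network is enforced
+ # for this agent.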
+ if not self.conf.external_network_bridge: + return + + if not force and self.target_ex_net_id: + return self.target_ex_net_id + + try: + self.target_ex_net_id = self.plugin_rpc.get_external_network_id( + self.context) + return self.target_ex_net_id + except rpc_compat.RemoteError as e: + with excutils.save_and_reraise_exception() as ctx: + if e.exc_type == 'TooManyExternalNetworks': + ctx.reraise = False + msg = _( + "The 'gateway_external_network_id' option must be " + "configured for this agent as Neutron has more than " + "one external network.") + raise Exception(msg) + + def _router_added(self, router_id, router): + ri = RouterInfo(router_id, self.root_helper, + self.conf.use_namespaces, router) + self.router_info[router_id] = ri +# may be deleted. by j00209498 +# if self.conf.use_namespaces: +# self._create_router_namespace(ri) +# for c, r in self.metadata_filter_rules(): +# ri.iptables_manager.ipv4['filter'].add_rule(c, r) +# for c, r in self.metadata_nat_rules(): +# ri.iptables_manager.ipv4['nat'].add_rule(c, r) +# ri.iptables_manager.apply() +# super(L3NATAgent, self).process_router_add(ri) +# if self.conf.enable_metadata_proxy: +# self._spawn_metadata_proxy(ri.router_id, ri.ns_name) + + def _router_removed(self, router_id): + ri = self.router_info.get(router_id) + if ri is None: + LOG.warn(_("Info for router %s were not found. " + "Skipping router removal"), router_id) + return + ri.router['gw_port'] = None + ri.router[l3_constants.INTERFACE_KEY] = [] + ri.router[l3_constants.FLOATINGIP_KEY] = [] + self.process_router(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].remove_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].remove_rule(c, r) + ri.iptables_manager.apply() + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ri.router_id, ri.ns_name) + del self.router_info[router_id] + self._destroy_router_namespace(ri.ns_name) + + def _spawn_metadata_proxy(self, router_id, ns_name): + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + '--router_id=%s' % router_id, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%s' % self.conf.metadata_port] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % + router_id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.enable(callback) + + def _destroy_metadata_proxy(self, router_id, ns_name): + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.disable() + + def get_one_compute_port(self, ri, port): + # Get DVR ports for subnet + if 'id' not in port['subnet'] or ri.router['distributed'] is False: + return + + subnet_ports = ( + self.plugin_rpc.get_ports_by_subnet(self.context, + port['subnet']['id'])) + LOG.debug(_('DVR: subnet_ports: %s'), subnet_ports) + + for p in subnet_ports: + # TODO: check for multiple subnets on port case + if (p['device_owner'] != 'network:router_interface' and + p['device_owner'] != + 'network:router_interface_distributed' and + p['binding:host_id'] == self.conf.host): + return p + + def _set_subnet_arp_info(self, ri, port): + """Get ARP info from Plugin for existing ports for subnet.""" + if 'id' not in port['subnet'] or not ri.router['distributed']: + return + subnet_id = port['subnet']['id'] + 
subnet_ports = ( + self.plugin_rpc.get_ports_by_subnet(self.context, + subnet_id)) + + for p in subnet_ports: + if (p['device_owner'] not in ( + 'network:router_interface', + 'network:router_interface_distributed')): + for fixed_ip in p['fixed_ips']: + self._update_arp_entry(ri, fixed_ip['ip_address'], + p['mac_address'], + subnet_id, 'add') + + def _set_subnet_info(self, port): + ips = port['fixed_ips'] + if not ips: + raise Exception(_("Router port %s has no IP address") % port['id']) + if len(ips) > 1: + LOG.error(_("Ignoring multiple IPs on router port %s"), + port['id']) + prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen + port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) + + def _get_existing_devices(self, ri): + ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper, + namespace=ri.ns_name) + ip_devs = ip_wrapper.get_devices(exclude_loopback=True) + return [ip_dev.name for ip_dev in ip_devs] + + def get_neutron_client(self): + kwargs = {'auth_token': None, + 'username': self.conf.cascaded_admin_user_name, + 'password': self.conf.cascaded_admin_password, + 'aws_creds': None, + 'tenant': self.conf.cascaded_tenant_name, + # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', #context.values['tenant'], + 'auth_url': self.conf.cascaded_auth_url, + 'roles': self.context.roles, + 'is_admin': self.context.is_admin, + 'region_name': self.conf.cascaded_os_region_name} + reqCon = neutron_proxy_context.RequestContext(**kwargs) + openStackClients = clients.OpenStackClients(reqCon) + neutronClient = openStackClients.neutron() + return neutronClient + + def create_cascaded_router(self, router_name, extern_net_id): + req_props = {'name': router_name, + "external_gateway_info": { + "network_id": extern_net_id}} + neutron_client = self.get_neutron_client() + router_ret = neutron_client.create_router({'router': req_props}) + if(not router_ret or + (router_ret and (not router_ret.get('router')))): + LOG.debug(_("cascaded router created failed, " + "router name:%s"), router_name) + return + LOG.debug(_('create router, Response:%s'), str(router_ret)) + return router_ret['router']['id'] + + def delete_cascaded_router(self, router_id): + neutron_client = self.get_neutron_client() + neutron_client.delete_router(router_id) + + def get_or_create_cascaded_net_id(self, port): + '''only get cascaded net_id from port binding:profile''' + '''not implement creating cascaded network''' + cascaded_net_id = self.network_map.get(port['network_id']) + if cascaded_net_id: + return cascaded_net_id + profile = port['binding:profile'] + #profile = json.load(profile) + cascaded_net_id_dict = profile.get('cascaded_net_id') + if(not cascaded_net_id_dict): + return + cascaded_net_id_dict = cascaded_net_id_dict.get(port['network_id']) + cascaded_net_id = cascaded_net_id_dict.get(cfg.CONF.host) + if(cascaded_net_id): + self.network_map[port['network_id']] = cascaded_net_id + return cascaded_net_id + + def get_or_create_cascaded_subnet_id(self, subnet_id, port): + '''only get cascaded subnet_id from port binding:profile''' + '''not implement creating cascaded subnet''' + cascaded_subnet_id = \ + self.subnet_map.get(port['fixed_ips'][0]['subnet_id']) + if cascaded_subnet_id: + return cascaded_subnet_id + profile = port['binding:profile'] + #profile = json.load(profile) + cascaded_subnet_id_dict = profile.get('cascaded_subnet_id') + if(not cascaded_subnet_id_dict): + return + cascaded_subnet_id_dict = cascaded_subnet_id_dict.get(subnet_id) + if(not cascaded_subnet_id_dict): + return + cascaded_subnet_id = 
cascaded_subnet_id_dict.get(cfg.CONF.host) + if(cascaded_subnet_id): + self.subnet_map[port['fixed_ips'][0]['subnet_id']] = \ + cascaded_subnet_id + return cascaded_subnet_id + + def create_cascaded_router_port(self, cascaded_net_id, port): + neutron_client = self.get_neutron_client() + mac_address = port['mac_address'] + ip_address = port['fixed_ips'][0]['ip_address'] + profile = {'cascading_port_id': port['id']} + req_props = {'network_id': cascaded_net_id, + 'name': 'router_port', + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': ip_address}], + 'mac_address': mac_address, + 'binding:profile': profile + } + port_ret = neutron_client.create_port({'port': req_props}) + if(not port_ret or + (port_ret and (not port_ret.get('port')))): + LOG.error(_("ERR:router port created failed, " + "ip_address:%s, mac_address:%s"), + ip_address, mac_address) + return + LOG.debug(_('create router port, Response:%s'), str(port_ret)) + return port_ret['port'].get('id') + + def delete_cascaded_router_port(self, cascaded_port_id): + neutron_client = self.get_neutron_client() + bodyResponse = neutron_client.delete_port(cascaded_port_id) + LOG.debug(_('delete port, Response:%s'), str(bodyResponse)) + return bodyResponse + + def add_interface_for_cascaded_router(self, cascaded_router_id, + cascaded_subnet_id, + cascaded_port_id): + neutron_client = self.get_neutron_client() + #'subnet_id': cascaded_subnet_id, + req_props = {'port_id': cascaded_port_id} + ret = neutron_client.add_interface_router(cascaded_router_id, + req_props) + LOG.debug(_('add interface for router port, Response:%s'), str(ret)) + return + + def delete_interface_for_cascaded_router(self, cascaded_router_id, + cascaded_subnet_id): + neutron_client = self.get_neutron_client() + req_props = {'subnet_id': cascaded_subnet_id} + ret = neutron_client.remove_interface_router(cascaded_router_id, + req_props) + LOG.debug(_('delete interface for router port, Response:%s'), str(ret)) + return + + def create_cascaded_extern_net(self): + net_name = 'net_extern' + net_type = self.conf.cascaded_extern_network_type + physical_net = self.conf.cascaded_extern_physical_network + req_props = {'name': net_name, + 'provider:network_type': net_type, + 'provider:physical_network': physical_net, + 'router:external': True} + neutron_client = self.get_neutron_client() + net_ret = neutron_client.create_network({'network': req_props}) + if(not net_ret or + (net_ret and (not net_ret.get('network')))): + LOG.debug(_("cascaded extern net created failed, " + "net name:%s"), net_name) + return + LOG.debug(_('create extern net, Response:%s'), str(net_ret)) + net_id = net_ret['network']['id'] + alloc_ip_pools = {'start': self.conf.cascaded_start_extern_ip, + 'end': self.conf.cascaded_end_extern_ip} + subnet_req_props = {'name': 'subnet_extern', + 'network_id': net_id, + 'cidr': self.conf.cascaded_extern_subnet_cidr, + 'allocation_pools': [alloc_ip_pools], + 'enable_dhcp': False, + "ip_version": "4"} + subnet_ret = neutron_client.create_subnet({'subnet': subnet_req_props}) + if(not subnet_ret or + (subnet_ret and (not subnet_ret.get('subnet')))): + LOG.debug(_("cascaded extern subnet created failed, " + "net name:%s"), net_name) + return + return net_id + + def get_or_create_cascaded_extern_net(self): + if(self.cascaded_extern_net_id): + return self.cascaded_extern_net_id + net_id = self.create_cascaded_extern_net() + if(not net_id): + return + self.cascaded_extern_net_id = net_id + return net_id + + def get_cascaded_router_gateway_ip(self, router_id): + search_opts = 
{'device_id': router_id, + 'device_owner': 'network:router_gateway'} + neutron_client = self.get_neutron_client() + port_ret = neutron_client.list_ports(**search_opts) + if(not port_ret or + (port_ret and (not port_ret.get('ports')))): + LOG.debug(_("cascaded router gateway_ip get failed, " + "router id:%s"), router_id) + return + port = port_ret['ports'] + if(len(port) == 0): + return + return port[0]['fixed_ips'][0]['ip_address'] + + def update_extra_routes_for_cascaded_router(self, router_id, extra_routes): + req_props = {"routes": [{ + 'nexthop': n, + 'destination': d} for d, n in extra_routes.items()]} + neutron_client = self.get_neutron_client() + router_ret = neutron_client.update_router(router_id, + {'router': req_props}) + if(not router_ret or + (router_ret and (not router_ret.get('router')))): + LOG.debug(_("cascaded router update failed, " + "router id:%s"), router_id) + return + LOG.debug(_('update router, Response:%s'), str(router_ret)) + return router_ret['router']['id'] + + def create_cascaded_extern_net_and_router(self, existing_port_ids, + internal_ports, ri): + if(len(existing_port_ids) == 0 and len(internal_ports) > 0 and + not ri.cascaded_router_id): + extern_net_id = self.get_or_create_cascaded_extern_net() + if(not extern_net_id): + LOG.error(_('ERR: can not get or create cascaded extern net')) + return + router_name = ri.router['name'] + router_id = self.create_cascaded_router(router_name, extern_net_id) + if(not router_id): + LOG.error(_('ERR: can not create cascaded router: %s'), + router_name) + return + gateway_ip = self.get_cascaded_router_gateway_ip(router_id) + if(not gateway_ip): + LOG.error(_('ERR: can not get cascaded router gateway ip')) + return + self.plugin_rpc.update_router_extern_ip_map(self.context, + ri.router['id'], + gateway_ip) + ri.cascaded_router_id = router_id + pass + + def process_router(self, ri): + # TODO(mrsmith) - we shouldn't need to check here + if 'distributed' not in ri.router: + ri.router['distributed'] = False +# ri.iptables_manager.defer_apply_on() +# ex_gw_port = self._get_ex_gw_port(ri) + internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) +# snat_ports = ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) + existing_port_ids = set([p['id'] for p in ri.internal_ports]) + current_port_ids = set([p['id'] for p in internal_ports + if p['admin_state_up']]) + new_ports = [p for p in internal_ports if + p['id'] in current_port_ids and + p['id'] not in existing_port_ids] + old_ports = [p for p in ri.internal_ports if + p['id'] not in current_port_ids] +# if(len(existing_port_ids) == 0 and len(internal_ports) > 0 and +# not ri.cascaded_router_id): +# extern_net_id = self.get_or_create_cascaded_extern_net() +# if(not extern_net_id): +# LOG.error(_('ERR: can not get or create cascaded extern net')) +# return +# router_name = ri.router['name'] +# router_id = self.create_cascaded_router(router_name, extern_net_id) +# if(not router_id): +# LOG.error(_('ERR: can not create cascaded router: %s'), +# router_name) +# return +# gateway_ip = self.get_cascaded_router_gateway_ip(router_id) +# if(not gateway_ip): +# LOG.error(_('ERR: can not get cascaded router gateway ip')) +# return +# self.plugin_rpc.update_router_extern_ip_map(self.context, +# ri.router['id'], +# gateway_ip) +# ri.cascaded_router_id = router_id + + for p in new_ports: + extra_routes = self.plugin_rpc.get_extra_routes_by_subnet( + self.context, + ri.router['id'], + p['fixed_ips'][0]['subnet_id']) + LOG.debug(_("Cascade Info, new ports, extra_routes:%s from " + 
"plugin_rpc.get_extra_routes_by_subnet"), extra_routes) + if('not_bound_network' in extra_routes): + continue + if ('big2Layer' not in extra_routes and + 'local_network' not in extra_routes): + next_hop = extra_routes[0][0] + dest_cidr = extra_routes[0][1] + if(not next_hop): + continue + # if(not ri.extern_extra_routes.get(dest_cidr, None)): + ri.extern_extra_routes[dest_cidr] = next_hop + ri.extra_routes_is_update = True + ri.internal_ports.append(p) + continue + local_existing_port_ids = set([pt['id'] + for pt in ri.local_internal_ports]) + self.create_cascaded_extern_net_and_router(local_existing_port_ids, + internal_ports, ri) + vm_port = self.get_one_compute_port(ri, p) + cascaded_net_id = self.get_or_create_cascaded_net_id(vm_port) + if(not cascaded_net_id): + LOG.error(_('ERR: can not get cascaded net_id from port' + ' %s by get_or_create_cascaded_net_id!'), p) + return + cascaded_subnet_id = \ + self.get_or_create_cascaded_subnet_id( + p['fixed_ips'][0]['subnet_id'], + vm_port) + if(not cascaded_subnet_id): + LOG.error(_('ERR: can not get cascaded subnet_id from port' + ' %s by get_or_create_cascaded_subnet_id!'), p) + return + cascaded_port_id = \ + self.create_cascaded_router_port(cascaded_net_id, p) + if(not cascaded_port_id): + return + p['cascaded_port_id'] = cascaded_port_id + if(not ri.cascaded_router_id): + LOG.error(_('ERR: can not create cascaded router port from' + 'port %s by create_cascaded_router_port!'), p) + return + self.add_interface_for_cascaded_router(ri.cascaded_router_id, + cascaded_subnet_id, + cascaded_port_id) + LOG.debug(_("Add interface for cascaded router, router:%s" + "cascaded_subnet_id:%s, cascaded_port_id:%s"), + ri.cascaded_router_id, cascaded_subnet_id, + cascaded_port_id) + + # deleted by j00209498 +# self._set_subnet_info(p) +# self.internal_network_added(ri, p['network_id'], p['id'], +# p['ip_cidr'], p['mac_address']) + + ri.internal_ports.append(p) + ri.local_internal_ports.append(p) +# self._set_subnet_arp_info(ri, p) + + for p in old_ports: + extra_routes = self.plugin_rpc.get_extra_routes_by_subnet( + self.context, + ri.router['id'], + p['fixed_ips'][0]['subnet_id']) + LOG.debug(_("Cascade Info, old ports, extra_routes:%s from " + "plugin_rpc.get_extra_routes_by_subnet"), extra_routes) + if('not_bound_network' in extra_routes): + continue + if ('big2Layer' not in extra_routes and + 'local_network' not in extra_routes): + next_hop = extra_routes[0][0] + dest_cidr = extra_routes[0][1] + # if(not ri.extern_extra_routes.get(dest_cidr, None)): + ri.extern_extra_routes.pop(dest_cidr, None) + ri.extra_routes_is_update = True + ri.internal_ports.remove(p) + continue + + cascaded_subnet_id = self.subnet_map.get( + p['fixed_ips'][0]['subnet_id']) + if(not cascaded_subnet_id): + LOG.error(_('ERR: can not delete interface for cascaded' + ' router, not find cascaded_subnet_id!')) + return + self.delete_interface_for_cascaded_router(ri.cascaded_router_id, + cascaded_subnet_id) + # self.delete_cascaded_router_port(p['cascaded_port_id']) + ri.internal_ports.remove(p) + ri.local_internal_ports.remove(p) + + if ri.cascaded_router_id and (ri.extra_routes_is_update): + self.update_extra_routes_for_cascaded_router( + ri.cascaded_router_id, + ri.extern_extra_routes) + ri.extra_routes_is_update = False + + if(len(ri.local_internal_ports) == 0 and ri.cascaded_router_id): + ri.internal_ports = [] + ri.local_internal_ports = [] + ri.extern_extra_routes = {} + ri.extra_routes_is_update = False + self.delete_cascaded_router(ri.cascaded_router_id) + 
self.plugin_rpc.update_router_extern_ip_map(self.context, + ri.router['id'], + None) + ri.cascaded_router_id = None + + # not support external network, so return. by j00209498 + # return + # by j00209498 +# existing_devices = self._get_existing_devices(ri) +# current_internal_devs = set([n for n in existing_devices +# if n.startswith(INTERNAL_DEV_PREFIX)]) +# current_port_devs = set([self.get_internal_device_name(id) for +# id in current_port_ids]) +# stale_devs = current_internal_devs - current_port_devs +# for stale_dev in stale_devs: +# LOG.debug(_('Deleting stale internal router device: %s'), +# stale_dev) +# self.driver.unplug(stale_dev, +# namespace=ri.ns_name, +# prefix=INTERNAL_DEV_PREFIX) +# +# Get IPv4 only internal CIDRs +# internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports +# if netaddr.IPNetwork(p['ip_cidr']).version == 4] +# TODO(salv-orlando): RouterInfo would be a better place for +# this logic too +# ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or +# ri.ex_gw_port and ri.ex_gw_port['id']) +# +# interface_name = None +# if ex_gw_port_id: +# interface_name = self.get_external_device_name(ex_gw_port_id) +# if ex_gw_port and ex_gw_port != ri.ex_gw_port: +# self._set_subnet_info(ex_gw_port) +# self.external_gateway_added(ri, ex_gw_port, +# interface_name, internal_cidrs) +# elif not ex_gw_port and ri.ex_gw_port: +# self.external_gateway_removed(ri, ri.ex_gw_port, +# interface_name, internal_cidrs) +# +# stale_devs = [dev for dev in existing_devices +# if dev.startswith(EXTERNAL_DEV_PREFIX) +# and dev != interface_name] +# for stale_dev in stale_devs: +# LOG.debug(_('Deleting stale external router device: %s'), +# stale_dev) +# self.driver.unplug(stale_dev, +# bridge=self.conf.external_network_bridge, +# namespace=ri.ns_name, +# prefix=EXTERNAL_DEV_PREFIX) +# +# Process static routes for router +# self.routes_updated(ri) +# Process SNAT rules for external gateway +# if (not ri.router['distributed'] or +# ex_gw_port and ri.router['gw_port_host'] == self.host): +# ri.perform_snat_action(self._handle_router_snat_rules, +# internal_cidrs, interface_name) +# +# Process SNAT/DNAT rules for floating IPs +# fip_statuses = {} +# try: +# if ex_gw_port: +# existing_floating_ips = ri.floating_ips +# self.process_router_floating_ip_nat_rules(ri) +# ri.iptables_manager.defer_apply_off() +# Once NAT rules for floating IPs are safely in place +# configure their addresses on the external gateway port +# fip_statuses = self.process_router_floating_ip_addresses( +# ri, ex_gw_port) +# except Exception: +# TODO(salv-orlando): Less broad catching +# All floating IPs must be put in error state +# for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): +# fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR +# +# if ex_gw_port: +# Identify floating IPs which were disabled +# ri.floating_ips = set(fip_statuses.keys()) +# for fip_id in existing_floating_ips - ri.floating_ips: +# fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN +# Update floating IP status on the neutron server +# self.plugin_rpc.update_floatingip_statuses( +# self.context, ri.router_id, fip_statuses) +# +# Update ex_gw_port and enable_snat on the router info cache +# ri.ex_gw_port = ex_gw_port +# ri.snat_ports = snat_ports +# ri.enable_snat = ri.router.get('enable_snat') + + def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, + interface_name, action): + # Remove all the rules + # This is safe because if use_namespaces is set as False + # then the agent can only configure one router, 
otherwise
+        # each router's SNAT rules will be in their own namespace
+        if ri.router['distributed']:
+            iptables_manager = ri.snat_iptables_manager
+        else:
+            iptables_manager = ri.iptables_manager
+
+        iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
+        iptables_manager.ipv4['nat'].empty_chain('snat')
+
+        if not ri.router['distributed']:
+            # Add back the jump to float-snat
+            iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
+
+        # And add them back if the action is add_rules
+        if action == 'add_rules' and ex_gw_port:
+            # ex_gw_port should not be None in this case
+            # NAT rules are added only if ex_gw_port has an IPv4 address
+            for ip_addr in ex_gw_port['fixed_ips']:
+                ex_gw_ip = ip_addr['ip_address']
+                if netaddr.IPAddress(ex_gw_ip).version == 4:
+                    rules = self.external_gateway_nat_rules(ex_gw_ip,
+                                                            internal_cidrs,
+                                                            interface_name)
+                    for rule in rules:
+                        iptables_manager.ipv4['nat'].add_rule(*rule)
+                    break
+        iptables_manager.apply()
+
+    def _handle_router_fip_nat_rules(self, ri, interface_name, action):
+        """Configures NAT rules for Floating IPs for DVR.
+
+        Remove all the rules. This is safe because if
+        use_namespaces is set as False then the agent can
+        only configure one router, otherwise each router's
+        NAT rules will be in their own namespace.
+        """
+        ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
+        ri.iptables_manager.ipv4['nat'].empty_chain('snat')
+
+        # Add back the jump to float-snat
+        ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
+
+        # And add them back if the action is add_rules
+        if action == 'add_rules' and interface_name:
+            rule = ('POSTROUTING', '! -i %(interface_name)s '
+                    '! -o %(interface_name)s -m conntrack ! '
+                    '--ctstate DNAT -j ACCEPT' %
+                    {'interface_name': interface_name})
+            ri.iptables_manager.ipv4['nat'].add_rule(*rule)
+        ri.iptables_manager.apply()
+
+    def process_router_floating_ip_nat_rules(self, ri):
+        """Configure NAT rules for the router's floating IPs.
+
+        Configures iptables rules for the floating ips of the given router
+        """
+        # Clear out all iptables rules for floating ips
+        ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
+
+        floating_ips = self.get_floating_ips(ri)
+        # Loop once to ensure that floating ips are configured.
+        for fip in floating_ips:
+            # Rebuild iptables rules for the floating ip.
+            fixed = fip['fixed_ip_address']
+            fip_ip = fip['floating_ip_address']
+            for chain, rule in self.floating_forward_rules(fip_ip, fixed):
+                ri.iptables_manager.ipv4['nat'].add_rule(chain, rule,
+                                                         tag='floating_ip')
+
+        ri.iptables_manager.apply()
+
+    def process_router_floating_ip_addresses(self, ri, ex_gw_port):
+        """Configure IP addresses on router's external gateway interface.
+
+        Ensures addresses for existing floating IPs and cleans up
+        those that should no longer be configured.
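+        Returns a dict mapping each floating IP id to its resulting
+        status (FLOATINGIP_STATUS_ACTIVE or FLOATINGIP_STATUS_ERROR).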
+ """ + fip_statuses = {} + + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + if ri.router['distributed']: + # filter out only FIPs for this host/agent + floating_ips = [i for i in floating_ips if i['host'] == self.host] + if floating_ips and self.agent_gateway_port is None: + self._create_agent_gateway_port(ri, floating_ips[0] + ['floating_network_id']) + + if self.agent_gateway_port: + if floating_ips and ri.dist_fip_count == 0: + self.create_rtr_2_fip_link(ri, floating_ips[0] + ['floating_network_id']) + interface_name = self.get_rtr_int_device_name(ri.router_id) + else: + # there are no fips or agent port, no work to do + return fip_statuses + else: + interface_name = self.get_external_device_name(ex_gw_port['id']) + + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + existing_cidrs = set([addr['cidr'] for addr in device.addr.list()]) + new_cidrs = set() + + # Loop once to ensure that floating ips are configured. + for fip in floating_ips: + fip_ip = fip['floating_ip_address'] + ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX + + new_cidrs.add(ip_cidr) + + if ip_cidr not in existing_cidrs: + net = netaddr.IPNetwork(ip_cidr) + try: + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError): + # any exception occurred here should cause the floating IP + # to be set in error state + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ERROR) + LOG.warn(_("Unable to configure IP address for " + "floating IP: %s"), fip['id']) + continue + if ri.router['distributed']: + # Special Handling for DVR - update FIP namespace + # and ri.namespace to handle DVR based FIP + self.floating_ip_added_dist(ri, fip) + else: + # As GARP is processed in a distinct thread the call below + # won't raise an exception to be handled. + self._send_gratuitous_arp_packet( + ri.ns_name, interface_name, fip_ip) + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ACTIVE) + + # Clean up addresses that no longer belong on the gateway interface. 
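+        # Only addresses carrying the floating IP CIDR suffix are
+        # considered for removal here.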
+ for ip_cidr in existing_cidrs - new_cidrs: + if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX): + net = netaddr.IPNetwork(ip_cidr) + device.addr.delete(net.version, ip_cidr) + if ri.router['distributed']: + self.floating_ip_removed_dist(ri, ip_cidr) + return fip_statuses + + def _get_ex_gw_port(self, ri): + return ri.router.get('gw_port') + + def _arping(self, ns_name, interface_name, ip_address, dist=None): + if dist: + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ns_name) + ip_cidr = str(ip_address) + FLOATING_IP_CIDR_SUFFIX + net = netaddr.IPNetwork(ip_cidr) + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + + arping_cmd = ['arping', '-A', + '-I', interface_name, + '-c', self.conf.send_arp_for_ha, + ip_address] + try: + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ns_name) + ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) + except Exception as e: + LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) + if dist: + device.addr.delete(net.version, ip_cidr) + + def _send_gratuitous_arp_packet(self, ns_name, interface_name, ip_address, + dist=None): + if self.conf.send_arp_for_ha > 0: + eventlet.spawn_n(self._arping, ns_name, interface_name, ip_address, + dist) + + def get_internal_port(self, ri, subnet_id): + """Returns internal router port based on subnet_id.""" + router_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + for port in router_ports: + fips = port['fixed_ips'] + for f in fips: + if f['subnet_id'] == subnet_id: + return port + + def get_internal_device_name(self, port_id): + return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_external_device_name(self, port_id): + return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_ext_device_name(self, port_id): + return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_rtr_int_device_name(self, router_id): + return (RTR_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_int_device_name(self, router_id): + return (FIP_2_RTR_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] + + def get_snat_int_device_name(self, port_id): + return (SNAT_INT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_ns_name(self, ext_net_id): + return (FIP_NS_PREFIX + ext_net_id) + + def get_snat_ns_name(self, ext_gw_port_id): + return (SNAT_NS_PREFIX + ext_gw_port_id) + + def get_snat_interfaces(self, ri): + return ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) + + def get_floating_ips(self, ri): + """Filters Floating IPs for DVR to be hosted on this agent.""" + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + if ri.router['distributed']: + floating_ips = [i for i in floating_ips if i['host'] == self.host] + return floating_ips + + def _map_internal_interfaces(self, ri, int_port, snat_ports): + """Returns the SNAT port for the given internal interface port.""" + fixed_ip = int_port['fixed_ips'][0] + subnet_id = fixed_ip['subnet_id'] + match_port = [p for p in snat_ports if + p['fixed_ips'][0]['subnet_id'] == subnet_id] + if match_port: + return match_port[0] + else: + LOG.debug('DVR: no map match_port found!') + + def _create_dvr_gateway(self, ri, ex_gw_port, gw_interface_name, + internal_cidrs, snat_ports): + """Create SNAT namespace.""" + snat_ns_name = self.get_snat_ns_name(ex_gw_port['id']) + self._create_namespace(snat_ns_name) + # connect snat_ports to br_int from SNAT namespace + for port in snat_ports: + # create interface_name + self._set_subnet_info(port) + 
interface_name = self.get_snat_int_device_name(port['id']) + self._internal_network_added(snat_ns_name, port['network_id'], + port['id'], port['ip_cidr'], + port['mac_address'], interface_name, + SNAT_INT_DEV_PREFIX) + self._external_gateway_added(ri, ex_gw_port, gw_interface_name, + internal_cidrs, snat_ns_name, + preserve_ips=[]) + ri.snat_iptables_manager = ( + iptables_manager.IptablesManager( + root_helper=self.root_helper, namespace=snat_ns_name + ) + ) + + def external_gateway_added(self, ri, ex_gw_port, + interface_name, internal_cidrs): + if ri.router['distributed']: + snat_ports = self.get_snat_interfaces(ri) + for p in ri.internal_ports: + gateway = self._map_internal_interfaces(ri, p, snat_ports) + id_name = self.get_internal_device_name(p['id']) + self._snat_redirect_add(ri, gateway['fixed_ips'][0] + ['ip_address'], p, id_name) + + if self.conf.centralized_snat and ( + ri.router['gw_port_host'] == self.host): + if snat_ports: + self._create_dvr_gateway(ri, ex_gw_port, + interface_name, + internal_cidrs, snat_ports) + for port in snat_ports: + for ip in port['fixed_ips']: + self._update_arp_entry(ri, ip['ip_address'], + port['mac_address'], + ip['subnet_id'], 'add') + return + + # Compute a list of addresses this router is supposed to have. + # This avoids unnecessarily removing those addresses and + # causing a momentarily network outage. + floating_ips = self.get_floating_ips(ri) + preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX + for ip in floating_ips] + + self._external_gateway_added(ri, ex_gw_port, interface_name, + internal_cidrs, ri.ns_name, + preserve_ips) + + def _external_gateway_added(self, ri, ex_gw_port, interface_name, + internal_cidrs, ns_name, preserve_ips): + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ns_name, + gateway=ex_gw_port['subnet'].get('gateway_ip'), + extra_subnets=ex_gw_port.get('extra_subnets', []), + preserve_ips=preserve_ips) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ns_name, + interface_name, ip_address) + + def agent_gateway_added(self, ns_name, ex_gw_port, + interface_name): + """Adds Floating IP gateway port to FIP namespace.""" + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ns_name) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) + + gw_ip = ex_gw_port['subnet']['gateway_ip'] + if gw_ip: + ipd = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ns_name) + ipd.route.add_gateway(gw_ip) + + cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def internal_ns_interface_added(self, ip_cidr, + interface_name, ns_name): + ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + 
ip_wrapper.netns.execute(['ip', 'addr', 'add', + ip_cidr, 'dev', interface_name]) + + def external_gateway_removed(self, ri, ex_gw_port, + interface_name, internal_cidrs): + if ri.router['distributed']: + for p in ri.internal_ports: + internal_interface = self.get_internal_device_name(p['id']) + self._snat_redirect_remove(ri, p, internal_interface) + + if self.conf.centralized_snat and ( + ex_gw_port['binding:host_id'] == self.host): + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + else: + # not hosting agent - no work to do + LOG.debug('DVR: CSNAT not hosted: %s', ex_gw_port) + return + else: + ns_name = ri.ns_name + + self.driver.unplug(interface_name, + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=EXTERNAL_DEV_PREFIX) + if ri.router['distributed']: + self._destroy_snat_namespace(ns_name) + + def metadata_filter_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' + '-p tcp -m tcp --dport %s ' + '-j ACCEPT' % self.conf.metadata_port)) + return rules + + def metadata_nat_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j REDIRECT ' + '--to-port %s' % self.conf.metadata_port)) + return rules + + def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs, + interface_name): + rules = [('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name})] + for cidr in internal_cidrs: + rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr)) + return rules + + def _gen_snat_idx(self, cidr): + """Generate index based on cidr for SNAT entries.""" + ip = cidr.split('/')[0] + ip_str = ip.split('.') + ip_num = (((int(ip_str[0])) << 24) + ((int(ip_str[1])) << 16) + + ((int(ip_str[2])) << 8) + (int(ip_str[3]))) + return ip_num + + def _snat_redirect_add(self, ri, gateway, sn_port, sn_int): + """Adds rules and routes for SNAT redirection.""" + try: + snat_idx = self._gen_snat_idx(sn_port['ip_cidr']) + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, + namespace=ri.ns_name) + ns_ipd.route.add_gateway(gateway, table=snat_idx) + ns_ipr.add_rule_from(sn_port['ip_cidr'], snat_idx, snat_idx) + ns_ipr.netns.execute(['sysctl', '-w', + 'net.ipv4.conf.all.send_redirects=0']) + ns_ipr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 
+ 'send_redirects=0' % sn_int]) + except Exception: + LOG.exception(_('DVR: error adding redirection logic')) + + def _snat_redirect_remove(self, ri, sn_port, sn_int): + """Removes rules and routes for SNAT redirection.""" + try: + snat_idx = self._gen_snat_idx(sn_port['ip_cidr']) + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, + namespace=ri.ns_name) + ns_ipd.route.delete_gateway(table=snat_idx) + ns_ipr.delete_rule_priority(snat_idx) + except Exception: + LOG.exception(_('DVR: removed snat failed')) + + def _internal_network_added(self, ns_name, network_id, port_id, + internal_cidr, mac_address, + interface_name, prefix): + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(network_id, port_id, interface_name, mac_address, + namespace=ns_name, + prefix=prefix) + + self.driver.init_l3(interface_name, [internal_cidr], + namespace=ns_name) + ip_address = internal_cidr.split('/')[0] + self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) + + def internal_network_added(self, ri, port): + network_id = port['network_id'] + port_id = port['id'] + internal_cidr = port['ip_cidr'] + mac_address = port['mac_address'] + + interface_name = self.get_internal_device_name(port_id) + + self._internal_network_added(ri.ns_name, network_id, port_id, + internal_cidr, mac_address, + interface_name, INTERNAL_DEV_PREFIX) + + if ri.router['distributed'] and ri.ex_gw_port: + ex_gw_port = ri.ex_gw_port + snat_ports = self.get_snat_interfaces(ri) + snat_ip = self._map_internal_interfaces(ri, port, snat_ports) + self._snat_redirect_add(ri, snat_ip['fixed_ips'][0] + ['ip_address'], port, interface_name) + if self.conf.centralized_snat and ( + ri.router['gw_port_host'] == self.host): + for port in snat_ports: + self._set_subnet_info(port) + interface_name = self.get_snat_int_device_name(port['id']) + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + self._internal_network_added(ns_name, port['network_id'], + port['id'], internal_cidr, + port['mac_address'], + interface_name, + SNAT_INT_DEV_PREFIX) + + def internal_network_removed(self, ri, port): + port_id = port['id'] + interface_name = self.get_internal_device_name(port_id) + if ri.router['distributed'] and ri.ex_gw_port: + # DVR handling code for SNAT + ex_gw_port = ri.ex_gw_port + self._snat_redirect_remove(ri, port, interface_name) + if self.conf.centralized_snat and ( + ri.ex_gw_port['binding:host_id'] == self.host): + snat_port = self._map_internal_interfaces(ri, port, + ri.snat_ports) + snat_interface = ( + self.get_snat_int_device_name(snat_port['id']) + ) + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + prefix = SNAT_INT_DEV_PREFIX + if ip_lib.device_exists(snat_interface, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.unplug(snat_interface, namespace=ns_name, + prefix=prefix) + + if ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.unplug(interface_name, namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + def internal_network_nat_rules(self, ex_gw_ip, internal_cidr): + rules = [('snat', '-s %s -j SNAT --to-source %s' % + (internal_cidr, ex_gw_ip))] + return rules + + def _create_agent_gateway_port(self, ri, network_id): + """Creates Floating IP gateway port. + + Request port creation from Plugin then creates + Floating IP namespace and adds gateway port. 
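+        The port returned by the plugin is cached on the agent as
+        self.agent_gateway_port.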
+ """ + # Port does not exist, request port from plugin + self.agent_gateway_port = ( + self.plugin_rpc.get_agent_gateway_port( + self.context, network_id)) + if 'subnet' not in self.agent_gateway_port: + LOG.error(_('Missing subnet/agent_gateway_port')) + return + self._set_subnet_info(self.agent_gateway_port) + + # add fip-namespace and agent_gateway_port + fip_ns_name = ( + self.get_fip_ns_name(str(network_id))) + self._create_namespace(fip_ns_name) + interface_name = ( + self.get_fip_ext_device_name(self.agent_gateway_port['id'])) + self.agent_gateway_added(fip_ns_name, self.agent_gateway_port, + interface_name) + + def create_rtr_2_fip_link(self, ri, network_id): + """Creates interface between router and Floating IP namespace.""" + rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + fip_ns_name = self.get_fip_ns_name(str(network_id)) + + # add link local IP to interface + if ri.rtr_2_fip is None: + ri.rtr_2_fip = FIP_LL_PREFIX + str(self.local_ips.pop()) + if ri.fip_2_rtr is None: + ri.fip_2_rtr = FIP_LL_PREFIX + str(self.local_ips.pop()) + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + int_dev = ip_wrapper.add_veth(rtr_2_fip_name, + fip_2_rtr_name, fip_ns_name) + self.internal_ns_interface_added(ri.rtr_2_fip + '/31', + rtr_2_fip_name, ri.ns_name) + self.internal_ns_interface_added(ri.fip_2_rtr + '/31', + fip_2_rtr_name, fip_ns_name) + int_dev[0].link.set_up() + int_dev[1].link.set_up() + # add default route for the link local interface + device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, + namespace=ri.ns_name) + device.route.add_gateway(ri.fip_2_rtr, table=FIP_RT_TBL) + # setup the NAT rules and chains + self._handle_router_fip_nat_rules(ri, rtr_2_fip_name, 'add_rules') + + def floating_ip_added_dist(self, ri, fip): + """Adds floating IP to FIP namespace.""" + floating_ip = fip['floating_ip_address'] + fixed_ip = fip['fixed_ip_address'] + rule_pr = self.fip_priorities.pop() + ri.floating_ips_dict[floating_ip] = rule_pr + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + ipRule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ipRule.add_rule_from(fixed_ip, FIP_RT_TBL, rule_pr) + + # Add routing rule in fip namespace + fip_cidr = str(floating_ip) + FLOATING_IP_CIDR_SUFFIX + fip_ns_name = self.get_fip_ns_name(str(fip['floating_network_id'])) + device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, + namespace=fip_ns_name) + device.route.add_route(fip_cidr, ri.rtr_2_fip) + interface_name = ( + self.get_fip_ext_device_name(self.agent_gateway_port['id'])) + self._send_gratuitous_arp_packet(fip_ns_name, + interface_name, floating_ip, + dist=True) + # update internal structures + self.agent_fip_count = self.agent_fip_count + 1 + ri.dist_fip_count = ri.dist_fip_count + 1 + + def floating_ip_removed_dist(self, ri, fip_cidr): + """Removes floating IP from FIP namespace.""" + floating_ip = fip_cidr.split('/')[0] + rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + fip_ns_name = self.get_fip_ns_name(str(self._fetch_external_net_id())) + ip_rule_rtr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + if floating_ip in ri.floating_ips_dict: + rule_pr = ri.floating_ips_dict[floating_ip] + # TODO(rajeev): Handle else case - exception/log? 
+ else: + rule_pr = None + + ip_rule_rtr.delete_rule_priority(rule_pr) + self.fip_priorities.add(rule_pr) + device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, + namespace=fip_ns_name) + + device.route.delete_route(fip_cidr, ri.rtr_2_fip) + # check if this is the last FIP for this router + ri.dist_fip_count = ri.dist_fip_count - 1 + if ri.dist_fip_count == 0: + # remove default route entry + device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, + namespace=ri.ns_name) + device.route.delete_gateway(ri.fip_2_rtr, table=FIP_RT_TBL) + self.local_ips.add(ri.rtr_2_fip.rsplit('.', 1)[1]) + ri.rtr_2_fip = None + self.local_ips.add(ri.fip_2_rtr.rsplit('.', 1)[1]) + ri.fip_2_rtr = None + # TODO(mrsmith): remove interface + # clean up fip-namespace if this is the last FIP + self.agent_fip_count = self.agent_fip_count - 1 + if self.agent_fip_count == 0: + self._destroy_fip_namespace(fip_ns_name) + + def floating_forward_rules(self, floating_ip, fixed_ip): + return [('PREROUTING', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('float-snat', '-s %s -j SNAT --to %s' % + (fixed_ip, floating_ip))] + + def router_deleted(self, context, router_id): + """Deal with router deletion RPC message.""" + LOG.debug(_('Got router deleted notification for %s'), router_id) + self.removed_routers.add(router_id) + + def _update_arp_entry(self, ri, ip, mac, subnet_id, operation): + """Add or delete arp entry into router namespace.""" + port = self.get_internal_port(ri, subnet_id) + if 'id' in port: + ip_cidr = str(ip) + '/32' + try: + # TODO(mrsmith): optimize the calls below for bulk calls + net = netaddr.IPNetwork(ip_cidr) + interface_name = self.get_internal_device_name(port['id']) + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + if operation == 'add': + device.neigh.add(net.version, ip, mac) + elif operation == 'delete': + device.neigh.delete(net.version, ip, mac) + except Exception: + LOG.exception(_("DVR: Failed updating arp entry")) + self.fullsync = True + + def add_arp_entry(self, context, payload): + """Adds arp entry into router namespace from RPC.""" + arp_table = payload['arp_table'] + router_id = payload['router_id'] + ip = arp_table['ip_address'] + mac = arp_table['mac_address'] + subnet_id = arp_table['subnet_id'] + ri = self.router_info.get(router_id) + self._update_arp_entry(ri, ip, mac, subnet_id, 'add') + + def delete_arp_entry(self, context, payload): + """Deletes arp entry into router namespace from RPC.""" + arp_table = payload['arp_table'] + router_id = payload['router_id'] + ip = arp_table['ip_address'] + mac = arp_table['mac_address'] + subnet_id = arp_table['subnet_id'] + ri = self.router_info.get(router_id) + self._update_arp_entry(ri, ip, mac, subnet_id, 'delete') + + def routers_updated(self, context, routers): + """Deal with routers modification and creation RPC message.""" + LOG.debug(_('Got routers updated notification :%s'), routers) + if routers: + # This is needed for backward compatibility + if isinstance(routers[0], dict): + routers = [router['id'] for router in routers] + self.updated_routers.update(routers) + + def router_removed_from_agent(self, context, payload): + LOG.debug(_('Got router removed from agent :%r'), payload) + self.removed_routers.add(payload['router_id']) + + def router_added_to_agent(self, context, payload): + LOG.debug(_('Got router added to agent :%r'), payload) + self.routers_updated(context, payload) + + def _process_routers(self, 
routers, all_routers=False): + pool = eventlet.GreenPool() + if (self.conf.external_network_bridge and + not ip_lib.device_exists(self.conf.external_network_bridge)): + LOG.error(_("The external network bridge '%s' does not exist"), + self.conf.external_network_bridge) + return + + target_ex_net_id = self._fetch_external_net_id() + # if routers are all the routers we have (They are from router sync on + # starting or when error occurs during running), we seek the + # routers which should be removed. + # If routers are from server side notification, we seek them + # from subset of incoming routers and ones we have now. + if all_routers: + prev_router_ids = set(self.router_info) + else: + prev_router_ids = set(self.router_info) & set( + [router['id'] for router in routers]) + cur_router_ids = set() + for r in routers: + # If namespaces are disabled, only process the router associated + # with the configured agent id. + if (not self.conf.use_namespaces and + r['id'] != self.conf.router_id): + continue + ex_net_id = (r['external_gateway_info'] or {}).get('network_id') + if not ex_net_id and not self.conf.handle_internal_only_routers: + continue + if (target_ex_net_id and ex_net_id and + ex_net_id != target_ex_net_id): + # Double check that our single external_net_id has not changed + # by forcing a check by RPC. + if (ex_net_id != self._fetch_external_net_id(force=True)): + continue + cur_router_ids.add(r['id']) + if r['id'] not in self.router_info: + self._router_added(r['id'], r) + ri = self.router_info[r['id']] + ri.router = r + pool.spawn_n(self.process_router, ri) + # identify and remove routers that no longer exist + for router_id in prev_router_ids - cur_router_ids: + pool.spawn_n(self._router_removed, router_id) + pool.waitall() + + @lockutils.synchronized('l3-agent', 'neutron-') + def _rpc_loop(self): + # _rpc_loop and _sync_routers_task will not be + # executed in the same time because of lock. + # so we can clear the value of updated_routers + # and removed_routers, but they can be updated by + # updated_routers and removed_routers rpc call + try: + LOG.debug(_("Starting RPC loop for %d updated routers"), + len(self.updated_routers)) + if self.updated_routers: + # We're capturing and clearing the list, and will + # process the "captured" updates in this loop, + # and any updates that happen due to a context switch + # will be picked up on the next pass. 
+ updated_routers = set(self.updated_routers) + self.updated_routers.clear() + router_ids = list(updated_routers) + routers = self.plugin_rpc.get_routers( + self.context, router_ids) + # routers with admin_state_up=false will not be in the fetched + fetched = set([r['id'] for r in routers]) + self.removed_routers.update(updated_routers - fetched) + + self._process_routers(routers) + self._process_router_delete() + LOG.debug(_("RPC loop successfully completed")) + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + def _process_router_delete(self): + current_removed_routers = list(self.removed_routers) + for router_id in current_removed_routers: + self._router_removed(router_id) + self.removed_routers.remove(router_id) + + def _router_ids(self): + if not self.conf.use_namespaces: + return [self.conf.router_id] + + @periodic_task.periodic_task + @lockutils.synchronized('l3-agent', 'neutron-') + def _sync_routers_task(self, context): + if self.services_sync: + super(L3NATAgent, self).process_services_sync(context) + LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), + self.fullsync) + if not self.fullsync: + return + try: + router_ids = self._router_ids() + self.updated_routers.clear() + self.removed_routers.clear() + routers = self.plugin_rpc.get_routers( + context, router_ids) + + LOG.debug(_('Processing :%r'), routers) + self._process_routers(routers, all_routers=True) + self.fullsync = False + LOG.debug(_("_sync_routers_task successfully completed")) + except rpc_compat.RPCException: + LOG.exception(_("Failed synchronizing routers due to RPC error")) + self.fullsync = True + return + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + # Resync is not necessary for the cleanup of stale + # namespaces. 
+ if self._clean_stale_namespaces: + self._cleanup_namespaces(routers) + + def after_start(self): + LOG.info(_("L3 agent started")) + + def _update_routing_table(self, ri, operation, route): + cmd = ['ip', 'route', operation, 'to', route['destination'], + 'via', route['nexthop']] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def routes_updated(self, ri): + new_routes = ri.router['routes'] + old_routes = ri.routes + adds, removes = common_utils.diff_list_of_dict(old_routes, + new_routes) + for route in adds: + LOG.debug(_("Added route entry is '%s'"), route) + # remove replaced route from deleted route + for del_route in removes: + if route['destination'] == del_route['destination']: + removes.remove(del_route) + # replace success even if there is no existing route + self._update_routing_table(ri, 'replace', route) + for route in removes: + LOG.debug(_("Removed route entry is '%s'"), route) + self._update_routing_table(ri, 'delete', route) + ri.routes = new_routes + + +class L3NATAgentWithStateReport(L3NATAgent): + + def __init__(self, host, conf=None): + super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-l3-agent', + 'host': host, + 'topic': topics.L3_AGENT, + 'configurations': { + 'distributed_agent': self.conf.distributed_agent, + 'centralized_snat': self.conf.centralized_snat, + 'centralized_router': self.conf.centralized_router, + 'use_namespaces': self.conf.use_namespaces, + 'router_id': self.conf.router_id, + 'handle_internal_only_routers': + self.conf.handle_internal_only_routers, + 'external_network_bridge': self.conf.external_network_bridge, + 'gateway_external_network_id': + self.conf.gateway_external_network_id, + 'interface_driver': self.conf.interface_driver}, + 'start_flag': True, + 'agent_type': l3_constants.AGENT_TYPE_L3} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + LOG.debug(_("Report state task started")) + num_ex_gw_ports = 0 + num_interfaces = 0 + num_floating_ips = 0 + router_infos = self.router_info.values() + num_routers = len(router_infos) + for ri in router_infos: + ex_gw_port = self._get_ex_gw_port(ri) + if ex_gw_port: + num_ex_gw_ports += 1 + num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, + [])) + num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, + [])) + configurations = self.agent_state['configurations'] + configurations['routers'] = num_routers + configurations['ex_gw_ports'] = num_ex_gw_ports + configurations['interfaces'] = num_interfaces + configurations['floating_ips'] = num_floating_ips + try: + self.state_rpc.report_state(self.context, self.agent_state, + self.use_call) + self.agent_state.pop('start_flag', None) + self.use_call = False + LOG.debug(_("Report state task successfully completed")) + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." 
+ " State report for this agent will be disabled.")) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.fullsync = True + LOG.info(_("agent_updated by server side %s!"), payload) + + +def main(manager='neutron.agent.l3_proxy.L3NATAgentWithStateReport'): + conf = cfg.CONF + conf.register_opts(L3NATAgent.OPTS) + config.register_interface_driver_opts_helper(conf) + config.register_use_namespaces_opts_helper(conf) + config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + conf.register_opts(interface.OPTS) + conf.register_opts(external_process.OPTS) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-l3-agent', + topic=topics.L3_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager=manager) + service.launch(server).wait() + +if __name__ == "__main__": + sys.exit(main()) diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/agent/linux/ip_lib.py b/icehouse-patches/neutron/vlan2vlan/neutron/agent/linux/ip_lib.py new file mode 100644 index 00000000..6f6f0790 --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/agent/linux/ip_lib.py @@ -0,0 +1,590 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from oslo.config import cfg + +from neutron.agent.linux import utils +from neutron.common import exceptions + + +OPTS = [ + cfg.BoolOpt('ip_lib_force_root', + default=False, + help=_('Force ip_lib calls to use the root helper')), +] + + +LOOPBACK_DEVNAME = 'lo' +# NOTE(ethuleau): depend of the version of iproute2, the vlan +# interface details vary. +VLAN_INTERFACE_DETAIL = ['vlan protocol 802.1q', + 'vlan protocol 802.1Q', + 'vlan id'] + + +class SubProcessBase(object): + + def __init__(self, root_helper=None, namespace=None): + self.root_helper = root_helper + self.namespace = namespace + try: + self.force_root = cfg.CONF.ip_lib_force_root + except cfg.NoSuchOptError: + # Only callers that need to force use of the root helper + # need to register the option. + self.force_root = False + + def _run(self, options, command, args): + if self.namespace: + return self._as_root(options, command, args) + elif self.force_root: + # Force use of the root helper to ensure that commands + # will execute in dom0 when running under XenServer/XCP. 
+ return self._execute(options, command, args, self.root_helper) + else: + return self._execute(options, command, args) + + def _as_root(self, options, command, args, use_root_namespace=False): + if not self.root_helper: + raise exceptions.SudoRequired() + + namespace = self.namespace if not use_root_namespace else None + + return self._execute(options, + command, + args, + self.root_helper, + namespace) + + @classmethod + def _execute(cls, options, command, args, root_helper=None, + namespace=None): + opt_list = ['-%s' % o for o in options] + if namespace: + ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip'] + else: + ip_cmd = ['ip'] + return utils.execute(ip_cmd + opt_list + [command] + list(args), + root_helper=root_helper) + + +class IPWrapper(SubProcessBase): + + def __init__(self, root_helper=None, namespace=None): + super(IPWrapper, self).__init__(root_helper=root_helper, + namespace=namespace) + self.netns = IpNetnsCommand(self) + + def device(self, name): + return IPDevice(name, self.root_helper, self.namespace) + + def get_devices(self, exclude_loopback=False): + retval = [] + output = self._execute(['o', 'd'], 'link', ('list',), + self.root_helper, self.namespace) + for line in output.split('\n'): + if '<' not in line: + continue + tokens = line.split(' ', 2) + if len(tokens) == 3: + if any(v in tokens[2] for v in VLAN_INTERFACE_DETAIL): + delimiter = '@' + else: + delimiter = ':' + name = tokens[1].rpartition(delimiter)[0].strip() + + if exclude_loopback and name == LOOPBACK_DEVNAME: + continue + + retval.append(IPDevice(name, + self.root_helper, + self.namespace)) + return retval + + def add_tuntap(self, name, mode='tap'): + self._as_root('', 'tuntap', ('add', name, 'mode', mode)) + return IPDevice(name, self.root_helper, self.namespace) + + def add_veth(self, name1, name2, namespace2=None): + args = ['add', name1, 'type', 'veth', 'peer', 'name', name2] + + if namespace2 is None: + namespace2 = self.namespace + else: + self.ensure_namespace(namespace2) + args += ['netns', namespace2] + + self._as_root('', 'link', tuple(args)) + + return (IPDevice(name1, self.root_helper, self.namespace), + IPDevice(name2, self.root_helper, namespace2)) + + def ensure_namespace(self, name): + if not self.netns.exists(name): + ip = self.netns.add(name) + lo = ip.device(LOOPBACK_DEVNAME) + lo.link.set_up() + else: + ip = IPWrapper(self.root_helper, name) + return ip + + def namespace_is_empty(self): + return not self.get_devices(exclude_loopback=True) + + def garbage_collect_namespace(self): + """Conditionally destroy the namespace if it is empty.""" + if self.namespace and self.netns.exists(self.namespace): + if self.namespace_is_empty(): + self.netns.delete(self.namespace) + return True + return False + + def add_device_to_namespace(self, device): + if self.namespace: + device.link.set_netns(self.namespace) + + def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None, + local=None, port=None, proxy=False): + cmd = ['add', name, 'type', 'vxlan', 'id', vni] + if group: + cmd.extend(['group', group]) + if dev: + cmd.extend(['dev', dev]) + if ttl: + cmd.extend(['ttl', ttl]) + if tos: + cmd.extend(['tos', tos]) + if local: + cmd.extend(['local', local]) + if proxy: + cmd.append('proxy') + # tuple: min,max + if port and len(port) == 2: + cmd.extend(['port', port[0], port[1]]) + elif port: + raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port) + self._as_root('', 'link', cmd) + return (IPDevice(name, self.root_helper, self.namespace)) + + @classmethod + def get_namespaces(cls, 
root_helper): + output = cls._execute('', 'netns', ('list',), root_helper=root_helper) + return [l.strip() for l in output.split('\n')] + + +class IpRule(IPWrapper): + + def add_rule_from(self, ip, table, rule_pr): + args = ['add', 'from', ip, 'lookup', table, 'priority', rule_pr] + ip = self._as_root('', 'rule', tuple(args)) + return ip + + def add_rule_to(self, ip, table, rule_pr): + args = ['add', 'to', ip, 'lookup', table, 'priority', rule_pr] + ip = self._as_root('', 'rule', tuple(args)) + return ip + + def delete_rule_priority(self, rule_pr): + args = ['del', 'priority', rule_pr] + ip = self._as_root('', 'rule', tuple(args)) + return ip + + +class IPDevice(SubProcessBase): + + def __init__(self, name, root_helper=None, namespace=None): + super(IPDevice, self).__init__(root_helper=root_helper, + namespace=namespace) + self.name = name + self.link = IpLinkCommand(self) + self.addr = IpAddrCommand(self) + self.route = IpRouteCommand(self) + self.neigh = IpNeighCommand(self) + + def __eq__(self, other): + return (other is not None and self.name == other.name + and self.namespace == other.namespace) + + def __str__(self): + return self.name + + +class IpCommandBase(object): + COMMAND = '' + + def __init__(self, parent): + self._parent = parent + + def _run(self, *args, **kwargs): + return self._parent._run(kwargs.get('options', []), self.COMMAND, args) + + def _as_root(self, *args, **kwargs): + return self._parent._as_root(kwargs.get('options', []), + self.COMMAND, + args, + kwargs.get('use_root_namespace', False)) + + +class IpDeviceCommandBase(IpCommandBase): + + @property + def name(self): + return self._parent.name + + +class IpLinkCommand(IpDeviceCommandBase): + COMMAND = 'link' + + def set_address(self, mac_address): + self._as_root('set', self.name, 'address', mac_address) + + def set_mtu(self, mtu_size): + self._as_root('set', self.name, 'mtu', mtu_size) + + def set_up(self): + self._as_root('set', self.name, 'up') + + def set_down(self): + self._as_root('set', self.name, 'down') + + def set_netns(self, namespace): + self._as_root('set', self.name, 'netns', namespace) + self._parent.namespace = namespace + + def set_name(self, name): + self._as_root('set', self.name, 'name', name) + self._parent.name = name + + def set_alias(self, alias_name): + self._as_root('set', self.name, 'alias', alias_name) + + def delete(self): + self._as_root('delete', self.name) + + @property + def address(self): + return self.attributes.get('link/ether') + + @property + def state(self): + return self.attributes.get('state') + + @property + def mtu(self): + return self.attributes.get('mtu') + + @property + def qdisc(self): + return self.attributes.get('qdisc') + + @property + def qlen(self): + return self.attributes.get('qlen') + + @property + def alias(self): + return self.attributes.get('alias') + + @property + def attributes(self): + return self._parse_line(self._run('show', self.name, options='o')) + + def _parse_line(self, value): + if not value: + return {} + + device_name, settings = value.replace("\\", '').split('>', 1) + tokens = settings.split() + keys = tokens[::2] + values = [int(v) if v.isdigit() else v for v in tokens[1::2]] + + retval = dict(zip(keys, values)) + return retval + + +class IpAddrCommand(IpDeviceCommandBase): + COMMAND = 'addr' + + def add(self, ip_version, cidr, broadcast, scope='global'): + self._as_root('add', + cidr, + 'brd', + broadcast, + 'scope', + scope, + 'dev', + self.name, + options=[ip_version]) + + def delete(self, ip_version, cidr): + self._as_root('del', + cidr, 
+ 'dev', + self.name, + options=[ip_version]) + + def flush(self): + self._as_root('flush', self.name) + + def list(self, scope=None, to=None, filters=None): + if filters is None: + filters = [] + + retval = [] + + if scope: + filters += ['scope', scope] + if to: + filters += ['to', to] + + for line in self._run('show', self.name, *filters).split('\n'): + line = line.strip() + if not line.startswith('inet'): + continue + parts = line.split() + if parts[0] == 'inet6': + version = 6 + scope = parts[3] + broadcast = '::' + else: + version = 4 + if parts[2] == 'brd': + broadcast = parts[3] + scope = parts[5] + else: + # sometimes output of 'ip a' might look like: + # inet 192.168.100.100/24 scope global eth0 + # and broadcast needs to be calculated from CIDR + broadcast = str(netaddr.IPNetwork(parts[1]).broadcast) + scope = parts[3] + + retval.append(dict(cidr=parts[1], + broadcast=broadcast, + scope=scope, + ip_version=version, + dynamic=('dynamic' == parts[-1]))) + return retval + + +class IpRouteCommand(IpDeviceCommandBase): + COMMAND = 'route' + + def add_gateway(self, gateway, metric=None, table=None): + args = ['replace', 'default', 'via', gateway] + if metric: + args += ['metric', metric] + args += ['dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def delete_gateway(self, gateway=None, table=None): + args = ['del', 'default'] + if gateway: + args += ['via', gateway] + args += ['dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def add_gateway_onlink(self, gateway, onlink='onlink', table=None): + args = ['replace', 'default', 'via', gateway] + args += ['dev', self.name] + if onlink: + args += ['onlink'] + if table: + args += ['table', table] + self._as_root(*args) + + def del_route_table(self, table): + args = ['del', 'table', table] + self._as_root(*args) + + def list_onlink_routes(self): + def iterate_routes(): + output = self._run('list', 'dev', self.name, 'scope', 'link') + for line in output.split('\n'): + line = line.strip() + if line and not line.count('src'): + yield line + + return [x for x in iterate_routes()] + + def add_onlink_route(self, cidr): + self._as_root('replace', cidr, 'dev', self.name, 'scope', 'link') + + def delete_onlink_route(self, cidr): + self._as_root('del', cidr, 'dev', self.name, 'scope', 'link') + + def get_gateway(self, scope=None, filters=None): + if filters is None: + filters = [] + + retval = None + + if scope: + filters += ['scope', scope] + + route_list_lines = self._run('list', 'dev', self.name, + *filters).split('\n') + default_route_line = next((x.strip() for x in + route_list_lines if + x.strip().startswith('default')), None) + if default_route_line: + gateway_index = 2 + parts = default_route_line.split() + retval = dict(gateway=parts[gateway_index]) + if 'metric' in parts: + metric_index = parts.index('metric') + 1 + retval.update(metric=int(parts[metric_index])) + + return retval + + def pullup_route(self, interface_name): + """Ensures that the route entry for the interface is before all + others on the same subnet. 
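+        This is done by deleting the competing kernel routes on the same
+        subnet and re-appending them after the given interface's route.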
+ """ + device_list = [] + device_route_list_lines = self._run('list', 'proto', 'kernel', + 'dev', interface_name).split('\n') + for device_route_line in device_route_list_lines: + try: + subnet = device_route_line.split()[0] + except Exception: + continue + subnet_route_list_lines = self._run('list', 'proto', 'kernel', + 'match', subnet).split('\n') + for subnet_route_line in subnet_route_list_lines: + i = iter(subnet_route_line.split()) + while(i.next() != 'dev'): + pass + device = i.next() + try: + while(i.next() != 'src'): + pass + src = i.next() + except Exception: + src = '' + if device != interface_name: + device_list.append((device, src)) + else: + break + + for (device, src) in device_list: + self._as_root('del', subnet, 'dev', device) + if (src != ''): + self._as_root('append', subnet, 'proto', 'kernel', + 'src', src, 'dev', device) + else: + self._as_root('append', subnet, 'proto', 'kernel', + 'dev', device) + + def add_route(self, cidr, ip, table=None): + args = ['replace', cidr, 'via', ip, 'dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + def delete_route(self, cidr, ip, table=None): + args = ['del', cidr, 'via', ip, 'dev', self.name] + if table: + args += ['table', table] + self._as_root(*args) + + +class IpNeighCommand(IpDeviceCommandBase): + COMMAND = 'neigh' + + def add(self, ip_version, ip_address, mac_address): + self._as_root('replace', + ip_address, + 'lladdr', + mac_address, + 'nud', + 'permanent', + 'dev', + self.name, + options=[ip_version]) + + def delete(self, ip_version, ip_address, mac_address): + self._as_root('del', + ip_address, + 'lladdr', + mac_address, + 'dev', + self.name, + options=[ip_version]) + + +class IpNetnsCommand(IpCommandBase): + COMMAND = 'netns' + + def add(self, name): + self._as_root('add', name, use_root_namespace=True) + return IPWrapper(self._parent.root_helper, name) + + def delete(self, name): + self._as_root('delete', name, use_root_namespace=True) + + def execute(self, cmds, addl_env={}, check_exit_code=True): + if not self._parent.root_helper: + raise exceptions.SudoRequired() + ns_params = [] + if self._parent.namespace: + ns_params = ['ip', 'netns', 'exec', self._parent.namespace] + + env_params = [] + if addl_env: + env_params = (['env'] + + ['%s=%s' % pair for pair in addl_env.items()]) + return utils.execute( + ns_params + env_params + list(cmds), + root_helper=self._parent.root_helper, + check_exit_code=check_exit_code) + + def exists(self, name): + output = self._parent._execute('o', 'netns', ['list']) + + for line in output.split('\n'): + if name == line.strip(): + return True + return False + + +def device_exists(device_name, root_helper=None, namespace=None): + try: + address = IPDevice(device_name, root_helper, namespace).link.address + except RuntimeError: + return False + return bool(address) + + +def ensure_device_is_ready(device_name, root_helper=None, namespace=None): + dev = IPDevice(device_name, root_helper, namespace) + try: + # Ensure the device is up, even if it is already up. If the device + # doesn't exist, a RuntimeError will be raised. 
+ dev.link.set_up() + except RuntimeError: + return False + return True + + +def iproute_arg_supported(command, arg, root_helper=None): + command += ['help'] + stdout, stderr = utils.execute(command, root_helper=root_helper, + check_exit_code=False, return_stderr=True) + return any(arg in line for line in stderr.split('\n')) diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/agent/linux/ovs_lib.py b/icehouse-patches/neutron/vlan2vlan/neutron/agent/linux/ovs_lib.py new file mode 100644 index 00000000..b6b33872 --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/agent/linux/ovs_lib.py @@ -0,0 +1,611 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils +from neutron.common import exceptions +from neutron.common import utils as common_utils +from neutron.openstack.common import excutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as p_const +# TODO(JLH) Should we remove the explicit include of the ovs plugin here +from neutron.plugins.openvswitch.common import constants + +# Default timeout for ovs-vsctl command +DEFAULT_OVS_VSCTL_TIMEOUT = 10 +OPTS = [ + cfg.IntOpt('ovs_vsctl_timeout', + default=DEFAULT_OVS_VSCTL_TIMEOUT, + help=_('Timeout in seconds for ovs-vsctl commands')), +] +cfg.CONF.register_opts(OPTS) + +LOG = logging.getLogger(__name__) + + +class VifPort: + + def __init__(self, port_name, ofport, vif_id, vif_mac, switch): + self.port_name = port_name + self.ofport = ofport + self.vif_id = vif_id + self.vif_mac = vif_mac + self.switch = switch + + def __str__(self): + return ("iface-id=" + self.vif_id + ", vif_mac=" + + self.vif_mac + ", port_name=" + self.port_name + + ", ofport=" + str(self.ofport) + ", bridge_name=" + + self.switch.br_name) + + +class BaseOVS(object): + + def __init__(self, root_helper): + self.root_helper = root_helper + self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout + + def run_vsctl(self, args, check_error=False): + full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args + try: + return utils.execute(full_args, root_helper=self.root_helper) + except Exception as e: + with excutils.save_and_reraise_exception() as ctxt: + LOG.error(_("Unable to execute %(cmd)s. 
" + "Exception: %(exception)s"), + {'cmd': full_args, 'exception': e}) + if not check_error: + ctxt.reraise = False + + def add_bridge(self, bridge_name): + self.run_vsctl(["--", "--may-exist", "add-br", bridge_name]) + return OVSBridge(bridge_name, self.root_helper) + + def delete_bridge(self, bridge_name): + self.run_vsctl(["--", "--if-exists", "del-br", bridge_name]) + + def bridge_exists(self, bridge_name): + try: + self.run_vsctl(['br-exists', bridge_name], check_error=True) + except RuntimeError as e: + with excutils.save_and_reraise_exception() as ctxt: + if 'Exit code: 2\n' in str(e): + ctxt.reraise = False + return False + return True + + def get_bridge_name_for_port_name(self, port_name): + try: + return self.run_vsctl(['port-to-br', port_name], check_error=True) + except RuntimeError as e: + with excutils.save_and_reraise_exception() as ctxt: + if 'Exit code: 1\n' in str(e): + ctxt.reraise = False + + def port_exists(self, port_name): + return bool(self.get_bridge_name_for_port_name(port_name)) + + +class OVSBridge(BaseOVS): + + def __init__(self, br_name, root_helper): + super(OVSBridge, self).__init__(root_helper) + self.br_name = br_name + self.defer_apply_flows = False + self.deferred_flows = {'add': '', 'mod': '', 'del': ''} + + def set_controller(self, controller_names): + vsctl_command = ['--', 'set-controller', self.br_name] + vsctl_command.extend(controller_names) + self.run_vsctl(vsctl_command, check_error=True) + + def del_controller(self): + self.run_vsctl(['--', 'del-controller', self.br_name], + check_error=True) + + def get_controller(self): + res = self.run_vsctl(['--', 'get-controller', self.br_name], + check_error=True) + if res: + return res.strip().split('\n') + return res + + def set_secure_mode(self): + self.run_vsctl(['--', 'set-fail-mode', self.br_name, 'secure'], + check_error=True) + + def set_protocols(self, protocols): + self.run_vsctl(['--', 'set', 'bridge', self.br_name, + "protocols=%s" % protocols], + check_error=True) + + def create(self): + self.add_bridge(self.br_name) + + def destroy(self): + self.delete_bridge(self.br_name) + + def reset_bridge(self): + self.destroy() + self.create() + + def add_port(self, port_name): + self.run_vsctl(["--", "--may-exist", "add-port", self.br_name, + port_name]) + return self.get_port_ofport(port_name) + + def delete_port(self, port_name): + self.run_vsctl(["--", "--if-exists", "del-port", self.br_name, + port_name]) + + def set_db_attribute(self, table_name, record, column, value): + args = ["set", table_name, record, "%s=%s" % (column, value)] + self.run_vsctl(args) + + def clear_db_attribute(self, table_name, record, column): + args = ["clear", table_name, record, column] + self.run_vsctl(args) + + def run_ofctl(self, cmd, args, process_input=None): + full_args = ["ovs-ofctl", cmd, self.br_name] + args + try: + return utils.execute(full_args, root_helper=self.root_helper, + process_input=process_input) + except Exception as e: + LOG.error(_("Unable to execute %(cmd)s. 
Exception: %(exception)s"), + {'cmd': full_args, 'exception': e}) + + def count_flows(self): + flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:] + return len(flow_list) - 1 + + def remove_all_flows(self): + self.run_ofctl("del-flows", []) + + def get_port_ofport(self, port_name): + ofport = self.db_get_val("Interface", port_name, "ofport") + # This can return a non-integer string, like '[]' so ensure a + # common failure case + try: + int(ofport) + return ofport + except ValueError: + return constants.INVALID_OFPORT + + def get_datapath_id(self): + return self.db_get_val('Bridge', + self.br_name, 'datapath_id').strip('"') + + def add_flow(self, **kwargs): + flow_str = _build_flow_expr_str(kwargs, 'add') + if self.defer_apply_flows: + self.deferred_flows['add'] += flow_str + '\n' + else: + self.run_ofctl("add-flow", [flow_str]) + + def mod_flow(self, **kwargs): + flow_str = _build_flow_expr_str(kwargs, 'mod') + if self.defer_apply_flows: + self.deferred_flows['mod'] += flow_str + '\n' + else: + self.run_ofctl("mod-flows", [flow_str]) + + def delete_flows(self, **kwargs): + flow_expr_str = _build_flow_expr_str(kwargs, 'del') + if self.defer_apply_flows: + self.deferred_flows['del'] += flow_expr_str + '\n' + else: + self.run_ofctl("del-flows", [flow_expr_str]) + + def dump_flows_for_table(self, table): + retval = None + flow_str = "table=%s" % table + flows = self.run_ofctl("dump-flows", [flow_str]) + if flows: + retval = '\n'.join(item for item in flows.splitlines() + if 'NXST' not in item) + return retval + + def defer_apply_on(self): + LOG.debug(_('defer_apply_on')) + self.defer_apply_flows = True + + def defer_apply_off(self): + LOG.debug(_('defer_apply_off')) + # Note(ethuleau): stash flows and disable deferred mode. Then apply + # flows from the stashed reference to be sure to not purge flows that + # were added between two ofctl commands. + stashed_deferred_flows, self.deferred_flows = ( + self.deferred_flows, {'add': '', 'mod': '', 'del': ''} + ) + self.defer_apply_flows = False + for action, flows in stashed_deferred_flows.items(): + if flows: + LOG.debug(_('Applying following deferred flows ' + 'to bridge %s'), self.br_name) + for line in flows.splitlines(): + LOG.debug(_('%(action)s: %(flow)s'), + {'action': action, 'flow': line}) + self.run_ofctl('%s-flows' % action, ['-'], flows) + + def add_tunnel_port(self, port_name, remote_ip, local_ip, + tunnel_type=p_const.TYPE_GRE, + vxlan_udp_port=constants.VXLAN_UDP_PORT, + dont_fragment=True): + vsctl_command = ["--", "--may-exist", "add-port", self.br_name, + port_name] + vsctl_command.extend(["--", "set", "Interface", port_name, + "type=%s" % tunnel_type]) + if tunnel_type == p_const.TYPE_VXLAN: + # Only set the VXLAN UDP port if it's not the default + if vxlan_udp_port != constants.VXLAN_UDP_PORT: + vsctl_command.append("options:dst_port=%s" % vxlan_udp_port) + vsctl_command.append(("options:df_default=%s" % + bool(dont_fragment)).lower()) + vsctl_command.extend(["options:remote_ip=%s" % remote_ip, + "options:local_ip=%s" % local_ip, + "options:in_key=flow", + "options:out_key=flow"]) + self.run_vsctl(vsctl_command) + ofport = self.get_port_ofport(port_name) + if (tunnel_type == p_const.TYPE_VXLAN and + ofport == constants.INVALID_OFPORT): + LOG.error(_('Unable to create VXLAN tunnel port. 
Please ensure ' + 'that an openvswitch version that supports VXLAN is ' + 'installed.')) + return ofport + + def add_flowkey_tunnel_port(self, port_name, + tunnel_type=p_const.TYPE_GRE, + vxlan_udp_port=constants.VXLAN_UDP_PORT, + dont_fragment=True): + vsctl_command = ["--", "--may-exist", "add-port", self.br_name, + port_name] + vsctl_command.extend(["--", "set", "Interface", port_name, + "type=%s" % tunnel_type]) + if tunnel_type == p_const.TYPE_VXLAN: + # Only set the VXLAN UDP port if it's not the default + if vxlan_udp_port != constants.VXLAN_UDP_PORT: + vsctl_command.append("options:dst_port=%s" % vxlan_udp_port) + vsctl_command.append(("options:df_default=%s" % + bool(dont_fragment)).lower()) + vsctl_command.extend(["options:remote_ip=flow", + "options:key=flow"]) + self.run_vsctl(vsctl_command) + ofport = self.get_port_ofport(port_name) + if (tunnel_type == p_const.TYPE_VXLAN and + ofport == constants.INVALID_OFPORT): + LOG.error(_('Unable to create VXLAN tunnel port. Please ensure ' + 'that an openvswitch version that supports VXLAN is ' + 'installed.')) + return ofport + + def add_patch_port(self, local_name, remote_name): + self.run_vsctl(["add-port", self.br_name, local_name, + "--", "set", "Interface", local_name, + "type=patch", "options:peer=%s" % remote_name]) + return self.get_port_ofport(local_name) + + def db_get_map(self, table, record, column, check_error=False): + output = self.run_vsctl(["get", table, record, column], check_error) + if output: + output_str = output.rstrip("\n\r") + return self.db_str_to_map(output_str) + return {} + + def db_get_val(self, table, record, column, check_error=False): + output = self.run_vsctl(["get", table, record, column], check_error) + if output: + return output.rstrip("\n\r") + + def db_str_to_map(self, full_str): + list = full_str.strip("{}").split(", ") + ret = {} + for e in list: + if e.find("=") == -1: + continue + arr = e.split("=") + ret[arr[0]] = arr[1].strip("\"") + return ret + + def get_port_name_list(self): + res = self.run_vsctl(["list-ports", self.br_name], check_error=True) + if res: + return res.strip().split("\n") + return [] + + def get_port_stats(self, port_name): + return self.db_get_map("Interface", port_name, "statistics") + + def get_xapi_iface_id(self, xs_vif_uuid): + args = ["xe", "vif-param-get", "param-name=other-config", + "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid] + try: + return utils.execute(args, root_helper=self.root_helper).strip() + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Unable to execute %(cmd)s. 
" + "Exception: %(exception)s"), + {'cmd': args, 'exception': e}) + + # returns a VIF object for each VIF port + def get_vif_ports(self): + edge_ports = [] + port_names = self.get_port_name_list() + for name in port_names: + external_ids = self.db_get_map("Interface", name, "external_ids", + check_error=True) + ofport = self.db_get_val("Interface", name, "ofport", + check_error=True) + if "iface-id" in external_ids and "attached-mac" in external_ids: + p = VifPort(name, ofport, external_ids["iface-id"], + external_ids["attached-mac"], self) + edge_ports.append(p) + elif ("xs-vif-uuid" in external_ids and + "attached-mac" in external_ids): + # if this is a xenserver and iface-id is not automatically + # synced to OVS from XAPI, we grab it from XAPI directly + iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"]) + p = VifPort(name, ofport, iface_id, + external_ids["attached-mac"], self) + edge_ports.append(p) + + return edge_ports + + def get_vif_port_set(self): + port_names = self.get_port_name_list() + edge_ports = set() + args = ['--format=json', '--', '--columns=name,external_ids,ofport', + 'list', 'Interface'] + result = self.run_vsctl(args, check_error=True) + if not result: + return edge_ports + for row in jsonutils.loads(result)['data']: + name = row[0] + if name not in port_names: + continue + external_ids = dict(row[1][1]) + # Do not consider VIFs which aren't yet ready + # This can happen when ofport values are either [] or ["set", []] + # We will therefore consider only integer values for ofport + ofport = row[2] + try: + int_ofport = int(ofport) + except (ValueError, TypeError): + LOG.warn(_("Found not yet ready openvswitch port: %s"), row) + else: + if int_ofport > 0: + if ("iface-id" in external_ids and + "attached-mac" in external_ids): + edge_ports.add(external_ids['iface-id']) + elif ("xs-vif-uuid" in external_ids and + "attached-mac" in external_ids): + # if this is a xenserver and iface-id is not + # automatically synced to OVS from XAPI, we grab it + # from XAPI directly + iface_id = self.get_xapi_iface_id( + external_ids["xs-vif-uuid"]) + edge_ports.add(iface_id) + else: + LOG.warn(_("Found failed openvswitch port: %s"), row) + return edge_ports + + def get_port_tag_dict(self): + """Get a dict of port names and associated vlan tags. + + e.g. the returned dict is of the following form:: + + {u'int-br-eth2': [], + u'patch-tun': [], + u'qr-76d9e6b6-21': 1, + u'tapce5318ff-78': 1, + u'tape1400310-e6': 1} + + The TAG ID is only available in the "Port" table and is not available + in the "Interface" table queried by the get_vif_port_set() method. 
+ + """ + port_names = self.get_port_name_list() + args = ['--format=json', '--', '--columns=name,tag', 'list', 'Port'] + result = self.run_vsctl(args, check_error=True) + port_tag_dict = {} + if not result: + return port_tag_dict + for name, tag in jsonutils.loads(result)['data']: + if name not in port_names: + continue + # 'tag' can be [u'set', []] or an integer + if isinstance(tag, list): + tag = tag[1] + port_tag_dict[name] = tag + return port_tag_dict + + def get_vif_port_by_id(self, port_id): + args = ['--format=json', '--', '--columns=external_ids,name,ofport', + 'find', 'Interface', + 'external_ids:iface-id="%s"' % port_id] + result = self.run_vsctl(args) + if not result: + return + json_result = jsonutils.loads(result) + try: + # Retrieve the indexes of the columns we're looking for + headings = json_result['headings'] + ext_ids_idx = headings.index('external_ids') + name_idx = headings.index('name') + ofport_idx = headings.index('ofport') + # If data attribute is missing or empty the line below will raise + # an exeception which will be captured in this block. + # We won't deal with the possibility of ovs-vsctl return multiple + # rows since the interface identifier is unique + data = json_result['data'][0] + port_name = data[name_idx] + switch = get_bridge_for_iface(self.root_helper, port_name) + if switch != self.br_name: + LOG.info(_("Port: %(port_name)s is on %(switch)s," + " not on %(br_name)s"), {'port_name': port_name, + 'switch': switch, + 'br_name': self.br_name}) + return + ofport = data[ofport_idx] + # ofport must be integer otherwise return None + if not isinstance(ofport, int) or ofport == -1: + LOG.warn(_("ofport: %(ofport)s for VIF: %(vif)s is not a " + "positive integer"), {'ofport': ofport, + 'vif': port_id}) + return + # Find VIF's mac address in external ids + ext_id_dict = dict((item[0], item[1]) for item in + data[ext_ids_idx][1]) + vif_mac = ext_id_dict['attached-mac'] + return VifPort(port_name, ofport, port_id, vif_mac, self) + except Exception as e: + LOG.warn(_("Unable to parse interface details. 
Exception: %s"), e) + return + + def delete_ports(self, all_ports=False): + if all_ports: + port_names = self.get_port_name_list() + else: + port_names = (port.port_name for port in self.get_vif_ports()) + + for port_name in port_names: + self.delete_port(port_name) + + def get_local_port_mac(self): + """Retrieve the mac of the bridge's local port.""" + address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address + if address: + return address + else: + msg = _('Unable to determine mac address for %s') % self.br_name + raise Exception(msg) + + def __enter__(self): + self.create() + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.destroy() + + +def exec_host_ip_config_up(eth_port, num, gw_ip): + port = eth_port + ':' + str(num) + args = ["ifconfig", port, '%s/24' % gw_ip] + try: + return utils.execute(args).strip() + except Exception: + LOG.exception(_("Interface %s not found."), eth_port) + return None + + +def exec_host_ip_config_down(eth_port, num): + port = eth_port + ':' + str(num) + args = ["ifconfig", port, 'down'] + try: + return utils.execute(args).strip() + except Exception: + LOG.exception(_("Interface %s not found."), eth_port) + return None + + +def get_bridge_for_iface(root_helper, iface): + args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout, + "iface-to-br", iface] + try: + return utils.execute(args, root_helper=root_helper).strip() + except Exception: + LOG.exception(_("Interface %s not found."), iface) + return None + + +def get_bridges(root_helper): + args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout, + "list-br"] + try: + return utils.execute(args, root_helper=root_helper).strip().split("\n") + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e) + + +def get_bridge_external_bridge_id(root_helper, bridge): + args = ["ovs-vsctl", "--timeout=2", "br-get-external-id", + bridge, "bridge-id"] + try: + return utils.execute(args, root_helper=root_helper).strip() + except Exception: + LOG.exception(_("Bridge %s not found."), bridge) + return None + + +def _build_flow_expr_str(flow_dict, cmd): + flow_expr_arr = [] + actions = None + + if cmd == 'add': + flow_expr_arr.append("hard_timeout=%s" % + flow_dict.pop('hard_timeout', '0')) + flow_expr_arr.append("idle_timeout=%s" % + flow_dict.pop('idle_timeout', '0')) + flow_expr_arr.append("priority=%s" % + flow_dict.pop('priority', '1')) + elif 'priority' in flow_dict: + msg = _("Cannot match priority on flow deletion or modification") + raise exceptions.InvalidInput(error_message=msg) + + if cmd != 'del': + if "actions" not in flow_dict: + msg = _("Must specify one or more actions on flow addition" + " or modification") + raise exceptions.InvalidInput(error_message=msg) + actions = "actions=%s" % flow_dict.pop('actions') + + for key, value in flow_dict.iteritems(): + if key == 'proto': + flow_expr_arr.append(value) + else: + flow_expr_arr.append("%s=%s" % (key, str(value))) + + if actions: + flow_expr_arr.append(actions) + + return ','.join(flow_expr_arr) + + +def ofctl_arg_supported(root_helper, cmd, args): + '''Verify if ovs-ofctl binary supports command with specific args. + + :param root_helper: utility to use when running shell cmds. + :param cmd: ovs-vsctl command to use for test. + :param args: arguments to test with command. + :returns: a boolean if the args supported. 
+ ''' + supported = True + br_name = 'br-test-%s' % common_utils.get_random_string(6) + test_br = OVSBridge(br_name, root_helper) + test_br.reset_bridge() + + full_args = ["ovs-ofctl", cmd, test_br.br_name] + args + try: + utils.execute(full_args, root_helper=root_helper) + except Exception: + supported = False + + test_br.destroy() + return supported diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/common/config.py b/icehouse-patches/neutron/vlan2vlan/neutron/common/config.py new file mode 100644 index 00000000..82567228 --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/common/config.py @@ -0,0 +1,195 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Routines for configuring Neutron +""" + +import os + +from oslo.config import cfg +from oslo.db import options as db_options +from oslo import messaging +from paste import deploy + +from neutron.api.v2 import attributes +from neutron.common import utils +from neutron.openstack.common import log as logging +from neutron import version + + +LOG = logging.getLogger(__name__) + +core_opts = [ + cfg.StrOpt('bind_host', default='0.0.0.0', + help=_("The host IP to bind to")), + cfg.IntOpt('bind_port', default=9696, + help=_("The port to bind to")), + cfg.StrOpt('api_paste_config', default="api-paste.ini", + help=_("The API paste config file to use")), + cfg.StrOpt('api_extensions_path', default="", + help=_("The path for API extensions")), + cfg.StrOpt('policy_file', default="policy.json", + help=_("The policy file to use")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('core_plugin', + help=_("The core plugin Neutron will use")), + cfg.ListOpt('service_plugins', default=[], + help=_("The service plugins Neutron will use")), + cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", + help=_("The base MAC address Neutron will use for VIFs")), + cfg.IntOpt('mac_generation_retries', default=16, + help=_("How many times Neutron will retry MAC generation")), + cfg.BoolOpt('allow_bulk', default=True, + help=_("Allow the usage of the bulk API")), + cfg.BoolOpt('allow_pagination', default=False, + help=_("Allow the usage of the pagination")), + cfg.BoolOpt('allow_sorting', default=False, + help=_("Allow the usage of the sorting")), + cfg.StrOpt('pagination_max_limit', default="-1", + help=_("The maximum number of items returned in a single " + "response, value was 'infinite' or negative integer " + "means no limit")), + cfg.IntOpt('max_dns_nameservers', default=5, + help=_("Maximum number of DNS nameservers")), + cfg.IntOpt('max_subnet_host_routes', default=20, + help=_("Maximum number of host routes per subnet")), + cfg.IntOpt('max_fixed_ips_per_port', default=5, + help=_("Maximum number of fixed ips per port")), + cfg.IntOpt('dhcp_lease_duration', default=86400, + deprecated_name='dhcp_lease_time', + help=_("DHCP lease duration (in seconds). 
Use -1 to tell " + "dnsmasq to use infinite lease times.")), + cfg.BoolOpt('dhcp_agent_notification', default=True, + help=_("Allow sending resource operation" + " notification to DHCP agent")), + cfg.BoolOpt('allow_overlapping_ips', default=False, + help=_("Allow overlapping IP support in Neutron")), + cfg.StrOpt('host', default=utils.get_hostname(), + help=_("The hostname Neutron is running on")), + cfg.BoolOpt('force_gateway_on_subnet', default=False, + help=_("Ensure that configured gateway is on subnet")), + cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, + help=_("Send notification to nova when port status changes")), + cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, + help=_("Send notification to nova when port data (fixed_ips/" + "floatingip) changes so nova can update its cache.")), + cfg.StrOpt('nova_url', + default='http://127.0.0.1:8774/v2', + help=_('URL for connection to nova')), + cfg.StrOpt('nova_admin_username', + help=_('Username for connecting to nova in admin context')), + cfg.StrOpt('nova_admin_password', + help=_('Password for connection to nova in admin context'), + secret=True), + cfg.StrOpt('nova_admin_tenant_id', + help=_('The uuid of the admin nova tenant')), + cfg.StrOpt('nova_admin_auth_url', + default='http://localhost:5000/v2.0', + help=_('Authorization URL for connecting to nova in admin ' + 'context')), + cfg.StrOpt('nova_ca_certificates_file', + help=_('CA file for novaclient to verify server certificates')), + cfg.BoolOpt('nova_api_insecure', default=False, + help=_("If True, ignore any SSL validation issues")), + cfg.StrOpt('nova_region_name', + help=_('Name of nova region to use. Useful if keystone manages' + ' more than one region.')), + cfg.IntOpt('send_events_interval', default=2, + help=_('Number of seconds between sending events to nova if ' + 'there are any events to send.')), + + # add by j00209498 + cfg.StrOpt('cascade_str', default='cascaded', + help=_('cascade_str identity cascading openstack or cascaded' + 'openstack, value = cascaded or cascading.')), +] + +core_cli_opts = [ + cfg.StrOpt('state_path', + default='/var/lib/neutron', + help=_("Where to store Neutron state files. " + "This directory must be writable by the agent.")), +] + +# Register the configuration options +cfg.CONF.register_opts(core_opts) +cfg.CONF.register_cli_opts(core_cli_opts) + +# Ensure that the control exchange is set correctly +messaging.set_transport_defaults(control_exchange='neutron') +_SQL_CONNECTION_DEFAULT = 'sqlite://' +# Update the default QueuePool parameters. These can be tweaked by the +# configuration variables - max_pool_size, max_overflow and pool_timeout +db_options.set_defaults(cfg.CONF, + connection=_SQL_CONNECTION_DEFAULT, + sqlite_db='', max_pool_size=10, + max_overflow=20, pool_timeout=10) + + +def init(args, **kwargs): + cfg.CONF(args=args, project='neutron', + version='%%prog %s' % version.version_info.release_string(), + **kwargs) + + # FIXME(ihrachys): if import is put in global, circular import + # failure occurs + from neutron.common import rpc as n_rpc + n_rpc.init(cfg.CONF) + + # Validate that the base_mac is of the correct format + msg = attributes._validate_regex(cfg.CONF.base_mac, + attributes.MAC_PATTERN) + if msg: + msg = _("Base MAC: %s") % msg + raise Exception(msg) + + +def setup_logging(conf): + """Sets up the logging options for a log with supplied name. 
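+
+    Note that the passed-in conf is not referenced here; logging options
+    are read from the global configuration when logging.setup() runs.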
+ + :param conf: a cfg.ConfOpts object + """ + product_name = "neutron" + logging.setup(product_name) + LOG.info(_("Logging enabled!")) + + +def load_paste_app(app_name): + """Builds and returns a WSGI app from a paste config file. + + :param app_name: Name of the application to load + :raises ConfigFilesNotFoundError when config file cannot be located + :raises RuntimeError when application cannot be loaded from config file + """ + + config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config) + if not config_path: + raise cfg.ConfigFilesNotFoundError( + config_files=[cfg.CONF.api_paste_config]) + config_path = os.path.abspath(config_path) + LOG.info(_("Config paste file: %s"), config_path) + + try: + app = deploy.loadapp("config:%s" % config_path, name=app_name) + except (LookupError, ImportError): + msg = (_("Unable to load %(app_name)s from " + "configuration file %(config_path)s.") % + {'app_name': app_name, + 'config_path': config_path}) + LOG.exception(msg) + raise RuntimeError(msg) + return app diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/common/exceptions.py b/icehouse-patches/neutron/vlan2vlan/neutron/common/exceptions.py new file mode 100644 index 00000000..03b99f8c --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/common/exceptions.py @@ -0,0 +1,328 @@ +# Copyright 2011 VMware, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Neutron base exception handling. +""" + +from neutron.openstack.common import excutils + + +class NeutronException(Exception): + + """Base Neutron Exception. + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
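+
+    For example (an illustrative subclass, not part of this module)::
+
+        class PortNotReady(NeutronException):
+            message = _("Port %(port_id)s is not ready")
+
+        raise PortNotReady(port_id=port_id)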
+ """ + message = _("An unknown exception occurred.") + + def __init__(self, **kwargs): + try: + super(NeutronException, self).__init__(self.message % kwargs) + self.msg = self.message % kwargs + except Exception: + with excutils.save_and_reraise_exception() as ctxt: + if not self.use_fatal_exceptions(): + ctxt.reraise = False + # at least get the core message out if something happened + super(NeutronException, self).__init__(self.message) + + def __unicode__(self): + return unicode(self.msg) + + def use_fatal_exceptions(self): + return False + + +class BadRequest(NeutronException): + message = _('Bad %(resource)s request: %(msg)s') + + +class NotFound(NeutronException): + pass + + +class Conflict(NeutronException): + pass + + +class NotAuthorized(NeutronException): + message = _("Not authorized.") + + +class ServiceUnavailable(NeutronException): + message = _("The service is unavailable") + + +class AdminRequired(NotAuthorized): + message = _("User does not have admin privileges: %(reason)s") + + +class PolicyNotAuthorized(NotAuthorized): + message = _("Policy doesn't allow %(action)s to be performed.") + + +class NetworkNotFound(NotFound): + message = _("Network %(net_id)s could not be found") + + +class SubnetNotFound(NotFound): + message = _("Subnet %(subnet_id)s could not be found") + + +class PortNotFound(NotFound): + message = _("Port %(port_id)s could not be found") + + +class PortNotFoundOnNetwork(NotFound): + message = _("Port %(port_id)s could not be found " + "on network %(net_id)s") + + +class PolicyFileNotFound(NotFound): + message = _("Policy configuration policy.json could not be found") + + +class PolicyInitError(NeutronException): + message = _("Failed to init policy %(policy)s because %(reason)s") + + +class PolicyCheckError(NeutronException): + message = _("Failed to check policy %(policy)s because %(reason)s") + + +class StateInvalid(BadRequest): + message = _("Unsupported port state: %(port_state)s") + + +class InUse(NeutronException): + message = _("The resource is inuse") + + +class NetworkInUse(InUse): + message = _("Unable to complete operation on network %(net_id)s. " + "There are one or more ports still in use on the network.") + + +class SubnetInUse(InUse): + message = _("Unable to complete operation on subnet %(subnet_id)s. " + "One or more ports have an IP allocation from this subnet.") + + +class PortInUse(InUse): + message = _("Unable to complete operation on port %(port_id)s " + "for network %(net_id)s. Port already has an attached" + "device %(device_id)s.") + + +class MacAddressInUse(InUse): + message = _("Unable to complete operation for network %(net_id)s. " + "The mac address %(mac)s is in use.") + + +class HostRoutesExhausted(BadRequest): + # NOTE(xchenum): probably make sense to use quota exceeded exception? + message = _("Unable to complete operation for %(subnet_id)s. " + "The number of host routes exceeds the limit %(quota)s.") + + +class DNSNameServersExhausted(BadRequest): + # NOTE(xchenum): probably make sense to use quota exceeded exception? + message = _("Unable to complete operation for %(subnet_id)s. " + "The number of DNS nameservers exceeds the limit %(quota)s.") + + +class IpAddressInUse(InUse): + message = _("Unable to complete operation for network %(net_id)s. " + "The IP address %(ip_address)s is in use.") + + +class VlanIdInUse(InUse): + message = _("Unable to create the network. 
" + "The VLAN %(vlan_id)s on physical network " + "%(physical_network)s is in use.") + + +class FlatNetworkInUse(InUse): + message = _("Unable to create the flat network. " + "Physical network %(physical_network)s is in use.") + + +class TunnelIdInUse(InUse): + message = _("Unable to create the network. " + "The tunnel ID %(tunnel_id)s is in use.") + + +class TenantNetworksDisabled(ServiceUnavailable): + message = _("Tenant network creation is not enabled.") + + +class ResourceExhausted(ServiceUnavailable): + pass + + +class NoNetworkAvailable(ResourceExhausted): + message = _("Unable to create the network. " + "No tenant network is available for allocation.") + + +class SubnetMismatchForPort(BadRequest): + message = _("Subnet on port %(port_id)s does not match " + "the requested subnet %(subnet_id)s") + + +class MalformedRequestBody(BadRequest): + message = _("Malformed request body: %(reason)s") + + +class Invalid(NeutronException): + + def __init__(self, message=None): + self.message = message + super(Invalid, self).__init__() + + +class InvalidInput(BadRequest): + message = _("Invalid input for operation: %(error_message)s.") + + +class InvalidAllocationPool(BadRequest): + message = _("The allocation pool %(pool)s is not valid.") + + +class OverlappingAllocationPools(Conflict): + message = _("Found overlapping allocation pools:" + "%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.") + + +class OutOfBoundsAllocationPool(BadRequest): + message = _("The allocation pool %(pool)s spans " + "beyond the subnet cidr %(subnet_cidr)s.") + + +class MacAddressGenerationFailure(ServiceUnavailable): + message = _("Unable to generate unique mac on network %(net_id)s.") + + +class IpAddressGenerationFailure(Conflict): + message = _("No more IP addresses available on network %(net_id)s.") + + +class BridgeDoesNotExist(NeutronException): + message = _("Bridge %(bridge)s does not exist.") + + +class PreexistingDeviceFailure(NeutronException): + message = _("Creation failed. %(dev_name)s already exists.") + + +class SudoRequired(NeutronException): + message = _("Sudo privilege is required to run this command.") + + +class QuotaResourceUnknown(NotFound): + message = _("Unknown quota resources %(unknown)s.") + + +class OverQuota(Conflict): + message = _("Quota exceeded for resources: %(overs)s") + + +class QuotaMissingTenant(BadRequest): + message = _("Tenant-id was missing from Quota request") + + +class InvalidQuotaValue(Conflict): + message = _("Change would make usage less than 0 for the following " + "resources: %(unders)s") + + +class InvalidSharedSetting(Conflict): + message = _("Unable to reconfigure sharing settings for network " + "%(network)s. 
Multiple tenants are using it") + + +class InvalidExtensionEnv(BadRequest): + message = _("Invalid extension environment: %(reason)s") + + +class ExtensionsNotFound(NotFound): + message = _("Extensions not found: %(extensions)s") + + +class InvalidContentType(NeutronException): + message = _("Invalid content type %(content_type)s") + + +class ExternalIpAddressExhausted(BadRequest): + message = _("Unable to find any IP address on external " + "network %(net_id)s.") + + +class TooManyExternalNetworks(NeutronException): + message = _("More than one external network exists") + + +class InvalidConfigurationOption(NeutronException): + message = _("An invalid value was provided for %(opt_name)s: " + "%(opt_value)s") + + +class GatewayConflictWithAllocationPools(InUse): + message = _("Gateway ip %(ip_address)s conflicts with " + "allocation pool %(pool)s") + + +class GatewayIpInUse(InUse): + message = _("Current gateway ip %(ip_address)s already in use " + "by port %(port_id)s. Unable to update.") + + +class NetworkVlanRangeError(NeutronException): + message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'") + + def __init__(self, **kwargs): + # Convert vlan_range tuple to 'start:end' format for display + if isinstance(kwargs['vlan_range'], tuple): + kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range'] + super(NetworkVlanRangeError, self).__init__(**kwargs) + + +class NetworkVxlanPortRangeError(NeutronException): + message = _("Invalid network VXLAN port range: '%(vxlan_range)s'") + + +class VxlanNetworkUnsupported(NeutronException): + message = _("VXLAN Network unsupported.") + + +class DuplicatedExtension(NeutronException): + message = _("Found duplicate extension: %(alias)s") + + +class DeviceIDNotOwnedByTenant(Conflict): + message = _("The following device_id %(device_id)s is not owned by your " + "tenant or matches another tenants router.") + + +class InvalidCIDR(BadRequest): + message = _("Invalid CIDR %(input)s given as IP prefix") + + +class PortBindAZError(BadRequest): + message = _("Network %(net_id)s is local network, " + "cannot be created in host %(host)s AZ.") diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/db/cascade_db.py b/icehouse-patches/neutron/vlan2vlan/neutron/db/cascade_db.py new file mode 100644 index 00000000..d3e520ea --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/db/cascade_db.py @@ -0,0 +1,162 @@ +''' +Created on 2014-8-5 + +@author: j00209498 +''' +from oslo.db import exception as db_exc + +import sqlalchemy as sa +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api + +from neutron.common import exceptions as q_exc +from neutron.common import log +from neutron.common import utils +from neutron.db import model_base +from neutron.extensions import dvr as ext_dvr +from neutron import manager +from neutron.openstack.common import log as logging +from oslo.config import cfg +from sqlalchemy.orm import exc + +LOG = logging.getLogger(__name__) + +big2layer_vni_opts = [ + cfg.StrOpt('big2layer_vni_range', + default="4097:20000", + help=_('The big 2 layer vxlan vni range used for ' + 'CascadeDBMixin instances by Neutron')), +] +cfg.CONF.register_opts(big2layer_vni_opts) + + +class CascadeAZNetworkBinding(model_base.BASEV2): + + """Represents a v2 neutron distributed virtual router mac address.""" + + __tablename__ = 'cascade_az_network_bind' + + network_id = sa.Column(sa.String(36), primary_key=True, nullable=False) + host = sa.Column(sa.String(255), primary_key=True, nullable=False) + + +class 
CascadeRouterAZExternipMapping(model_base.BASEV2): + + """Represents a v2 neutron distributed virtual router mac address.""" + + __tablename__ = 'cascade_router_az_externip_map' + + router_id = sa.Column(sa.String(36), primary_key=True, nullable=False) + host = sa.Column(sa.String(255), primary_key=True, nullable=False) + extern_ip = sa.Column(sa.String(64), nullable=False) + + +class CascadeDBMixin(object): + + @property + def l3_rpc_notifier(self): + if not hasattr(self, '_l3_rpc_notifier'): + self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() + return self._l3_rpc_notifier + + def is_big2layer_vni(self, seg_id): + vni = cfg.CONF.big2layer_vni_range.split(':') + if(seg_id >= int(vni[0]) and seg_id <= int(vni[1])): + return True + else: + return False + + def get_binding_az_by_network_id(self, context, net_id): + try: + query = context.session.query(CascadeAZNetworkBinding) + ban = query.filter( + CascadeAZNetworkBinding.network_id == net_id).one() + except exc.NoResultFound: + return None + return ban['host'] + + def add_binding_az_network_id(self, context, binding_host, net_id): + try: + with context.session.begin(subtransactions=True): + dvr_mac_binding = CascadeAZNetworkBinding( + network_id=net_id, host=binding_host) + context.session.add(dvr_mac_binding) + LOG.debug("add az_host %(host)s for network %(network_id)s ", + {'host': binding_host, 'network_id': net_id}) + except db_exc.DBDuplicateEntry: + LOG.debug("az_host %(host)s exists for network %(network_id)s," + " DBDuplicateEntry error.", + {'host': binding_host, 'network_id': net_id}) + + def get_extern_ip_by_router_id_and_host(self, context, router_id, host): + rae = self.get_router_az_extern_ip_mapping(context, router_id, host) + if(rae): + return rae['extern_ip'] + return None +# try: +# query = context.session.query(CascadeRouterAZExternipMapping) +# erh = query.filter( +# CascadeRouterAZExternipMapping.router_id == router_id, +# CascadeRouterAZExternipMapping.host == host).one() +# except exc.NoResultFound: +# return None +# return erh['extern_ip'] + + def get_router_az_extern_ip_mapping(self, context, router_id, host): + try: + query = context.session.query(CascadeRouterAZExternipMapping) + erh = query.filter( + CascadeRouterAZExternipMapping.router_id == router_id, + CascadeRouterAZExternipMapping.host == host).one() + except exc.NoResultFound: + return None + return erh + + def update_router_az_extern_ip_mapping(self, context, router_id, + host, extern_ip): + if extern_ip is None: + self.del_router_az_extern_ip_mapping(context, router_id, host) + self.l3_rpc_notifier.routers_updated(context, [router_id], + None, None) + return + rae = self.get_router_az_extern_ip_mapping(context, router_id, host) + if(rae and rae['extern_ip'] != extern_ip): + update_rae = {} + update_rae['router_id'] = rae['router_id'] + update_rae['host'] = rae['host'] + update_rae['extern_ip'] = extern_ip + rae.update(update_rae) + LOG.debug("update extern_ip %(extern_ip)s for az_host %(host)s " + "and router %(router_id)s ", + {'extern_ip': extern_ip, + 'host': host, + 'network_id': router_id}) + self.l3_rpc_notifier.routers_updated(context, [router_id], + None, None) + return + try: + with context.session.begin(subtransactions=True): + router_az_extern_ip_map = CascadeRouterAZExternipMapping( + router_id=router_id, host=host, extern_ip=extern_ip) + context.session.add(router_az_extern_ip_map) + LOG.debug("add extern_ip %(extern_ip)s for az_host %(host)s " + "and router %(router_id)s ", + {'extern_ip': extern_ip, + 'host': host, + 
'network_id': router_id}) + self.l3_rpc_notifier.routers_updated(context, [router_id], + None, None) + except db_exc.DBDuplicateEntry: + LOG.debug("DBDuplicateEntry ERR:update extern_ip %(extern_ip)s " + "for az_host %(host)s and router %(router_id)s ", + {'extern_ip': extern_ip, + 'host': host, + 'network_id': router_id}) + + def del_router_az_extern_ip_mapping(self, context, router_id, host): + try: + query = context.session.query(CascadeRouterAZExternipMapping) + query.filter( + CascadeRouterAZExternipMapping.router_id == router_id, + CascadeRouterAZExternipMapping.host == host).delete() + except exc.NoResultFound: + return None diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/db/dvr_mac_db.py b/icehouse-patches/neutron/vlan2vlan/neutron/db/dvr_mac_db.py new file mode 100644 index 00000000..e08df8bd --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/db/dvr_mac_db.py @@ -0,0 +1,191 @@ +# Copyright 2014 Hewlett Packard, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.db import exception as db_exc + +import sqlalchemy as sa + +from neutron.common import exceptions as q_exc +from neutron.common import log +from neutron.common import utils +from neutron.db import model_base +from neutron.extensions import dvr as ext_dvr +from neutron import manager +from neutron.openstack.common import log as logging +from oslo.config import cfg +from sqlalchemy.orm import exc + +LOG = logging.getLogger(__name__) + +dvr_mac_address_opts = [ + cfg.StrOpt('dvr_base_mac', + default="fa:16:3f:00:00:00", + help=_('The base mac address used for unique ' + 'DVR instances by Neutron')), +] +cfg.CONF.register_opts(dvr_mac_address_opts) + + +class DistributedVirtualRouterMacAddress(model_base.BASEV2): + + """Represents a v2 neutron distributed virtual router mac address.""" + + __tablename__ = 'dvr_host_macs' + + host = sa.Column(sa.String(255), primary_key=True, nullable=False) + mac_address = sa.Column(sa.String(32), nullable=False, unique=True) + + +class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): + + """Mixin class to add dvr mac address to db_plugin_base_v2.""" + + @property + def plugin(self): + try: + if self._plugin is not None: + return self._plugin + except AttributeError: + pass + self._plugin = manager.NeutronManager.get_plugin() + return self._plugin + + def _get_dvr_mac_address_by_host(self, context, host): + try: + query = context.session.query(DistributedVirtualRouterMacAddress) + dvrma = query.filter( + DistributedVirtualRouterMacAddress.host == host).one() + except exc.NoResultFound: + raise ext_dvr.DVRMacAddressNotFound(host=host) + return dvrma + + def _create_dvr_mac_address(self, context, host): + """Create dvr mac address for a given host.""" + base_mac = cfg.CONF.dvr_base_mac.split(':') + max_retries = cfg.CONF.mac_generation_retries + for attempt in reversed(range(max_retries)): + try: + with context.session.begin(subtransactions=True): + mac_address = utils.get_random_mac(base_mac) + dvr_mac_binding = DistributedVirtualRouterMacAddress( 
+ host=host, mac_address=mac_address) + context.session.add(dvr_mac_binding) + LOG.debug("Generated dvr mac for host %(host)s " + "is %(mac_address)s", + {'host': host, 'mac_address': mac_address}) + dvr_macs = self.get_dvr_mac_address_list(context) + self.notifier.dvr_mac_address_update(context, dvr_macs) + return self._make_dvr_mac_address_dict(dvr_mac_binding) + except db_exc.DBDuplicateEntry: + LOG.debug("Generated dvr mac %(mac)s exists." + " Remaining attempts %(attempts_left)s.", + {'mac': mac_address, 'attempts_left': attempt}) + LOG.error(_("MAC generation error after %s attempts"), max_retries) + raise ext_dvr.MacAddressGenerationFailure(host=host) + + def _create_dvr_mac_for_extern_ip(self, context, host): + """Create dvr mac address for a extern net ip.""" + base_mac = cfg.CONF.dvr_base_mac.split(':') + max_retries = cfg.CONF.mac_generation_retries + for attempt in reversed(range(max_retries)): + try: + with context.session.begin(subtransactions=True): + mac_address = utils.get_random_mac(base_mac) + dvr_mac_binding = DistributedVirtualRouterMacAddress( + host=host, mac_address=mac_address) + context.session.add(dvr_mac_binding) + LOG.debug("Generated dvr mac for host %(host)s " + "is %(mac_address)s", + {'host': host, 'mac_address': mac_address}) + return self._make_dvr_mac_address_dict(dvr_mac_binding) + except db_exc.DBDuplicateEntry: + LOG.debug("Generated dvr mac %(mac)s exists." + " Remaining attempts %(attempts_left)s.", + {'mac': mac_address, 'attempts_left': attempt}) + LOG.error(_("MAC generation error after %s attempts"), max_retries) + raise ext_dvr.MacAddressGenerationFailure(host=host) + + def delete_dvr_mac_address(self, context, host): + query = context.session.query(DistributedVirtualRouterMacAddress) + query.filter(DistributedVirtualRouterMacAddress.host == host).delete() + + def get_dvr_mac_address_list(self, context): + with context.session.begin(subtransactions=True): + query = context.session.query(DistributedVirtualRouterMacAddress) + dvrmacs = query.all() + return dvrmacs + + def get_dvr_mac_address_by_host(self, context, host): + if not host: + LOG.warn(_("get_dvr_mac_address_by_host, host input is empty")) + return + + try: + return self._get_dvr_mac_address_by_host(context, host) + except ext_dvr.DVRMacAddressNotFound: + return self._create_dvr_mac_address(context, host) + + def get_dvr_mac_address_by_next_hop(self, context, next_hop): + if not next_hop: + LOG.warn(_("get_dvr_mac_address_by_host, host input is empty")) + return + + try: + return self._get_dvr_mac_address_by_host(context, next_hop) + except ext_dvr.DVRMacAddressNotFound: + return self._create_dvr_mac_for_extern_ip(context, next_hop) + + def _make_dvr_mac_address_dict(self, dvr_mac_entry, fields=None): + return {'host': dvr_mac_entry['host'], + 'mac_address': dvr_mac_entry['mac_address']} + + @log.log + def get_compute_ports_on_host_by_subnet(self, context, host, subnet): + # FIXME(vivek): need to optimize this code to do away two-step + # filtering + vm_ports_by_host = [] + filter = {'fixed_ips': {'subnet_id': [subnet]}} + ports = self.plugin.get_ports(context, filters=filter) + LOG.debug("List of Ports on subnet %(subnet) received as %(ports)", + {'subnet': subnet, 'ports': ports}) + for port in ports: + if 'compute:' in port['device_owner']: + if port['binding:host_id'] == host: + port_dict = self.plugin._make_port_dict( + port, process_extensions=False) + vm_ports_by_host.append(port_dict) + LOG.debug("Returning list of VM Ports on host %(host) for subnet " + " %(subnet) ports 
%(ports)", + {'host': host, 'subnet': subnet, 'ports': vm_ports_by_host}) + return vm_ports_by_host + + @log.log + def get_subnet_for_dvr(self, context, subnet): + try: + subnet_info = self.plugin.get_subnet(context, subnet) + except q_exc.SubnetNotFound: + return {} + else: + # retrieve the gateway port on this subnet + filter = {'fixed_ips': {'subnet_id': [subnet], + 'ip_address': [subnet_info['gateway_ip']]}} + internal_gateway_ports = self.plugin.get_ports( + context, filters=filter) + if not internal_gateway_ports: + LOG.error(_("Could not retrieve gateway port " + "for subnet %s"), subnet_info) + return {} + internal_port = internal_gateway_ports[0] + subnet_info['gateway_mac'] = internal_port['mac_address'] + return subnet_info diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/db/extraroute_db.py b/icehouse-patches/neutron/vlan2vlan/neutron/db/extraroute_db.py new file mode 100644 index 00000000..1c055e0a --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/db/extraroute_db.py @@ -0,0 +1,237 @@ +# Copyright 2013, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from oslo.config import cfg +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.common import utils +from neutron.db import db_base_plugin_v2 +from neutron.db import l3_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import extraroute +from neutron.extensions import l3 +from neutron.openstack.common import log as logging +from neutron.db import dvr_mac_db as dvr_db + + +LOG = logging.getLogger(__name__) + +extra_route_opts = [ + # TODO(nati): use quota framework when it support quota for attributes + cfg.IntOpt('max_routes', default=30, + help=_("Maximum number of routes")), + # add by j00209498 ---begin + cfg.StrOpt('l3gw_extern_net_ip_range', + default="", + help=_('The l3gw external ip range(cidr) used for unique ' + 'like 100.64.0.0/16')), + # add by j00209498 ---end +] + +cfg.CONF.register_opts(extra_route_opts) + + +class RouterRoute(model_base.BASEV2, models_v2.Route): + router_id = sa.Column(sa.String(36), + sa.ForeignKey('routers.id', + ondelete="CASCADE"), + primary_key=True) + + router = orm.relationship(l3_db.Router, + backref=orm.backref("route_list", + lazy='joined', + cascade='delete')) + + +class ExtraRoute_db_mixin(l3_db.L3_NAT_db_mixin, + dvr_db.DVRDbMixin): + + """Mixin class to support extra route configuration on router.""" + + extern_ip_dvr_mac_map = {} + + def _extend_router_dict_extraroute(self, router_res, router_db): + router_res['routes'] = (ExtraRoute_db_mixin. 
+ _make_extra_route_list( + router_db['route_list'] + )) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + l3.ROUTERS, ['_extend_router_dict_extraroute']) + + def update_router(self, context, id, router): + r = router['router'] + with context.session.begin(subtransactions=True): + # check if route exists and have permission to access + router_db = self._get_router(context, id) + if 'routes' in r: + self._update_extra_routes(context, router_db, r['routes']) + routes = self._get_extra_routes_by_router_id(context, id) + router_updated = super(ExtraRoute_db_mixin, self).update_router( + context, id, router) + router_updated['routes'] = routes + + return router_updated + + def _get_subnets_by_cidr(self, context, cidr): + query_subnets = context.session.query(models_v2.Subnet) + return query_subnets.filter_by(cidr=cidr).all() + + def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop): + # Note(nati): Nexthop should be connected, + # so we need to check + # nexthop belongs to one of cidrs of the router ports + if not netaddr.all_matching_cidrs(nexthop, cidrs): + raise extraroute.InvalidRoutes( + routes=routes, + reason=_('the nexthop is not connected with router')) + # Note(nati) nexthop should not be same as fixed_ips + if nexthop in ips: + raise extraroute.InvalidRoutes( + routes=routes, + reason=_('the nexthop is used by router')) + + def _validate_routes(self, context, + router_id, routes): + extern_relay_cidr = cfg.CONF.l3gw_extern_net_ip_range + if len(routes) > cfg.CONF.max_routes: + raise extraroute.RoutesExhausted( + router_id=router_id, + quota=cfg.CONF.max_routes) + + filters = {'device_id': [router_id]} + ports = self._core_plugin.get_ports(context, filters) + cidrs = [] + ips = [] + for port in ports: + for ip in port['fixed_ips']: + cidrs.append(self._core_plugin._get_subnet( + context, ip['subnet_id'])['cidr']) + ips.append(ip['ip_address']) + for route in routes: + if(cfg.CONF.cascade_str == 'cascaded' + and extern_relay_cidr + and netaddr.all_matching_cidrs(route['nexthop'], + [extern_relay_cidr])): + continue + self._validate_routes_nexthop( + cidrs, ips, routes, route['nexthop']) + + def get_dvr_mac_for_remote_extern_ip(self, context, remote_cidr, nexthop): + return(self.get_dvr_mac_address_by_next_hop(context, nexthop)) + + def _update_extra_routes(self, context, router, routes): + # add by j00209498 ---begin + extern_relay_cidr = cfg.CONF.l3gw_extern_net_ip_range + # add by j00209498 ---end + self._validate_routes(context, router['id'], + routes) + old_routes, routes_dict = self._get_extra_routes_dict_by_router_id( + context, router['id']) + added, removed = utils.diff_list_of_dict(old_routes, + routes) + LOG.debug(_('Added routes are %s'), added) + for route in added: + if(cfg.CONF.cascade_str == 'cascaded' + and extern_relay_cidr + and netaddr.all_matching_cidrs(route['nexthop'], + [extern_relay_cidr])): + dvr_mac = self.get_dvr_mac_for_remote_extern_ip( + context, + route['destination'], + route['nexthop']) + if dvr_mac: + self.extern_ip_dvr_mac_map[route['nexthop']] = \ + dvr_mac.get('mac_address') + router_routes = RouterRoute( + router_id=router['id'], + destination=route['destination'], + nexthop=route['nexthop']) + context.session.add(router_routes) + + LOG.debug(_('Removed routes are %s'), removed) + for route in removed: + context.session.delete( + routes_dict[(route['destination'], route['nexthop'])]) + + @staticmethod + def _make_extra_route_list(extra_routes): + # added by j00209498 ----begin + extern_relay_cidr = 
cfg.CONF.l3gw_extern_net_ip_range + if(cfg.CONF.cascade_str == 'cascaded' and extern_relay_cidr): + routes_list = [] + for route in extra_routes: + if(netaddr.all_matching_cidrs(route['nexthop'], + [extern_relay_cidr])): + dvr_mac = ExtraRoute_db_mixin.extern_ip_dvr_mac_map.get( + route['nexthop']) + routes_list.append({'destination': route['destination'], + 'nexthop': route['nexthop'], + 'nexthop_dvr_mac': dvr_mac, + 'onlink': True}) + else: + routes_list.append({'destination': route['destination'], + 'nexthop': route['nexthop']}) + return routes_list + # added by j00209498 ----end + return [{'destination': route['destination'], + 'nexthop': route['nexthop']} + for route in extra_routes] + + def _get_extra_routes_by_router_id(self, context, id): + query = context.session.query(RouterRoute) + query = query.filter_by(router_id=id) + return self._make_extra_route_list(query) + + def _get_extra_routes_dict_by_router_id(self, context, id): + query = context.session.query(RouterRoute) + query = query.filter_by(router_id=id) + routes = [] + routes_dict = {} + for route in query: + routes.append({'destination': route['destination'], + 'nexthop': route['nexthop']}) + routes_dict[(route['destination'], route['nexthop'])] = route + return routes, routes_dict + + def get_router(self, context, id, fields=None): + with context.session.begin(subtransactions=True): + router = super(ExtraRoute_db_mixin, self).get_router( + context, id, fields) + return router + + def get_routers(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + with context.session.begin(subtransactions=True): + routers = super(ExtraRoute_db_mixin, self).get_routers( + context, filters, fields, sorts=sorts, limit=limit, + marker=marker, page_reverse=page_reverse) + return routers + + def _confirm_router_interface_not_in_use(self, context, router_id, + subnet_id): + super(ExtraRoute_db_mixin, self)._confirm_router_interface_not_in_use( + context, router_id, subnet_id) + subnet_db = self._core_plugin._get_subnet(context, subnet_id) + subnet_cidr = netaddr.IPNetwork(subnet_db['cidr']) + extra_routes = self._get_extra_routes_by_router_id(context, router_id) + for route in extra_routes: + if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]): + raise extraroute.RouterInterfaceInUseByRoute( + router_id=router_id, subnet_id=subnet_id) diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/db/l3_rpc_base.py b/icehouse-patches/neutron/vlan2vlan/neutron/db/l3_rpc_base.py new file mode 100644 index 00000000..1a4c1323 --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/db/l3_rpc_base.py @@ -0,0 +1,232 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
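+# This patched copy of l3_rpc_base.py carries the standard L3 agent RPC
+# callbacks together with cascading helpers such as
+# update_router_extern_ip_map and get_extra_routes_by_subnet defined below.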
+ +from oslo.config import cfg + +from neutron.common import constants +from neutron.common import utils +from neutron import context as neutron_context +from neutron.extensions import l3 +from neutron.extensions import portbindings +from neutron import manager +from neutron.openstack.common import jsonutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as plugin_constants + + +LOG = logging.getLogger(__name__) + + +class L3RpcCallbackMixin(object): + + """A mix-in that enable L3 agent rpc support in plugin implementations.""" + + def sync_routers(self, context, **kwargs): + """Sync routers according to filters to a specific agent. + + @param context: contain user information + @param kwargs: host, router_ids + @return: a list of routers + with their interfaces and floating_ips + """ + router_ids = kwargs.get('router_ids') + host = kwargs.get('host') + context = neutron_context.get_admin_context() + l3plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + if not l3plugin: + routers = {} + LOG.error(_('No plugin for L3 routing registered! Will reply ' + 'to l3 agent with empty router dictionary.')) + elif utils.is_extension_supported( + l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): + if cfg.CONF.router_auto_schedule: + l3plugin.auto_schedule_routers(context, host, router_ids) + routers = l3plugin.list_active_sync_routers_on_active_l3_agent( + context, host, router_ids) + else: + routers = l3plugin.get_sync_data(context, router_ids) + plugin = manager.NeutronManager.get_plugin() + if utils.is_extension_supported( + plugin, constants.PORT_BINDING_EXT_ALIAS): + self._ensure_host_set_on_ports(context, plugin, host, routers) + LOG.debug(_("Routers returned to l3 agent:\n %s"), + jsonutils.dumps(routers, indent=5)) + return routers + + def _ensure_host_set_on_ports(self, context, plugin, host, routers): + for router in routers: + LOG.debug(_("Checking router: %(id)s for host: %(host)s"), + {'id': router['id'], 'host': host}) + self._ensure_host_set_on_port(context, plugin, host, + router.get('gw_port'), + router['id']) + for interface in router.get(constants.INTERFACE_KEY, []): + self._ensure_host_set_on_port(context, plugin, host, + interface, router['id']) + + def _ensure_host_set_on_port(self, context, plugin, host, port, + router_id=None): + if (port and + port.get('device_owner') == + constants.DEVICE_OWNER_DVR_INTERFACE): + # Ports that are DVR interfaces have multiple bindings (based on + # of hosts on which DVR router interfaces are spawned). Such + # bindings are created/updated here by invoking + # update_dvr_port_binding + plugin.update_dvr_port_binding(context, port['id'], + {'port': + {portbindings.HOST_ID: host, + 'device_id': router_id} + }) + elif (port and + (port.get(portbindings.HOST_ID) != host or + port.get(portbindings.VIF_TYPE) == + portbindings.VIF_TYPE_BINDING_FAILED)): + # All ports, including ports created for SNAT'ing for + # DVR are handled here + plugin.update_port(context, port['id'], + {'port': {portbindings.HOST_ID: host}}) + + def get_external_network_id(self, context, **kwargs): + """Get one external network id for l3 agent. + + l3 agent expects only on external network when it performs + this query. 
+ """ + context = neutron_context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + net_id = plugin.get_external_network_id(context) + LOG.debug(_("External network ID returned to l3 agent: %s"), + net_id) + return net_id + + def update_floatingip_statuses(self, context, router_id, fip_statuses): + """Update operational status for a floating IP.""" + l3_plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + with context.session.begin(subtransactions=True): + for (floatingip_id, status) in fip_statuses.iteritems(): + LOG.debug(_("New status for floating IP %(floatingip_id)s: " + "%(status)s"), {'floatingip_id': floatingip_id, + 'status': status}) + try: + l3_plugin.update_floatingip_status(context, + floatingip_id, + status) + except l3.FloatingIPNotFound: + LOG.debug(_("Floating IP: %s no longer present."), + floatingip_id) + # Find all floating IPs known to have been the given router + # for which an update was not received. Set them DOWN mercilessly + # This situation might occur for some asynchronous backends if + # notifications were missed + known_router_fips = l3_plugin.get_floatingips( + context, {'last_known_router_id': [router_id]}) + # Consider only floating ips which were disassociated in the API + # FIXME(salv-orlando): Filtering in code should be avoided. + # the plugin should offer a way to specify a null filter + fips_to_disable = (fip['id'] for fip in known_router_fips + if not fip['router_id']) + for fip_id in fips_to_disable: + l3_plugin.update_floatingip_status( + context, fip_id, constants.FLOATINGIP_STATUS_DOWN) + + def get_ports_by_subnet(self, context, **kwargs): + """DVR: RPC called by dvr-agent to get all ports for subnet.""" + subnet_id = kwargs.get('subnet_id') + LOG.debug("DVR: subnet_id: %s", subnet_id) + filters = {'fixed_ips': {'subnet_id': [subnet_id]}} + plugin = manager.NeutronManager.get_plugin() + return plugin.get_ports(context, filters=filters) + + def get_agent_gateway_port(self, context, **kwargs): + """Get Agent Gateway port for FIP. + + l3 agent expects an Agent Gateway Port to be returned + for this query. 
+ """ + network_id = kwargs.get('network_id') + host = kwargs.get('host') + context = neutron_context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + l3plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + agent_port = l3plugin.create_fip_agent_gw_port_if_not_exists( + context, network_id, host) + self._ensure_host_set_on_port(context, plugin, host, + agent_port) + LOG.debug('Agent Gateway port returned : %(agent_port)s with ' + 'host %(host)s', {'agent_port': agent_port, + 'host': host}) + return agent_port + + def update_router_extern_ip_map(self, context, **kwargs): + router_id = kwargs.get('router_id') + host = kwargs.get('host') + extern_ip = kwargs.get('gateway_ip') + context = neutron_context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + plugin.update_router_az_extern_ip_mapping(context, + router_id, host, extern_ip) + + def get_extra_routes_by_subnet(self, context, **kwargs): + router_id = kwargs.get('router_id') + host = kwargs.get('host') + subnet_id = kwargs.get('subnet_id') + plugin = manager.NeutronManager.get_plugin() + subnet = plugin.get_subnet(context, subnet_id) + network = plugin.get_network(context, subnet['network_id']) + binding_host = plugin.get_binding_az_by_network_id(context, + network['id']) + net_type = network['provider:network_type'] + seg_id = network['provider:segmentation_id'] + if(net_type == 'vxlan' and plugin.is_big2layer_vni(seg_id)): + extra_routes = ['big2Layer'] + elif(net_type in ['vlan', 'vxlan'] and binding_host != host): + if(binding_host is None): + return['not_bound_network'] + extern_ip = plugin.get_extern_ip_by_router_id_and_host( + context, + router_id, + binding_host) + extra_routes = [(extern_ip, subnet['cidr'])] + else: + extra_routes = ['local_network'] + return extra_routes + + def get_snat_router_interface_ports(self, context, **kwargs): + """Get SNAT serviced Router Port List. + + The Service Node that hosts the SNAT service requires + the ports to service the router interfaces. + This function will check if any available ports, if not + it will create ports on the routers interfaces and + will send a list to the L3 agent. + """ + router_id = kwargs.get('router_id') + host = kwargs.get('host') + context = neutron_context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + l3plugin = manager.NeutronManager.get_service_plugins()[ + plugin_constants.L3_ROUTER_NAT] + snat_port_list = l3plugin.create_snat_intf_port_list_if_not_exists( + context, router_id) + for p in snat_port_list: + self._ensure_host_set_on_port(context, plugin, host, p) + LOG.debug('SNAT interface ports returned : %(snat_port_list)s ' + 'and on host %(host)s', {'snat_port_list': snat_port_list, + 'host': host}) + return snat_port_list diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py b/icehouse-patches/neutron/vlan2vlan/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py new file mode 100644 index 00000000..48afd8d1 --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py @@ -0,0 +1,94 @@ +# Copyright 2014 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""L2 models to support DVR + +Revision ID: 2026156eab2f +Revises: 3927f7f7c456 +Create Date: 2014-06-23 19:12:43.392912 + +""" + +# revision identifiers, used by Alembic. +revision = '2026156eab2f' +down_revision = '3927f7f7c456' + +migration_for_plugins = [ + 'neutron.plugins.ml2.plugin.Ml2Plugin' +] + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +def upgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.create_table( + 'dvr_host_macs', + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('mac_address', sa.String(length=32), + nullable=False, unique=True), + sa.PrimaryKeyConstraint('host') + ) + op.create_table( + 'ml2_dvr_port_bindings', + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('router_id', sa.String(length=36), nullable=True), + sa.Column('vif_type', sa.String(length=64), nullable=False), + sa.Column('vif_details', sa.String(length=4095), + nullable=False, server_default=''), + sa.Column('vnic_type', sa.String(length=64), + nullable=False, server_default='normal'), + sa.Column('profile', sa.String(length=4095), + nullable=False, server_default=''), + sa.Column('cap_port_filter', sa.Boolean(), nullable=False), + sa.Column('driver', sa.String(length=64), nullable=True), + sa.Column('segment', sa.String(length=36), nullable=True), + sa.Column(u'status', sa.String(16), nullable=False), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + ondelete='CASCADE'), + sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'], + ondelete='SET NULL'), + sa.PrimaryKeyConstraint('port_id', 'host') + ) + # add by j00209498 + op.create_table( + 'cascade_az_network_bind', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.PrimaryKeyConstraint('network_id', 'host') + ) + op.create_table( + 'cascade_router_az_externip_map', + sa.Column('router_id', sa.String(length=36), nullable=False), + sa.Column('host', sa.String(length=255), nullable=False), + sa.Column('extern_ip', sa.String(length=64), nullable=False), + sa.PrimaryKeyConstraint('router_id', 'host') + ) + + +def downgrade(active_plugins=None, options=None): + if not migration.should_run(active_plugins, migration_for_plugins): + return + + op.drop_table('ml2_dvr_port_bindings') + op.drop_table('dvr_host_macs') + op.drop_table('cascade_az_network_bind') + op.drop_table('cascade_router_az_externip_map') diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/plugins/ml2/plugin.py b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/ml2/plugin.py new file mode 100644 index 00000000..4f2e2403 --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/ml2/plugin.py @@ -0,0 +1,993 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import contextlib + +from oslo.config import cfg +from oslo.db import exception as os_db_exception +from sqlalchemy import exc as sql_exc +from sqlalchemy.orm import exc as sa_exc + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as const +from neutron.common import exceptions as exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dvr_mac_db +from neutron.db import external_net_db +from neutron.db import extradhcpopt_db +from neutron.db import l3_dvrscheduler_db +from neutron.db import models_v2 +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.db import cascade_db +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import multiprovidernet as mpnet +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import jsonutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log +from neutron.plugins.common import constants as service_constants +from neutron.plugins.ml2.common import exceptions as ml2_exc +from neutron.plugins.ml2 import config # noqa +from neutron.plugins.ml2 import db +from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2 import driver_context +from neutron.plugins.ml2 import managers +from neutron.plugins.ml2 import models +from neutron.plugins.ml2 import rpc + +LOG = log.getLogger(__name__) + +# REVISIT(rkukura): Move this and other network_type constants to +# providernet.py? +TYPE_MULTI_SEGMENT = 'multi-segment' + + +class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, + dvr_mac_db.DVRDbMixin, + external_net_db.External_net_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + addr_pair_db.AllowedAddressPairsMixin, + extradhcpopt_db.ExtraDhcpOptMixin, + l3_dvrscheduler_db.L3_DVRsch_db_mixin, + cascade_db.CascadeDBMixin): + + """Implement the Neutron L2 abstractions using modules. + + Ml2Plugin is a Neutron plugin based on separately extensible sets + of network types and mechanisms for connecting to networks of + those types. The network types and mechanisms are implemented as + drivers loaded via Python entry points. Networks can be made up of + multiple segments (not yet fully implemented). + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. 
Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + # List of supported extensions + _supported_extension_aliases = ["provider", "external-net", "binding", + "quotas", "security-group", "agent", + "dhcp_agent_scheduler", + "multi-provider", "allowed-address-pairs", + "extra_dhcp_opt"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + def __init__(self): + # First load drivers, then initialize DB, then initialize drivers + self.type_manager = managers.TypeManager() + self.mechanism_manager = managers.MechanismManager() + super(Ml2Plugin, self).__init__() + self.type_manager.initialize() + self.mechanism_manager.initialize() + # bulk support depends on the underlying drivers + self.__native_bulk_support = self.mechanism_manager.native_bulk_support + + self._setup_rpc() + + # REVISIT(rkukura): Use stevedore for these? + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + + LOG.info(_("Modular L2 Plugin initialization complete")) + + def _setup_rpc(self): + self.notifier = rpc.AgentNotifierApi(topics.AGENT) + self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + + def start_rpc_listeners(self): + self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager), + agents_db.AgentExtRpcCallback()] + self.topic = topics.PLUGIN + self.conn = n_rpc.create_connection(new=True) + self.conn.create_consumer(self.topic, self.endpoints, + fanout=False) + return self.conn.consume_in_threads() + + def _process_provider_segment(self, segment): + network_type = self._get_attribute(segment, provider.NETWORK_TYPE) + physical_network = self._get_attribute(segment, + provider.PHYSICAL_NETWORK) + segmentation_id = self._get_attribute(segment, + provider.SEGMENTATION_ID) + + if attributes.is_attr_set(network_type): + segment = {api.NETWORK_TYPE: network_type, + api.PHYSICAL_NETWORK: physical_network, + api.SEGMENTATION_ID: segmentation_id} + self.type_manager.validate_provider_segment(segment) + return segment + + msg = _("network_type required") + raise exc.InvalidInput(error_message=msg) + + def _process_provider_create(self, network): + segments = [] + + if any(attributes.is_attr_set(network.get(f)) + for f in (provider.NETWORK_TYPE, provider.PHYSICAL_NETWORK, + provider.SEGMENTATION_ID)): + # Verify that multiprovider and provider attributes are not set + # at the same time. 
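+ # (Supplying both the provider:* attributes and the multiprovider
+ # 'segments' attribute is ambiguous, so such a request is rejected.)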
+ if attributes.is_attr_set(network.get(mpnet.SEGMENTS)): + raise mpnet.SegmentsSetInConjunctionWithProviders() + + network_type = self._get_attribute(network, provider.NETWORK_TYPE) + physical_network = self._get_attribute(network, + provider.PHYSICAL_NETWORK) + segmentation_id = self._get_attribute(network, + provider.SEGMENTATION_ID) + segments = [{provider.NETWORK_TYPE: network_type, + provider.PHYSICAL_NETWORK: physical_network, + provider.SEGMENTATION_ID: segmentation_id}] + elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)): + segments = network[mpnet.SEGMENTS] + else: + return + + return [self._process_provider_segment(s) for s in segments] + + def _get_attribute(self, attrs, key): + value = attrs.get(key) + if value is attributes.ATTR_NOT_SPECIFIED: + value = None + return value + + def _extend_network_dict_provider(self, context, network): + id = network['id'] + segments = db.get_network_segments(context.session, id) + if not segments: + LOG.error(_("Network %s has no segments"), id) + network[provider.NETWORK_TYPE] = None + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + elif len(segments) > 1: + network[mpnet.SEGMENTS] = [ + {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE], + provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK], + provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]} + for segment in segments] + else: + segment = segments[0] + network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE] + network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK] + network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID] + + def _filter_nets_provider(self, context, nets, filters): + # TODO(rkukura): Implement filtering. + return nets + + def _process_port_binding(self, mech_context, context, attrs): + binding = mech_context._binding + port = mech_context.current + self._update_port_dict_binding(port, binding) + + host = attrs and attrs.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + + vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) + vnic_type_set = attributes.is_attr_set(vnic_type) + + # CLI can't send {}, so treat None as {} + profile = attrs and attrs.get(portbindings.PROFILE) + profile_set = profile is not None and \ + profile is not attributes.ATTR_NOT_SPECIFIED + if profile_set and not profile: + profile = {} + + if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: + if (not host_set and not vnic_type_set and not profile_set and + binding.segment): + return False + self._delete_port_binding(mech_context) + + # Return True only if an agent notification is needed. + # This will happen if a new host, vnic_type, or profile was specified + # that differs from the current one. Note that host_set is True + # even if the host is an empty string + ret_value = ((host_set and binding.get('host') != host) or + (vnic_type_set and + binding.get('vnic_type') != vnic_type) or + (profile_set and self._get_profile(binding) != profile)) + + if host_set: + binding.host = host + port[portbindings.HOST_ID] = host + if "compute:" in port['device_owner']: + self.dvr_update_router_addvm(context, port) + + if vnic_type_set: + binding.vnic_type = vnic_type + port[portbindings.VNIC_TYPE] = vnic_type + + if profile_set: + binding.profile = jsonutils.dumps(profile) + if len(binding.profile) > models.BINDING_PROFILE_LEN: + msg = _("binding:profile value too large") + raise exc.InvalidInput(error_message=msg) + port[portbindings.PROFILE] = profile + + # To try to [re]bind if host is non-empty. 
+ if binding.host: + self.mechanism_manager.bind_port(mech_context) + self._update_port_dict_binding(port, binding) + + # Update the port status if requested by the bound driver. + if binding.segment and mech_context._new_port_status: + # REVISIT(rkukura): This function is currently called + # inside a transaction with the port either newly + # created or locked for update. After the fix for bug + # 1276391 is merged, this will no longer be true, and + # the port status update will need to be handled in + # the transaction that commits the new binding. + port_db = db.get_port(mech_context._plugin_context.session, + port['id']) + port_db.status = mech_context._new_port_status + port['status'] = mech_context._new_port_status + + return ret_value + + def _process_dvr_port_binding(self, mech_context, context, attrs): + binding = mech_context.binding + + host = attrs and attrs.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + + if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: + if (not host_set and binding.segment and + self.mechanism_manager. + validate_port_binding(mech_context)): + return False + self.mechanism_manager.unbind_port(mech_context) + + if host_set: + binding.host = host + + if binding.host: + self.mechanism_manager.bind_port(mech_context) + + return True + + def _update_port_dict_binding(self, port, binding): + port[portbindings.HOST_ID] = binding.host + port[portbindings.VNIC_TYPE] = binding.vnic_type + port[portbindings.PROFILE] = self._get_profile(binding) + port[portbindings.VIF_TYPE] = binding.vif_type + port[portbindings.VIF_DETAILS] = self._get_vif_details(binding) + + def _get_vif_details(self, binding): + if binding.vif_details: + try: + return jsonutils.loads(binding.vif_details) + except Exception: + LOG.error(_("Serialized vif_details DB value '%(value)s' " + "for port %(port)s is invalid"), + {'value': binding.vif_details, + 'port': binding.port_id}) + return {} + + def _get_profile(self, binding): + if binding.profile: + try: + return jsonutils.loads(binding.profile) + except Exception: + LOG.error(_("Serialized profile DB value '%(value)s' for " + "port %(port)s is invalid"), + {'value': binding.profile, + 'port': binding.port_id}) + return {} + + def _delete_port_binding(self, mech_context): + binding = mech_context._binding + binding.vif_type = portbindings.VIF_TYPE_UNBOUND + binding.vif_details = '' + binding.driver = None + binding.segment = None + port = mech_context.current + self._update_port_dict_binding(port, binding) + + def _check_and_delete_dvr_port_binding(self, mech_context, context): + dvr_binding = mech_context.binding + if (not dvr_binding.router_id and + dvr_binding.status == const.PORT_STATUS_DOWN): + with context.session.begin(subtransactions=True): + LOG.debug("DVR: Deleting dvr binding for port %s", + dvr_binding.port_id) + context.session.delete(dvr_binding) + + def _check_port_binding_az_valid(self, context, network, binding_host): + net_type = network['provider:network_type'] + seg_id = network['provider:segmentation_id'] + net_id = network['id'] + if (net_type == 'vxlan' and self.is_big2layer_vni(seg_id)): + return True + elif(net_type in ['vlan', 'vxlan']): + host = self.get_binding_az_by_network_id(context, net_id) + if(not host): + self.add_binding_az_network_id(context, binding_host, net_id) + elif(host and host != binding_host): + return False + return True + #net_type == 'local' or net_type == 'flat' or 'gre' + return True + + def _ml2_extend_port_dict_binding(self, port_res, port_db): + # None when called 
during unit tests for other plugins. + if port_db.port_binding: + self._update_port_dict_binding(port_res, port_db.port_binding) + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, ['_ml2_extend_port_dict_binding']) + + # Note - The following hook methods have "ml2" in their names so + # that they are not called twice during unit tests due to global + # registration of hooks in portbindings_db.py used by other + # plugins. + + def _ml2_port_model_hook(self, context, original_model, query): + query = query.outerjoin(models.PortBinding, + (original_model.id == + models.PortBinding.port_id)) + return query + + def _ml2_port_result_filter_hook(self, query, filters): + values = filters and filters.get(portbindings.HOST_ID, []) + if not values: + return query + return query.filter(models.PortBinding.host.in_(values)) + + db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( + models_v2.Port, + "ml2_port_bindings", + '_ml2_port_model_hook', + None, + '_ml2_port_result_filter_hook') + + def _notify_port_updated(self, mech_context): + port = mech_context._port + segment = mech_context.bound_segment + if not segment: + # REVISIT(rkukura): This should notify agent to unplug port + network = mech_context.network.current + LOG.warning(_("In _notify_port_updated(), no bound segment for " + "port %(port_id)s on network %(network_id)s"), + {'port_id': port['id'], + 'network_id': network['id']}) + return + self.notifier.port_update(mech_context._plugin_context, port, + segment[api.NETWORK_TYPE], + segment[api.SEGMENTATION_ID], + segment[api.PHYSICAL_NETWORK]) + + # TODO(apech): Need to override bulk operations + + def create_network(self, context, network): + net_data = network['network'] + segments = self._process_provider_create(net_data) + tenant_id = self._get_tenant_id_for_create(context, net_data) + + session = context.session + with session.begin(subtransactions=True): + self._ensure_default_security_group(context, tenant_id) + result = super(Ml2Plugin, self).create_network(context, network) + network_id = result['id'] + self._process_l3_create(context, result, net_data) + # REVISIT(rkukura): Consider moving all segment management + # to TypeManager. 
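+ # Provider networks reserve each requested segment; tenant networks
+ # get a single segment allocated by the type manager.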
+ if segments: + for segment in segments: + self.type_manager.reserve_provider_segment(session, + segment) + db.add_network_segment(session, network_id, segment) + else: + segment = self.type_manager.allocate_tenant_segment(session) + db.add_network_segment(session, network_id, segment) + self._extend_network_dict_provider(context, result) + mech_context = driver_context.NetworkContext(self, context, + result) + self.mechanism_manager.create_network_precommit(mech_context) + + try: + self.mechanism_manager.create_network_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_network_postcommit " + "failed, deleting network '%s'"), result['id']) + self.delete_network(context, result['id']) + return result + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + original_network = super(Ml2Plugin, self).get_network(context, id) + updated_network = super(Ml2Plugin, self).update_network(context, + id, + network) + self._process_l3_update(context, updated_network, + network['network']) + self._extend_network_dict_provider(context, updated_network) + mech_context = driver_context.NetworkContext( + self, context, updated_network, + original_network=original_network) + self.mechanism_manager.update_network_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_network, potentially + # by re-calling update_network with the previous attributes. For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. + self.mechanism_manager.update_network_postcommit(mech_context) + return updated_network + + def get_network(self, context, id, fields=None): + session = context.session + with session.begin(subtransactions=True): + result = super(Ml2Plugin, self).get_network(context, id, None) + self._extend_network_dict_provider(context, result) + + return self._fields(result, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(Ml2Plugin, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + for net in nets: + self._extend_network_dict_provider(context, net) + + nets = self._filter_nets_provider(context, nets, filters) + nets = self._filter_nets_l3(context, nets, filters) + + return [self._fields(net, fields) for net in nets] + + def delete_network(self, context, id): + # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network() + # function is not used because it auto-deletes ports and + # subnets from the DB without invoking the derived class's + # delete_port() or delete_subnet(), preventing mechanism + # drivers from being called. This approach should be revisited + # when the API layer is reworked during icehouse. + + LOG.debug(_("Deleting network %s"), id) + session = context.session + while True: + try: + with session.begin(subtransactions=True): + self._process_l3_delete(context, id) + + # Get ports to auto-delete. + ports = (session.query(models_v2.Port). + enable_eagerloads(False). + filter_by(network_id=id). + with_lockmode('update').all()) + LOG.debug(_("Ports to auto-delete: %s"), ports) + only_auto_del = all(p.device_owner + in db_base_plugin_v2. 
+ AUTO_DELETE_PORT_OWNERS + for p in ports) + if not only_auto_del: + LOG.debug(_("Tenant-owned ports exist")) + raise exc.NetworkInUse(net_id=id) + + # Get subnets to auto-delete. + subnets = (session.query(models_v2.Subnet). + enable_eagerloads(False). + filter_by(network_id=id). + with_lockmode('update').all()) + LOG.debug(_("Subnets to auto-delete: %s"), subnets) + + if not (ports or subnets): + network = self.get_network(context, id) + mech_context = driver_context.NetworkContext(self, + context, + network) + self.mechanism_manager.delete_network_precommit( + mech_context) + + record = self._get_network(context, id) + LOG.debug(_("Deleting network record %s"), record) + session.delete(record) + + for segment in mech_context.network_segments: + self.type_manager.release_segment(session, segment) + + # The segment records are deleted via cascade from the + # network record, so explicit removal is not necessary. + LOG.debug(_("Committing transaction")) + break + except os_db_exception.DBError as e: + with excutils.save_and_reraise_exception() as ctxt: + if isinstance(e.inner_exception, sql_exc.IntegrityError): + ctxt.reraise = False + msg = _("A concurrent port creation has occurred") + LOG.warning(msg) + continue + + for port in ports: + try: + self.delete_port(context, port.id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception auto-deleting port %s"), + port.id) + + for subnet in subnets: + try: + self.delete_subnet(context, subnet.id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception auto-deleting subnet %s"), + subnet.id) + + try: + self.mechanism_manager.delete_network_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the network. Ideally we'd notify the caller of + # the fact that an error occurred. + LOG.error(_("mechanism_manager.delete_network_postcommit failed")) + self.notifier.network_delete(context, id) + + def create_subnet(self, context, subnet): + session = context.session + with session.begin(subtransactions=True): + result = super(Ml2Plugin, self).create_subnet(context, subnet) + mech_context = driver_context.SubnetContext(self, context, result) + self.mechanism_manager.create_subnet_precommit(mech_context) + + try: + self.mechanism_manager.create_subnet_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_subnet_postcommit " + "failed, deleting subnet '%s'"), result['id']) + self.delete_subnet(context, result['id']) + return result + + def update_subnet(self, context, id, subnet): + session = context.session + with session.begin(subtransactions=True): + original_subnet = super(Ml2Plugin, self).get_subnet(context, id) + updated_subnet = super(Ml2Plugin, self).update_subnet( + context, id, subnet) + mech_context = driver_context.SubnetContext( + self, context, updated_subnet, original_subnet=original_subnet) + self.mechanism_manager.update_subnet_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_subnet, potentially + # by re-calling update_subnet with the previous attributes. For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. 
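+ # The postcommit call runs outside the DB transaction so mechanism
+ # drivers can call out to external systems without holding the session.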
+ self.mechanism_manager.update_subnet_postcommit(mech_context) + return updated_subnet + + def delete_subnet(self, context, id): + # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet() + # function is not used because it deallocates the subnet's addresses + # from ports in the DB without invoking the derived class's + # update_port(), preventing mechanism drivers from being called. + # This approach should be revisited when the API layer is reworked + # during icehouse. + + LOG.debug(_("Deleting subnet %s"), id) + session = context.session + while True: + with session.begin(subtransactions=True): + subnet = self.get_subnet(context, id) + # Get ports to auto-deallocate + allocated = (session.query(models_v2.IPAllocation). + filter_by(subnet_id=id). + join(models_v2.Port). + filter_by(network_id=subnet['network_id']). + with_lockmode('update').all()) + LOG.debug(_("Ports to auto-deallocate: %s"), allocated) + only_auto_del = all(not a.port_id or + a.ports.device_owner in db_base_plugin_v2. + AUTO_DELETE_PORT_OWNERS + for a in allocated) + if not only_auto_del: + LOG.debug(_("Tenant-owned ports exist")) + raise exc.SubnetInUse(subnet_id=id) + + if not allocated: + mech_context = driver_context.SubnetContext(self, context, + subnet) + self.mechanism_manager.delete_subnet_precommit( + mech_context) + + LOG.debug(_("Deleting subnet record")) + record = self._get_subnet(context, id) + session.delete(record) + + LOG.debug(_("Committing transaction")) + break + + for a in allocated: + if a.port_id: + # calling update_port() for each allocation to remove the + # IP from the port and call the MechanismDrivers + data = {'port': + {'fixed_ips': [{'subnet_id': ip.subnet_id, + 'ip_address': ip.ip_address} + for ip in a.ports.fixed_ips + if ip.subnet_id != id]}} + try: + self.update_port(context, a.port_id, data) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Exception deleting fixed_ip from " + "port %s"), a.port_id) + session.delete(a) + + try: + self.mechanism_manager.delete_subnet_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the subnet. Ideally we'd notify the caller of + # the fact that an error occurred. 
+ LOG.error(_("mechanism_manager.delete_subnet_postcommit failed")) + + def create_port(self, context, port): + attrs = port['port'] + attrs['status'] = const.PORT_STATUS_DOWN + + session = context.session + mech_context = None + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + network = self.get_network(context, port['port']['network_id']) + # add by j00209498 ---begin + if('compute' in attrs['device_owner']): + binding_host = attrs.get('binding:host_id', None) + if cfg.CONF.cascade_str == 'cascading' and binding_host: + is_continue = True + is_continue = self._check_port_binding_az_valid( + context, + network, + binding_host) + if(not is_continue): + raise exc.PortBindAZError( + net_id=port['port']['network_id'], + host=binding_host) + # add by j00209498 ---end + result = super(Ml2Plugin, self).create_port(context, port) + self._process_port_create_security_group(context, result, sgids) + + if (attrs['device_owner'] != + const.DEVICE_OWNER_DVR_INTERFACE): + # for DVR ports late binding happens via L3-Agent + mech_context = driver_context.PortContext(self, context, + result, + network) + self._process_port_binding(mech_context, context, attrs) + + result[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, result, + attrs.get(addr_pair.ADDRESS_PAIRS))) + self._process_port_create_extra_dhcp_opts(context, result, + dhcp_opts) + #self.l3_agent_notify_for_vmarp_table(context, result['id'], 'add') + if mech_context: + self.mechanism_manager.create_port_precommit(mech_context) + + try: + if mech_context: + self.mechanism_manager.create_port_postcommit(mech_context) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_("mechanism_manager.create_port_postcommit " + "failed, deleting port '%s'"), result['id']) + self.delete_port(context, result['id']) + self.notify_security_groups_member_updated(context, result) + return result + + def update_port(self, context, id, port): + attrs = port['port'] + need_port_update_notify = False + + session = context.session + with session.begin(subtransactions=True): + try: + port_db = (session.query(models_v2.Port). + enable_eagerloads(False). + filter_by(id=id).with_lockmode('update').one()) + except sa_exc.NoResultFound: + raise exc.PortNotFound(port_id=id) + original_port = self._make_port_dict(port_db) + updated_port = super(Ml2Plugin, self).update_port(context, id, + port) + if addr_pair.ADDRESS_PAIRS in port['port']: + need_port_update_notify |= ( + self.update_address_pairs_on_port(context, id, port, + original_port, + updated_port)) + need_port_update_notify |= self.update_security_group_on_port( + context, id, port, original_port, updated_port) + network = self.get_network(context, original_port['network_id']) + need_port_update_notify |= self._update_extra_dhcp_opts_on_port( + context, id, port, updated_port) + mech_context = driver_context.PortContext( + self, context, updated_port, network, + original_port=original_port) + need_port_update_notify |= self._process_port_binding( + mech_context, context, attrs) + self.mechanism_manager.update_port_precommit(mech_context) + + # TODO(apech) - handle errors raised by update_port, potentially + # by re-calling update_port with the previous attributes. 
For + # now the error is propogated to the caller, which is expected to + # either undo/retry the operation or delete the resource. + self.mechanism_manager.update_port_postcommit(mech_context) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + self._notify_port_updated(mech_context) + + return updated_port + + def update_dvr_port_binding(self, context, id, port): + attrs = port['port'] + need_port_update_notify = False + + host = attrs and attrs.get(portbindings.HOST_ID) + host_set = attributes.is_attr_set(host) + + if not host_set: + LOG.error(_("No Host supplied to bind DVR Port %s"), id) + return + + binding = db.get_dvr_port_binding_by_host(port_id=id, host=host, + session=None) + if ((not binding) or + (binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED)): + session = context.session + with session.begin(subtransactions=True): + if (not binding): + binding = db.ensure_dvr_port_binding(session, id, + host, + attrs['device_id']) + orig_port = super(Ml2Plugin, self).get_port(context, id) + network = self.get_network(context, orig_port['network_id']) + mech_context = driver_context.PortContext( + self, + context, + orig_port, + network, + original_port=orig_port, + binding=binding) + need_port_update_notify |= self._process_dvr_port_binding( + mech_context, context, attrs) + + def delete_port(self, context, id, l3_port_check=True): + LOG.debug(_("Deleting port %s"), id) + l3plugin = manager.NeutronManager.get_service_plugins().get( + service_constants.L3_ROUTER_NAT) + if l3plugin and l3_port_check: + l3plugin.prevent_l3_port_deletion(context, id) + + session = context.session + mech_context = None + # REVISIT: Serialize this operation with a semaphore to prevent + # undesired eventlet yields leading to 'lock wait timeout' errors + with contextlib.nested(lockutils.lock('db-access'), + session.begin(subtransactions=True)): + try: + port_db = (session.query(models_v2.Port). + enable_eagerloads(False). 
+ filter_by(id=id).with_lockmode('update').one()) + except sa_exc.NoResultFound: + # the port existed when l3plugin.prevent_l3_port_deletion + # was called but now is already gone + LOG.debug(_("The port '%s' was deleted"), id) + return + port = self._make_port_dict(port_db) + + network = self.get_network(context, port['network_id']) + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + bindings = db.get_dvr_port_bindings(id) + for bind in bindings: + mech_context = driver_context.PortContext(self, context, + port, network, + binding=bind) + self.mechanism_manager.delete_port_precommit(mech_context) + LOG.debug("Calling base delete_port %s for DVR", id) + super(Ml2Plugin, self).delete_port(context, id) + else: + mech_context = driver_context.PortContext(self, context, port, + network) + if "compute:" in port['device_owner']: + self.dvr_deletens_ifnovm(context, id) + self.mechanism_manager.delete_port_precommit(mech_context) + self._delete_port_security_group_bindings(context, id) + LOG.debug(_("Calling base delete_port")) + if l3plugin: + l3plugin.disassociate_floatingips(context, id) + super(Ml2Plugin, self).delete_port(context, id) + + try: + # for both normal and DVR Interface ports, only one invocation of + # delete_port_postcommit + if mech_context: + self.mechanism_manager.delete_port_postcommit(mech_context) + else: + LOG.error(_("Unable to invoke delete_port_postcommit," + " mech_context NULL for port %s"), id) + except ml2_exc.MechanismDriverError: + # TODO(apech) - One or more mechanism driver failed to + # delete the port. Ideally we'd notify the caller of the + # fact that an error occurred. + LOG.error(_("mechanism_manager.delete_port_postcommit failed for" + " port %s"), id) + self.notify_security_groups_member_updated(context, port) + + def _generate_dvr_port_status(self, session, port_id): + # an OR'ed value of status assigned to parent port from the + # dvrportbinding bucket + query = session.query(models.DVRPortBinding) + bindings = query.filter(models.DVRPortBinding.port_id == port_id).all() + for bind in bindings: + if bind.status == const.PORT_STATUS_ACTIVE: + return const.PORT_STATUS_ACTIVE + for bind in bindings: + if bind.status == const.PORT_STATUS_DOWN: + return const.PORT_STATUS_DOWN + return const.PORT_STATUS_BUILD + + def update_port_status(self, context, port_id, status, host=None): + updated = False + session = context.session + # REVISIT: Serialize this operation with a semaphore to prevent + # undesired eventlet yields leading to 'lock wait timeout' errors + with contextlib.nested(lockutils.lock('db-access'), + session.begin(subtransactions=True)): + port = db.get_port(session, port_id) + if not port: + LOG.warning(_("Port %(port)s updated up by agent not found"), + {'port': port_id}) + return False + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + binding = db.get_dvr_port_binding_by_host(port_id=port['id'], + host=host, + session=session) + if not binding: + LOG.error(_("Binding info for port %s not found"), + port_id) + return False + binding['status'] = status + binding.update(binding) + + # binding already updated + with contextlib.nested(lockutils.lock('db-access'), + session.begin(subtransactions=True)): + port = db.get_port(session, port_id) + if not port: + LOG.warning(_("Port %(port)s updated up by agent not found"), + {'port': port_id}) + return False + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + original_port = self._make_port_dict(port) + network = self.get_network(context, + 
original_port['network_id']) + port.status = self._generate_dvr_port_status(session, + port['id']) + updated_port = self._make_port_dict(port) + mech_context = (driver_context.PortContext( + self, context, updated_port, network, + original_port=original_port, + binding=binding)) + self.mechanism_manager.update_port_precommit(mech_context) + updated = True + elif port.status != status: + original_port = self._make_port_dict(port) + port.status = status + updated_port = self._make_port_dict(port) + network = self.get_network(context, + original_port['network_id']) + mech_context = driver_context.PortContext( + self, context, updated_port, network, + original_port=original_port) + self.mechanism_manager.update_port_precommit(mech_context) + updated = True + + if updated: + self.mechanism_manager.update_port_postcommit(mech_context) + + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + self._check_and_delete_dvr_port_binding(mech_context, context) + + return True + + def get_bindinghost_by_portid(self, port_id): + return db.get_port_binding_host(port_id) + + def get_dvr_port_binding_by_host(self, context, port_id, host): + session = context.session + return db.get_dvr_port_binding_by_host(port_id, host, + session) + + def port_bound_to_host(self, context, port_id, host): + try: + port = self.get_port(context, port_id) + if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: + bindings = db.get_dvr_port_bindings(port_id) + for b in bindings: + if (b.host == host): + # LOG.debug(_("Binding with host %s exists for port %s") + # % (host, port_id)) + return True + LOG.debug(_("No Binding exists for port %s"), port_id) + return False + else: + port_host = db.get_port_binding_host(port_id) + return (port_host == host) + except exc.PortNotFound: + LOG.debug(_("Port not found %s"), port_id) + return False diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py new file mode 100644 index 00000000..a11859ea --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -0,0 +1,777 @@ +# Copyright 2014, Hewlett Packard, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
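+# NOTE: this patched agent matches the shared DVR MAC prefix
+# (fa:16:3f:00:00:00/ff:ff:ff:00:00:00) with prefix-based flow rules instead
+# of installing one rule per remote DVR MAC, so dvr_mac_address_update()
+# below is a no-op.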
+# @author: Vivekanandan Narasimhan, Hewlett-Packard Inc + + +from neutron.api.rpc import dvr_rpc +from neutron.common import constants as q_const +from neutron.openstack.common import log as logging +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + + +# A class to represent a DVR-hosted subnet including vif_ports resident on +# that subnet +class LocalDVRSubnetMapping: + + def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID): + # set of commpute ports on on this dvr subnet + self.compute_ports = {} + self.subnet = subnet + self.csnat_ofport = csnat_ofport + self.dvr_owned = False + + def __str__(self): + return ("subnet = %s compute_ports = %s csnat_port = %s" + " is_dvr_owned = %s" % + (self.subnet, self.get_compute_ofports(), + self.get_csnat_ofport(), self.is_dvr_owned())) + + def get_subnet_info(self): + return self.subnet + + def set_dvr_owned(self, owned): + self.dvr_owned = owned + + def is_dvr_owned(self): + return self.dvr_owned + + def add_compute_ofport(self, vif_id, ofport): + self.compute_ports[vif_id] = ofport + + def remove_compute_ofport(self, vif_id): + self.compute_ports.pop(vif_id, 0) + + def remove_all_compute_ofports(self): + self.compute_ports.clear() + + def get_compute_ofports(self): + return self.compute_ports + + def set_csnat_ofport(self, ofport): + self.csnat_ofport = ofport + + def get_csnat_ofport(self): + return self.csnat_ofport + + +class OVSPort: + + def __init__(self, id, ofport, mac, device_owner): + self.id = id + self.mac = mac + self.ofport = ofport + self.subnets = set() + self.device_owner = device_owner + + def __str__(self): + return ("OVSPort: id = %s, ofport = %s, mac = %s," + "device_owner = %s, subnets = %s" % + (self.id, self.ofport, self.mac, + self.device_owner, self.subnets)) + + def add_subnet(self, subnet_id): + self.subnets.add(subnet_id) + + def remove_subnet(self, subnet_id): + self.subnets.remove(subnet_id) + + def remove_all_subnets(self): + self.subnets.clear() + + def get_subnets(self): + return self.subnets + + def get_device_owner(self): + return self.device_owner + + def get_mac(self): + return self.mac + + def get_ofport(self): + return self.ofport + + +class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): + + '''Implements OVS-based DVR(Distributed Virtual Router), for + overlay networks. + + ''' + + # history + # 1.0 Initial version + + def __init__(self, context, plugin_rpc, integ_br, tun_br, + patch_int_ofport=constants.OFPORT_INVALID, + patch_tun_ofport=constants.OFPORT_INVALID, + host=None, enable_tunneling=False, + enable_distributed_routing=False): + self.context = context + self.plugin_rpc = plugin_rpc + self.int_br = integ_br + self.tun_br = tun_br + self.patch_int_ofport = patch_int_ofport + self.patch_tun_ofport = patch_tun_ofport + self.host = host + self.enable_tunneling = enable_tunneling + self.enable_distributed_routing = enable_distributed_routing + + def reset_ovs_parameters(self, integ_br, tun_br, + patch_int_ofport, patch_tun_ofport): + '''Reset the openvswitch parameters + ''' + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + self.int_br = integ_br + self.tun_br = tun_br + self.patch_int_ofport = patch_int_ofport + self.patch_tun_ofport = patch_tun_ofport + + def setup_dvr_flows_on_integ_tun_br(self): + '''Setup up initial dvr flows into integration bridge and tunnel + bridge. 
+ ''' + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + # get the local DVR MAC Address + try: + LOG.debug("L2 Agent operating in DVR Mode") + self.dvr_mac_address = None + self.local_dvr_map = {} + self.local_csnat_map = {} + self.local_ports = {} + self.registered_dvr_macs = set() + details = self.plugin_rpc.\ + get_dvr_mac_address_by_host(self.context, self.host) + LOG.debug("L2 Agent DVR: Received response for " + "get_dvr_mac_address_by_host() from " + "plugin: %r", details) + self.dvr_mac_address = details['mac_address'] + except Exception: + LOG.exception(_("DVR: Failed to obtain local DVR Mac address")) + self.enable_distributed_routing = False + # switch all traffic using L2 learning + self.int_br.add_flow(priority=1, actions="normal") + return + + # Remove existing flows in integration bridge + # self.int_br.remove_all_flows() + + # Insert 'drop' action as the default for Table 2 + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=1, + actions="drop") + self.int_br.add_flow(table=constants.DVR_TO_DST_MAC, + priority=1, + actions="drop") + + # Insert 'normal' action as the default for Table 1 + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=1, + actions="normal") + +# add by j00209498 + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=2, + in_port=self.patch_tun_ofport, + dl_src='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="resubmit(,%s)" % + constants.DVR_TO_SRC_MAC) + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=3, + in_port=self.patch_tun_ofport, + dl_dst='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="resubmit(,%s)" % + constants.DVR_TO_DST_MAC) + + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=1, + dl_src='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="output:%s" % self.patch_int_ofport) + + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=2, + dl_dst='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="output:%s" % self.patch_int_ofport) +# comment by j00209498 +# dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context) +# LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs) +# for mac in dvr_macs: +# if mac['mac_address'] == self.dvr_mac_address: +# continue +# Table 0 (default) will now sort DVR traffic from other +# traffic depending on in_port +# self.int_br.add_flow(table=constants.LOCAL_SWITCHING, +# priority=2, +# in_port=self.patch_tun_ofport, +# dl_src=mac['mac_address'], +# actions="resubmit(,%s)" % +# constants.DVR_TO_SRC_MAC) +# Table DVR_NOT_LEARN ensures unique dvr macs in the cloud +# are not learnt, as they may +# result in flow explosions +# self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, +# priority=1, +# dl_src=mac['mac_address'], +# actions="output:%s" % self.patch_int_ofport) +# +# self.registered_dvr_macs.add(mac['mac_address']) + + self.tun_br.add_flow(priority=1, + in_port=self.patch_int_ofport, + actions="resubmit(,%s)" % + constants.DVR_PROCESS) + # table-miss should be sent to learning table + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=0, + actions="resubmit(,%s)" % + constants.LEARN_FROM_TUN) + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=0, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + + def dvr_mac_address_update(self, dvr_macs): + pass + # comment by j00209498 +# if not self.enable_tunneling: +# return +# +# if not self.enable_distributed_routing: +# return +# +# LOG.debug("DVR Mac address update with host-mac: %s", 
dvr_macs) +# +# if not self.dvr_mac_address: +# LOG.debug("Self mac unknown, ignoring this" +# " dvr_mac_address_update() ") +# return +# +# dvr_host_macs = set() +# for entry in dvr_macs: +# if entry['mac_address'] == self.dvr_mac_address: +# continue +# dvr_host_macs.add(entry['mac_address']) +# +# if dvr_host_macs == self.registered_dvr_macs: +# LOG.debug("DVR Mac address already up to date") +# return +# +# dvr_macs_added = dvr_host_macs - self.registered_dvr_macs +# dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs +# +# for oldmac in dvr_macs_removed: +# self.int_br.delete_flows(table=constants.LOCAL_SWITCHING, +# in_port=self.patch_tun_ofport, +# dl_src=oldmac) +# self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN, +# dl_src=oldmac) +# LOG.debug("Removed DVR MAC flow for %s", oldmac) +# self.registered_dvr_macs.remove(oldmac) +# +# for newmac in dvr_macs_added: +# self.int_br.add_flow(table=constants.LOCAL_SWITCHING, +# priority=2, +# in_port=self.patch_tun_ofport, +# dl_src=newmac, +# actions="resubmit(,%s)" % +# constants.DVR_TO_SRC_MAC) +# self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, +# priority=1, +# dl_src=newmac, +# actions="output:%s" % self.patch_int_ofport) +# LOG.debug("Added DVR MAC flow for %s", newmac) +# self.registered_dvr_macs.add(newmac) + + def is_dvr_router_interface(self, device_owner): + return (device_owner == q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED) + + def process_tunneled_network(self, network_type, lvid, segmentation_id): + if not self.enable_tunneling: + return + if not self.enable_distributed_routing: + return + self.tun_br.add_flow(table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + actions="mod_vlan_vid:%s," + "resubmit(,%s)" % + (lvid, constants.DVR_NOT_LEARN)) + + def _bind_distributed_router_interface_port(self, port, fixed_ips, + device_owner, local_vlan): + # since router port must have only one fixed IP, directly + # use fixed_ips[0] + subnet_uuid = fixed_ips[0]['subnet_id'] + csnat_ofport = constants.OFPORT_INVALID + ldm = None + if subnet_uuid in self.local_dvr_map: + ldm = self.local_dvr_map[subnet_uuid] + csnat_ofport = ldm.get_csnat_ofport() + if csnat_ofport == constants.OFPORT_INVALID: + LOG.error(_("DVR: Duplicate DVR router interface detected " + "for subnet %s"), subnet_uuid) + return + else: + # set up LocalDVRSubnetMapping available for this subnet + subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, + subnet_uuid) + if not subnet_info: + LOG.error(_("DVR: Unable to retrieve subnet information" + " for subnet_id %s"), subnet_uuid) + return + LOG.debug("get_subnet_for_dvr for subnet %s returned with %s" % + (subnet_uuid, subnet_info)) + ldm = LocalDVRSubnetMapping(subnet_info) + self.local_dvr_map[subnet_uuid] = ldm + + # DVR takes over + ldm.set_dvr_owned(True) + + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + local_compute_ports = self.plugin_rpc.\ + get_compute_ports_on_host_by_subnet(self.context, + self.host, + subnet_uuid) + LOG.debug("DVR: List of ports received from " + "get_compute_ports_on_host_by_subnet %r", + local_compute_ports) + for prt in local_compute_ports: + vif = self.int_br.get_vif_port_by_id(prt['id']) + if not vif: + continue + ldm.add_compute_ofport(vif.vif_id, vif.ofport) + if vif.vif_id in self.local_ports: + # ensure if a compute port is already on + # a different dvr routed subnet + # if yes, queue this subnet to that port + ovsport = self.local_ports[vif.vif_id] + ovsport.add_subnet(subnet_uuid) + else: + # the 
compute port is discovered first here that its on + # a dvr routed subnet queue this subnet to that port + ovsport = OVSPort(vif.vif_id, vif.ofport, + vif.vif_mac, prt['device_owner']) + + ovsport.add_subnet(subnet_uuid) + self.local_ports[vif.vif_id] = ovsport + + # create rule for just this vm port + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + "output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + + # create rule in Table LOCAL_SWITCHING to forward + # broadcast/multicast frames from dvr router interface to + # appropriate local tenant ports + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + if csnat_ofport != constants.OFPORT_INVALID: + ofports = str(csnat_ofport) + ',' + ofports + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s, " + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=3, + dl_vlan=local_vlan, + proto='arp', + nw_dst=subnet_info['gateway_ip'], + actions="drop") + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=2, + dl_vlan=local_vlan, + dl_dst=port.vif_mac, + actions="drop") + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=1, + dl_vlan=local_vlan, + dl_src=port.vif_mac, + actions="mod_dl_src:%s,resubmit(,%s)" % + (self.dvr_mac_address, + constants.PATCH_LV_TO_TUN)) + + # the dvr router interface is itself a port, so capture it + # queue this subnet to that port. A subnet appears only once as + # a router interface on any given router + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + + def _bind_compute_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): + # Handle new compute port added use-case + subnet_uuid = None + for ips in fixed_ips: + if ips['subnet_id'] not in self.local_dvr_map: + continue + subnet_uuid = ips['subnet_id'] + ldm = self.local_dvr_map[subnet_uuid] + if not ldm.is_dvr_owned(): + # well this is csnat stuff, let dvr come in + # and do plumbing for this vm later + continue + + # This confirms that this compute port belongs + # to a dvr hosted subnet. 
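_bind_distributed_router_interface_port() installs two kinds of rules in DVR_TO_SRC_MAC: a priority-4 unicast rule per VM MAC and a priority-2 subnet-wide rule keyed on the subnet CIDR, both rewriting the source MAC to the distributed gateway's and stripping the local VLAN. A sketch of how those match/action pairs are assembled (dvr_to_src_mac_flows is a hypothetical helper, not part of the agent):

    def dvr_to_src_mac_flows(local_vlan, gateway_mac, cidr, vm_mac,
                             vm_ofport, all_ofports):
        # Priority-4 unicast rule: frames destined to this VIF leave with
        # the gateway MAC as source and the local VLAN stripped.
        unicast = dict(table='DVR_TO_SRC_MAC', priority=4,
                       dl_vlan=local_vlan, dl_dst=vm_mac,
                       actions='strip_vlan,mod_dl_src:%s,output:%s'
                               % (gateway_mac, vm_ofport))
        # Priority-2 subnet rule: IP traffic for the whole CIDR goes to
        # every local compute port (csnat port first, when present).
        subnet = dict(table='DVR_TO_SRC_MAC', priority=2, proto='ip',
                      dl_vlan=local_vlan, nw_dst=cidr,
                      actions='strip_vlan,mod_dl_src:%s,output:%s'
                              % (gateway_mac,
                                 ','.join(map(str, all_ofports))))
        return unicast, subnet

    for flow in dvr_to_src_mac_flows(3, 'fa:16:3e:aa:bb:cc', '10.0.0.0/24',
                                     'fa:16:3e:11:22:33', 5, [9, 5, 7]):
        print(flow)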
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Plumbing compute port %s", port.vif_id) + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + csnat_ofport = ldm.get_csnat_ofport() + ldm.add_compute_ofport(port.vif_id, port.ofport) + if port.vif_id in self.local_ports: + # ensure if a compute port is already on a different + # dvr routed subnet + # if yes, queue this subnet to that port + ovsport = self.local_ports[port.vif_id] + ovsport.add_subnet(subnet_uuid) + else: + # the compute port is discovered first here that its + # on a dvr routed subnet, queue this subnet to that port + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + # create a rule for this vm port + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + "output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + + if csnat_ofport != constants.OFPORT_INVALID: + ofports = str(csnat_ofport) + ',' + ofports + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): + if port.vif_id in self.local_ports: + # throw an error if CSNAT port is already on a different + # dvr routed subnet + ovsport = self.local_ports[port.vif_id] + subs = list(ovsport.get_subnets()) + LOG.error(_("Centralized-SNAT port %s already seen on "), + port.vif_id) + LOG.error(_("a different subnet %s"), subs[0]) + return + # since centralized-SNAT (CSNAT) port must have only one fixed + # IP, directly use fixed_ips[0] + subnet_uuid = fixed_ips[0]['subnet_id'] + ldm = None + subnet_info = None + if subnet_uuid not in self.local_dvr_map: + # no csnat ports seen on this subnet - create csnat state + # for this subnet + subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, + subnet_uuid) + ldm = LocalDVRSubnetMapping(subnet_info, port.ofport) + self.local_dvr_map[subnet_uuid] = ldm + else: + ldm = self.local_dvr_map[subnet_uuid] + subnet_info = ldm.get_subnet_info() + # Store csnat OF Port in the existing DVRSubnetMap + ldm.set_csnat_ofport(port.ofport) + + # create ovsPort footprint for csnat port + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + ofports = str(ldm.get_csnat_ofport()) + ',' + ofports + ip_subnet = subnet_info['cidr'] + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + def bind_port_to_dvr(self, port, network_type, fixed_ips, + device_owner, local_vlan_id): + # a port coming up as distributed router interface + if not self.enable_tunneling: + return + + if not 
self.enable_distributed_routing: + return + + if network_type not in constants.TUNNEL_NETWORK_TYPES: + return + + if device_owner == q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED: + self._bind_distributed_router_interface_port(port, fixed_ips, + device_owner, + local_vlan_id) + + if 'compute' in device_owner: + self._bind_compute_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) + + if device_owner == q_const.DEVICE_OWNER_ROUTER_SNAT: + self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) + + def _unbind_distributed_router_interface_port(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + + # removal of distributed router interface + subnet_ids = ovsport.get_subnets() + subnet_set = set(subnet_ids) + # ensure we process for all the subnets laid on this removed port + for sub_uuid in subnet_set: + if sub_uuid not in self.local_dvr_map: + continue + + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + + # DVR is no more owner + ldm.set_dvr_owned(False) + + # remove all vm rules for this dvr subnet + # clear of compute_ports altogether + compute_ports = ldm.get_compute_ofports() + for vif_id in compute_ports: + ovsport = self.local_ports[vif_id] + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + ldm.remove_all_compute_ofports() + + if ldm.get_csnat_ofport() != -1: + # If there is a csnat port on this agent, preserve + # the local_dvr_map state + ofports = str(ldm.get_csnat_ofport()) + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + # removed port is a distributed router interface + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', dl_vlan=local_vlan, + nw_dst=ip_subnet) + # remove subnet from local_dvr_map as no dvr (or) csnat + # ports available on this agent anymore + self.local_dvr_map.pop(sub_uuid, None) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + proto='arp', + nw_dst=subnet_info['gateway_ip']) + ovsport.remove_subnet(sub_uuid) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + dl_dst=port.vif_mac) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + dl_src=port.vif_mac) + # release port state + self.local_ports.pop(port.vif_id, None) + + def _unbind_compute_port_on_dvr_subnet(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + # This confirms that this compute port being removed belonged + # to a dvr hosted subnet. 
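bind_port_to_dvr() above fans out on the port's device_owner: distributed router interfaces, compute VIFs (any owner containing 'compute') and centralized-SNAT ports each get their own plumbing path, and unbind_port_from_dvr() mirrors the same split. A compact sketch of that dispatch; the literal owner strings are assumptions mirroring the q_const names used above:

    # Assumed values of the q_const constants referenced above.
    DEVICE_OWNER_DVR_INTERFACE = 'network:router_interface_distributed'
    DEVICE_OWNER_ROUTER_SNAT = 'network:router_centralized_snat'

    def dispatch_dvr_binding(device_owner):
        # Mirrors the if-chain in bind_port_to_dvr(); the checks are not
        # mutually exclusive there, but a port has exactly one owner.
        if device_owner == DEVICE_OWNER_DVR_INTERFACE:
            return 'bind distributed router interface'
        if 'compute' in device_owner:
            return 'bind compute port on dvr subnet'
        if device_owner == DEVICE_OWNER_ROUTER_SNAT:
            return 'bind centralized snat port'
        return 'not a dvr-relevant port'

    print(dispatch_dvr_binding('compute:nova'))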
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Removing plumbing for compute port %s", port) + subnet_ids = ovsport.get_subnets() + # ensure we process for all the subnets laid on this port + for sub_uuid in subnet_ids: + if sub_uuid not in self.local_dvr_map: + continue + + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ldm.remove_compute_ofport(port.vif_id) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + ip_subnet = subnet_info['cidr'] + + # first remove this vm port rule + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + if ldm.get_csnat_ofport() != -1: + # If there is a csnat port on this agent, preserve + # the local_dvr_map state + ofports = str(ldm.get_csnat_ofport()) + ',' + ofports + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], + ofports)) + else: + # remove the flow altogether, as no ports (both csnat/ + # compute) are available on this subnet in this + # agent + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet) + # release port state + self.local_ports.pop(port.vif_id, None) + + def _unbind_centralized_snat_port_on_dvr_subnet(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + # This comfirms that this compute port being removed belonged + # to a dvr hosted subnet. 
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Removing plumbing for csnat port %s", port) + sub_uuid = list(ovsport.get_subnets())[0] + # ensure we process for all the subnets laid on this port + if sub_uuid not in self.local_dvr_map: + return + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + ldm.set_csnat_ofport(constants.OFPORT_INVALID) + # then remove csnat port rule + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet) + if not ldm.is_dvr_owned(): + # if not owned by DVR (only used for csnat), remove this + # subnet state altogether + self.local_dvr_map.pop(sub_uuid, None) + + # release port state + self.local_ports.pop(port.vif_id, None) + + def unbind_port_from_dvr(self, vif_port, local_vlan_id): + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + if not vif_port: + LOG.debug("DVR: VIF Port not available for delete %s", vif_port) + return + + # Handle port removed use-case + if vif_port.vif_id not in self.local_ports: + LOG.debug("DVR: Non distributed port, ignoring %s", vif_port) + return + + ovsport = self.local_ports[vif_port.vif_id] + + if ovsport.get_device_owner() == \ + q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED: + self._unbind_distributed_router_interface_port(vif_port, + local_vlan_id) + + if 'compute' in ovsport.get_device_owner(): + self._unbind_compute_port_on_dvr_subnet(vif_port, + local_vlan_id) + + if ovsport.get_device_owner() == q_const.DEVICE_OWNER_ROUTER_SNAT: + self._unbind_centralized_snat_port_on_dvr_subnet(vif_port, + local_vlan_id) diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py new file mode 100644 index 00000000..f7f6720e --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -0,0 +1,1818 @@ +#!/usr/bin/env python +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import hashlib +import signal +import sys +import time +import os +import json + +import eventlet +eventlet.monkey_patch() + +import netaddr +from neutron.plugins.openvswitch.agent import ovs_dvr_neutron_agent +from oslo.config import cfg +from six import moves + +from neutron.agent import l2population_rpc +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import polling +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc import dvr_rpc +from neutron.common import config as common_config +from neutron.common import constants as q_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils as q_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.openvswitch.common import config # noqa +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + +# A placeholder for dead vlans. +DEAD_VLAN_TAG = str(q_const.MAX_VLAN_TAG + 1) + + +# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac' +# attributes set). +class LocalVLANMapping: + + def __init__(self, vlan, network_type, physical_network, segmentation_id, + vif_ports=None): + if vif_ports is None: + vif_ports = {} + self.vlan = vlan + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + self.vif_ports = vif_ports + # set of tunnel ports on which packets should be flooded + self.tun_ofports = set() + + def __str__(self): + return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % + (self.vlan, self.network_type, self.physical_network, + self.segmentation_id)) + + +class GatewayPortExternInfo: + + def __init__(self, port, ip, vlan_id, + num=None, extern_ip_mac_mapping=None): + self.port = port + self.gateway_ip = ip + self.local_vlan = vlan_id + self.eth_num = num + if(not extern_ip_mac_mapping): + self.extern_ip_mac_mapping = {} + else: + self.extern_ip_mac_mapping = extern_ip_mac_mapping + + +class OVSPluginApi(agent_rpc.PluginApi, + dvr_rpc.DVRServerRpcApiMixin, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall(defer_refresh_firewall=True) + + +class OVSNeutronAgent(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin, + l2population_rpc.L2populationRpcCallBackMixin, + dvr_rpc.DVRAgentRpcCallbackMixin): + + '''Implements OVS-based tunneling, VLANs and flat networks. + + Two local bridges are created: an integration bridge (defaults to + 'br-int') and a tunneling bridge (defaults to 'br-tun'). An + additional bridge is created for each physical network interface + used for VLANs and/or flat networks. + + All VM VIFs are plugged into the integration bridge. VM VIFs on a + given virtual network share a common "local" VLAN (i.e. not + propagated externally). The VLAN id of this local VLAN is mapped + to the physical networking details realizing that virtual network. 
+ + For virtual networks realized as GRE tunnels, a Logical Switch + (LS) identifier is used to differentiate tenant traffic on + inter-HV tunnels. A mesh of tunnels is created to other + Hypervisors in the cloud. These tunnels originate and terminate on + the tunneling bridge of each hypervisor. Port patching is done to + connect local VLANs on the integration bridge to inter-hypervisor + tunnels on the tunnel bridge. + + For each virtual network realized as a VLAN or flat network, a + veth or a pair of patch ports is used to connect the local VLAN on + the integration bridge with the physical network bridge, with flow + rules adding, modifying, or stripping VLAN tags as necessary. + ''' + + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + # 1.2 Support DVR (Distributed Virtual Router) RPC + RPC_API_VERSION = '1.2' + + def __init__(self, integ_br, tun_br, local_ip, + bridge_mappings, root_helper, + polling_interval, tunnel_types=None, + veth_mtu=None, l2_population=False, + enable_distributed_routing=False, + minimize_polling=False, + ovsdb_monitor_respawn_interval=( + constants.DEFAULT_OVSDBMON_RESPAWN), + arp_responder=False, + use_veth_interconnection=False): + '''Constructor. + + :param integ_br: name of the integration bridge. + :param tun_br: name of the tunnel bridge. + :param local_ip: local IP address of this hypervisor. + :param bridge_mappings: mappings from physical network name to bridge. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to poll DB. + :param tunnel_types: A list of tunnel types to enable support for in + the agent. If set, will automatically set enable_tunneling to + True. + :param veth_mtu: MTU size for veth interfaces. + :param l2_population: Optional, whether L2 population is turned on + :param minimize_polling: Optional, whether to minimize polling by + monitoring ovsdb for interface changes. + :param ovsdb_monitor_respawn_interval: Optional, when using polling + minimization, the number of seconds to wait before respawning + the ovsdb monitor. + :param arp_responder: Optional, enable local ARP responder if it is + supported. + :param use_veth_interconnection: use veths instead of patch ports to + interconnect the integration bridge to physical bridges. + ''' + super(OVSNeutronAgent, self).__init__() + self.use_veth_interconnection = use_veth_interconnection + self.veth_mtu = veth_mtu + self.root_helper = root_helper + self.available_local_vlans = set(moves.xrange(q_const.MIN_VLAN_TAG, + q_const.MAX_VLAN_TAG)) + self.tunnel_types = tunnel_types or [] + self.l2_pop = l2_population + # TODO(ethuleau): Initially, local ARP responder is be dependent to the + # ML2 l2 population mechanism driver. 
+ self.arp_responder_enabled = (arp_responder and + self._check_arp_responder_support() and + self.l2_pop) + self.enable_distributed_routing = enable_distributed_routing + self.agent_state = { + 'binary': 'neutron-openvswitch-agent', + 'host': cfg.CONF.host, + 'topic': q_const.L2_AGENT_TOPIC, + 'configurations': {'bridge_mappings': bridge_mappings, + 'tunnel_types': self.tunnel_types, + 'tunneling_ip': local_ip, + 'l2_population': self.l2_pop, + 'arp_responder_enabled': + self.arp_responder_enabled, + 'enable_distributed_routing': + self.enable_distributed_routing}, + 'agent_type': q_const.AGENT_TYPE_OVS, + 'start_flag': True} + + # Keep track of int_br's device count for use by _report_state() + self.int_br_device_count = 0 + + self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper) + self.setup_integration_br() + # Stores port update notifications for processing in main rpc loop + self.updated_ports = set() + self.setup_rpc() + self.bridge_mappings = bridge_mappings + self.setup_physical_bridges(self.bridge_mappings) + self.local_vlan_map = {} + self.tun_br_ofports = {p_const.TYPE_GRE: {}, + p_const.TYPE_VXLAN: {}} + # added by j00209498 ----begin + self.tun_br_flowkey_ofport = None + self.gateway_port_extern_info = {} + self.extern_eth = cfg.CONF.AGENT.extern_net_port + self.available_eth_num_list = set(moves.xrange(0, 1000)) + # added by j00209498 ----end + + self.polling_interval = polling_interval + self.minimize_polling = minimize_polling + self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval + + if tunnel_types: + self.enable_tunneling = True + else: + self.enable_tunneling = False + self.local_ip = local_ip + self.tunnel_count = 0 + self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port + self.dont_fragment = cfg.CONF.AGENT.dont_fragment + self.tun_br = None + self.patch_int_ofport = constants.OFPORT_INVALID + self.patch_tun_ofport = constants.OFPORT_INVALID + if self.enable_tunneling: + # The patch_int_ofport and patch_tun_ofport are updated + # here inside the call to setup_tunnel_br + self.setup_tunnel_br(tun_br) + + self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent( + self.context, + self.plugin_rpc, + self.int_br, + self.tun_br, + self.patch_int_ofport, + self.patch_tun_ofport, + cfg.CONF.host, + self.enable_tunneling, + self.enable_distributed_routing) + + self.dvr_agent.setup_dvr_flows_on_integ_tun_br() + + # Collect additional bridges to monitor + self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br) + + # Security group agent support + self.sg_agent = OVSSecurityGroupAgent(self.context, + self.plugin_rpc, + root_helper) + # Initialize iteration counter + self.iter_num = 0 + self.run_daemon_loop = True + + def _check_arp_responder_support(self): + '''Check if OVS supports to modify ARP headers. + + This functionality is only available since the development branch 2.1. 
+ ''' + args = ['arp,action=load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[]'] + supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'add-flow', + args) + if not supported: + LOG.warning(_('OVS version can not support ARP responder.')) + return supported + + def _report_state(self): + # How many devices are likely used by a VM + self.agent_state.get('configurations')['devices'] = ( + self.int_br_device_count) + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def setup_rpc(self): + self.agent_id = 'ovs-agent-%s' % cfg.CONF.host + self.topic = topics.AGENT + self.plugin_rpc = OVSPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [self] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [constants.TUNNEL, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE], + [topics.DVR, topics.UPDATE]] + if self.l2_pop: + consumers.append([topics.L2POPULATION, + topics.UPDATE, cfg.CONF.host]) + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def get_net_uuid(self, vif_id): + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vif_id in vlan_mapping.vif_ports: + return network_id + + def network_delete(self, context, **kwargs): + LOG.debug(_("network_delete received")) + network_id = kwargs.get('network_id') + LOG.debug(_("Delete %s"), network_id) + # The network may not be defined on this agent + lvm = self.local_vlan_map.get(network_id) + if lvm: + self.reclaim_local_vlan(network_id) + else: + LOG.debug(_("Network %s not used on agent."), network_id) + + def port_update(self, context, **kwargs): + port = kwargs.get('port') + # Put the port identifier in the updated_ports set. 
+ # Even if full port details might be provided to this call, + # they are not used since there is no guarantee the notifications + # are processed in the same order as the relevant API requests + self.updated_ports.add(port['id']) + LOG.debug(_("port_update message processed for port %s"), port['id']) + + def tunnel_update(self, context, **kwargs): + LOG.debug(_("tunnel_update received")) + if not self.enable_tunneling: + return + tunnel_ip = kwargs.get('tunnel_ip') + tunnel_id = kwargs.get('tunnel_id', self.get_ip_in_hex(tunnel_ip)) + if not tunnel_id: + return + tunnel_type = kwargs.get('tunnel_type') + if not tunnel_type: + LOG.error(_("No tunnel_type specified, cannot create tunnels")) + return + if tunnel_type not in self.tunnel_types: + LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type) + return + if tunnel_ip == self.local_ip: + return + tun_name = '%s-%s' % (tunnel_type, tunnel_id) + if not self.l2_pop: + self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type) + + def fdb_add(self, context, fdb_entries): + LOG.debug(_("fdb_add received")) + for network_id, values in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + # Agent doesn't manage any port in this network + continue + agent_ports = values.get('ports') + agent_ports.pop(self.local_ip, None) + if len(agent_ports): + if not self.enable_distributed_routing: + # TODO(vivek): when defer_apply_on is used DVR + # flows are only getting partially configured when + # run concurrently with l2-pop ON. + # Will need make ovs_lib flow API context sensitive + # and then use the same across this file, which will + # address the race issue here. + self.tun_br.defer_apply_on() + for agent_ip, ports in agent_ports.items(): + # Ensure we have a tunnel port with this remote agent + ofport = self.tun_br_ofports[ + lvm.network_type].get(agent_ip) + if not ofport: + remote_ip_hex = self.get_ip_in_hex(agent_ip) + if not remote_ip_hex: + continue + port_name = '%s-%s' % (lvm.network_type, remote_ip_hex) + ofport = self.setup_tunnel_port(port_name, agent_ip, + lvm.network_type) + if ofport == 0: + continue + for port in ports: + self._add_fdb_flow(port, agent_ip, lvm, ofport) + if not self.enable_distributed_routing: + # TODO(vivek): when defer_apply_on is used DVR + # flows are only getting partiall configured when + # run concurrently with l2-pop ON. + # Will need make ovs_lib flow API context sensitive + # and then use the same across this file, which will + # address the race issue here. + self.tun_br.defer_apply_off() + + def fdb_remove(self, context, fdb_entries): + LOG.debug(_("fdb_remove received")) + for network_id, values in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + # Agent doesn't manage any more ports in this network + continue + agent_ports = values.get('ports') + agent_ports.pop(self.local_ip, None) + if len(agent_ports): + if not self.enable_distributed_routing: + # TODO(vivek): when defer_apply_on is used DVR + # flows are only getting partially configured when + # run concurrently with l2-pop ON. 
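tunnel_update() above derives the tunnel port name from the tunnel type and the remote endpoint, falling back to a hex rendering of the remote IP when no tunnel_id is supplied; fdb_add() builds names the same way. A sketch of that naming, assuming get_ip_in_hex() is the usual zero-padded hex encoding of the IPv4 address:

    import netaddr

    def ip_in_hex(ip):
        # Assumed behaviour of get_ip_in_hex(): zero-padded hex of the
        # IPv4 address, e.g. 192.0.2.10 -> 'c000020a'.
        return '%08x' % int(netaddr.IPAddress(ip))

    def tunnel_port_name(tunnel_type, remote_ip, tunnel_id=None):
        # Mirrors tunnel_update()/fdb_add(): '<type>-<id>', where the id
        # defaults to the hex form of the remote tunnel endpoint.
        return '%s-%s' % (tunnel_type, tunnel_id or ip_in_hex(remote_ip))

    print(tunnel_port_name('vxlan', '192.0.2.10'))   # vxlan-c000020a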
+ # Will need make ovs_lib flow API context sensitive + # and then use the same across this file, which will + # address the race issue he + self.tun_br.defer_apply_on() + for agent_ip, ports in agent_ports.items(): + ofport = self.tun_br_ofports[ + lvm.network_type].get(agent_ip) + if not ofport: + continue + for port in ports: + self._del_fdb_flow(port, agent_ip, lvm, ofport) + if not self.enable_distributed_routing: + # TODO(vivek): when defer_apply_on is used DVR + # flows are only getting partially configured when + # run concurrently with l2-pop ON. + # Will need make ovs_lib flow API context sensitive + # and then use the same across this file, which will + # address the race issue here. + self.tun_br.defer_apply_off() + + def _add_fdb_flow(self, port_info, agent_ip, lvm, ofport): + if port_info == q_const.FLOODING_ENTRY: + lvm.tun_ofports.add(ofport) + ofports = ','.join(lvm.tun_ofports) + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan, + actions="strip_vlan,set_tunnel:%s," + "output:%s" % (lvm.segmentation_id, ofports)) + else: + self._set_arp_responder('add', lvm.vlan, port_info[0], + port_info[2]) + if not self.dvr_agent.is_dvr_router_interface(port_info[1]): + self.tun_br.add_flow(table=constants.UCAST_TO_TUN, + priority=2, + dl_vlan=lvm.vlan, + dl_dst=port_info[0], + actions="strip_vlan,set_tunnel:%s," + "output:%s" % + (lvm.segmentation_id, ofport)) + + def _del_fdb_flow(self, port_info, agent_ip, lvm, ofport): + if port_info == q_const.FLOODING_ENTRY: + lvm.tun_ofports.remove(ofport) + if len(lvm.tun_ofports) > 0: + ofports = ','.join(lvm.tun_ofports) + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (lvm.segmentation_id, ofports)) + else: + # This local vlan doesn't require any more tunnelling + self.tun_br.delete_flows(table=constants.FLOOD_TO_TUN, + dl_vlan=lvm.vlan) + # Check if this tunnel port is still used + self.cleanup_tunnel_port(ofport, lvm.network_type) + else: + self._set_arp_responder('remove', lvm.vlan, port_info[0], + port_info[1]) + self.tun_br.delete_flows(table=constants.UCAST_TO_TUN, + dl_vlan=lvm.vlan, + dl_dst=port_info[0]) + + def _fdb_chg_ip(self, context, fdb_entries): + '''fdb update when an IP of a port is updated. + + The ML2 l2-pop mechanism driver send an fdb update rpc message when an + IP of a port is updated. + + :param context: RPC context. + :param fdb_entries: fdb dicts that contain all mac/IP informations per + agent and network. + {'net1': + {'agent_ip': + {'before': [[mac, ip]], + 'after': [[mac, ip]] + } + } + 'net2': + ... + } + ''' + LOG.debug(_("update chg_ip received")) + + # TODO(ethuleau): Use OVS defer apply flows for all rules will be an + # interesting improvement here. But actually, OVS lib defer apply flows + # methods doesn't ensure the add flows will be applied before delete. 
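The _fdb_chg_ip() docstring above spells out the shape of the l2-pop payload: per network, per agent IP, lists of [mac, ip] pairs before and after the change. A small sketch walking that structure and emitting the corresponding ARP-responder add/remove operations:

    def chg_ip_operations(fdb_entries, local_ip, local_vlan_by_net):
        # fdb_entries follows the structure documented in _fdb_chg_ip():
        # {net_id: {agent_ip: {'before': [[mac, ip]], 'after': [[mac, ip]]}}}
        ops = []
        for net_id, agent_ports in fdb_entries.items():
            lvid = local_vlan_by_net.get(net_id)
            if lvid is None:
                continue                    # network not hosted here
            for agent_ip, state in agent_ports.items():
                if agent_ip == local_ip:
                    continue                # skip our own entries
                for mac, ip in state.get('after', []):
                    ops.append(('add', lvid, mac, ip))
                for mac, ip in state.get('before', []):
                    ops.append(('remove', lvid, mac, ip))
        return ops

    entries = {'net1': {'192.0.2.10': {
        'before': [['fa:16:3e:00:00:01', '10.0.0.5']],
        'after': [['fa:16:3e:00:00:01', '10.0.0.9']]}}}
    print(chg_ip_operations(entries, '192.0.2.1', {'net1': 3}))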
+ for network_id, agent_ports in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + continue + + for agent_ip, state in agent_ports.items(): + if agent_ip == self.local_ip: + continue + + after = state.get('after') + for mac, ip in after: + self._set_arp_responder('add', lvm.vlan, mac, ip) + + before = state.get('before') + for mac, ip in before: + self._set_arp_responder('remove', lvm.vlan, mac, ip) + + def fdb_update(self, context, fdb_entries): + LOG.debug(_("fdb_update received")) + for action, values in fdb_entries.items(): + method = '_fdb_' + action + if not hasattr(self, method): + raise NotImplementedError() + + getattr(self, method)(context, values) + + def _set_arp_responder(self, action, lvid, mac_str, ip_str): + '''Set the ARP respond entry. + + When the l2 population mechanism driver and OVS supports to edit ARP + fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the + tunnel bridge. + + :param action: add or remove ARP entry. + :param lvid: local VLAN map of network's ARP entry. + :param mac_str: MAC string value. + :param ip_str: IP string value. + ''' + if not self.arp_responder_enabled: + return + + mac = netaddr.EUI(mac_str, dialect=netaddr.mac_unix) + ip = netaddr.IPAddress(ip_str) + + if action == 'add': + actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' + 'mod_dl_src:%(mac)s,' + 'load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' + 'load:%(mac)#x->NXM_NX_ARP_SHA[],' + 'load:%(ip)#x->NXM_OF_ARP_SPA[],' + 'in_port' % {'mac': mac, 'ip': ip}) + self.tun_br.add_flow(table=constants.ARP_RESPONDER, + priority=1, + proto='arp', + dl_vlan=lvid, + nw_dst='%s' % ip, + actions=actions) + elif action == 'remove': + self.tun_br.delete_flows(table=constants.ARP_RESPONDER, + proto='arp', + dl_vlan=lvid, + nw_dst='%s' % ip) + else: + LOG.warning(_('Action %s not supported'), action) + + def provision_local_vlan(self, net_uuid, network_type, physical_network, + segmentation_id): + '''Provisions a local VLAN. + + :param net_uuid: the uuid of the network associated with this vlan. + :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', + 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + ''' + + # On a restart or crash of OVS, the network associated with this VLAN + # will already be assigned, so check for that here before assigning a + # new one. 
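provision_local_vlan() (whose body follows) first re-uses any mapping that survived an OVS restart and only then pops a fresh tag from available_local_vlans; reclaim_local_vlan() later returns the tag to the pool. A stripped-down sketch of that allocate/reclaim cycle (the 1-4094 bounds are assumptions matching the usual q_const VLAN tag range):

    MIN_VLAN_TAG, MAX_VLAN_TAG = 1, 4094    # assumed q_const defaults

    class VlanPool(object):
        def __init__(self):
            # Half-open range, as in the agent's moves.xrange() call.
            self.available = set(range(MIN_VLAN_TAG, MAX_VLAN_TAG))
            self.by_network = {}

        def provision(self, net_uuid):
            # Re-use an existing assignment (e.g. after an OVS restart),
            # otherwise take any free tag from the pool.
            if net_uuid in self.by_network:
                return self.by_network[net_uuid]
            if not self.available:
                raise RuntimeError('no local VLAN available')
            lvid = self.available.pop()
            self.by_network[net_uuid] = lvid
            return lvid

        def reclaim(self, net_uuid):
            lvid = self.by_network.pop(net_uuid, None)
            if lvid is not None:
                self.available.add(lvid)

    pool = VlanPool()
    tag = pool.provision('net-1')
    assert pool.provision('net-1') == tag   # idempotent across restarts
    pool.reclaim('net-1')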
+ lvm = self.local_vlan_map.get(net_uuid) + if lvm: + lvid = lvm.vlan + else: + if not self.available_local_vlans: + LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) + return + lvid = self.available_local_vlans.pop() + self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, + network_type, + physical_network, + segmentation_id) + + LOG.info(_("Assigning %(vlan_id)s as local vlan for " + "net-id=%(net_uuid)s"), + {'vlan_id': lvid, 'net_uuid': net_uuid}) + + if network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + # outbound broadcast/multicast + ofports = ','.join(self.tun_br_ofports[network_type].values()) + if ofports: + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=lvid, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (segmentation_id, ofports)) + # inbound from tunnels: set lvid in the right table + # and resubmit to Table LEARN_FROM_TUN for mac learning + if self.enable_distributed_routing: + self.dvr_agent.process_tunneled_network( + network_type, lvid, segmentation_id) + else: + self.tun_br.add_flow( + table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + actions="mod_vlan_vid:%s," + "resubmit(,%s)" % + (lvid, constants.LEARN_FROM_TUN)) + + else: + LOG.error(_("Cannot provision %(network_type)s network for " + "net-id=%(net_uuid)s - tunneling disabled"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + elif network_type == p_const.TYPE_FLAT: + if physical_network in self.phys_brs: + # outbound + br = self.phys_brs[physical_network] + br.add_flow(priority=4, + in_port=self.phys_ofports[physical_network], + dl_vlan=lvid, + actions="strip_vlan,normal") + # inbound + self.int_br.add_flow( + priority=3, + in_port=self.int_ofports[physical_network], + dl_vlan=0xffff, + actions="mod_vlan_vid:%s,normal" % lvid) + else: + LOG.error(_("Cannot provision flat network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_VLAN: + if physical_network in self.phys_brs: + # outbound + br = self.phys_brs[physical_network] + br.add_flow(priority=4, + in_port=self.phys_ofports[physical_network], + dl_vlan=lvid, + actions="mod_vlan_vid:%s,normal" % segmentation_id) + # inbound + self.int_br.add_flow(priority=3, + in_port=self. + int_ofports[physical_network], + dl_vlan=segmentation_id, + actions="mod_vlan_vid:%s,normal" % lvid) + else: + LOG.error(_("Cannot provision VLAN network for " + "net-id=%(net_uuid)s - no bridge for " + "physical_network %(physical_network)s"), + {'net_uuid': net_uuid, + 'physical_network': physical_network}) + elif network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot provision unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': network_type, + 'net_uuid': net_uuid}) + + def reclaim_local_vlan(self, net_uuid): + '''Reclaim a local VLAN. + + :param net_uuid: the network uuid associated with this vlan. + :param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id, + vif_ids) mapping. 
+ ''' + lvm = self.local_vlan_map.pop(net_uuid, None) + if lvm is None: + LOG.debug(_("Network %s not used on agent."), net_uuid) + return + + LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"), + {'vlan_id': lvm.vlan, + 'net_uuid': net_uuid}) + + if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + self.tun_br.delete_flows( + table=constants.TUN_TABLE[lvm.network_type], + tun_id=lvm.segmentation_id) + self.tun_br.delete_flows(dl_vlan=lvm.vlan) + if self.l2_pop: + # Try to remove tunnel ports if not used by other networks + for ofport in lvm.tun_ofports: + self.cleanup_tunnel_port(ofport, lvm.network_type) + elif lvm.network_type == p_const.TYPE_FLAT: + if lvm.physical_network in self.phys_brs: + # outbound + br = self.phys_brs[lvm.physical_network] + br.delete_flows(in_port=self.phys_ofports[lvm. + physical_network], + dl_vlan=lvm.vlan) + # inbound + br = self.int_br + br.delete_flows(in_port=self.int_ofports[lvm.physical_network], + dl_vlan=0xffff) + elif lvm.network_type == p_const.TYPE_VLAN: + if lvm.physical_network in self.phys_brs: + # outbound + br = self.phys_brs[lvm.physical_network] + br.delete_flows(in_port=self.phys_ofports[lvm. + physical_network], + dl_vlan=lvm.vlan) + # inbound + br = self.int_br + br.delete_flows(in_port=self.int_ofports[lvm.physical_network], + dl_vlan=lvm.segmentation_id) + elif lvm.network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot reclaim unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': lvm.network_type, + 'net_uuid': net_uuid}) + + self.available_local_vlans.add(lvm.vlan) + + def extern_ip_host_setting_up(self, extern_ip): + if not self.available_eth_num_list: + LOG.error(_("No eth num available for extern_Ip=%s"), extern_ip) + return None + num = self.available_eth_num_list.pop() + ovs_lib.exec_host_ip_config_up(self.extern_eth, num, extern_ip) + return num + + def extern_ip_host_setting_down(self, eth_num, extern_ip): + self.available_eth_num_list.add(eth_num) + ovs_lib.exec_host_ip_config_down(self.extern_eth, eth_num) + + def bind_extern_tun_port_process(self, port, fixed_ips, + device_owner, local_vlan_id): + if(device_owner == q_const.DEVICE_OWNER_ROUTER_GW): + # gateway port must have only one fixed IP, + # use fixed_ips[0] + gw_port_ip = fixed_ips[0]['ip_address'] + eth_num = self.extern_ip_host_setting_up(gw_port_ip) + self.gateway_port_extern_info[port.port_name] = \ + GatewayPortExternInfo(port, gw_port_ip, local_vlan_id, eth_num) + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=2, + dl_vlan=local_vlan_id, + dl_dst='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="resubmit(,%s)" % + (constants.FLOW_KEY_TABLE)) + + network_type = p_const.TYPE_VXLAN + segmentation_id = cfg.CONF.AGENT.extern_tun_vni + self.tun_br.add_flow( + table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + tun_dst=gw_port_ip, + actions="mod_vlan_vid:%s," + "resubmit(,%s)" % + (local_vlan_id, constants.DVR_NOT_LEARN)) + + self.int_br.add_flow(table=constants.DVR_TO_DST_MAC, + priority=2, + dl_vlan=local_vlan_id, + actions="mod_dl_dst:%s," + "NORMAL" % port.vif_mac) + + def unbind_extern_tun_port_process(self, port, local_vlan_id): + gpe = self.gateway_port_extern_info.pop(port.port_name, None) + if(gpe): + # gateway port must have only one fixed IP, + # use fixed_ips[0] + gw_port_ip = gpe.gateway_ip + self.extern_ip_host_setting_down(gpe.eth_num, gw_port_ip) + 
self.tun_br.delete_flows( + table=constants.DVR_PROCESS, + dl_vlan=local_vlan_id, + dl_dst='fa:16:3f:00:00:00/ff:ff:ff:00:00:00') + + network_type = p_const.TYPE_VXLAN + segmentation_id = cfg.CONF.AGENT.extern_tun_vni + self.tun_br.delete_flows( + table=constants.TUN_TABLE[network_type], + tun_id=segmentation_id, + tun_dst=gw_port_ip) + + self.int_br.delete_flows(table=constants.DVR_TO_DST_MAC, + dl_vlan=local_vlan_id) + + def port_bound(self, port, net_uuid, + network_type, physical_network, + segmentation_id, fixed_ips, device_owner, + ovs_restarted): + '''Bind port to net_uuid/lsw_id and install flow for inbound traffic + to vm. + + :param port: a ovslib.VifPort object. + :param net_uuid: the net_uuid this port is to be associated with. + :param network_type: the network type ('gre', 'vlan', 'flat', 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + :param fixed_ips: the ip addresses assigned to this port + :param device_owner: the string indicative of owner of this port + :param ovs_restarted: indicates if this is called for an OVS restart. + ''' + if net_uuid not in self.local_vlan_map or ovs_restarted: + self.provision_local_vlan(net_uuid, network_type, + physical_network, segmentation_id) + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports[port.vif_id] = port + self.bind_extern_tun_port_process(port, fixed_ips, + device_owner, + local_vlan_id=lvm.vlan) + + self.dvr_agent.bind_port_to_dvr(port, network_type, fixed_ips, + device_owner, + local_vlan_id=lvm.vlan) + + # Do not bind a port if it's already bound + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != str(lvm.vlan): + self.int_br.set_db_attribute("Port", port.port_name, "tag", + str(lvm.vlan)) + if port.ofport != -1: + self.int_br.delete_flows(in_port=port.ofport) + + def port_unbound(self, vif_id, net_uuid=None): + '''Unbind port. + + Removes corresponding local vlan mapping object if this is its last + VIF. + + :param vif_id: the id of the vif + :param net_uuid: the net_uuid this port is associated with. + ''' + if net_uuid is None: + net_uuid = self.get_net_uuid(vif_id) + + if not self.local_vlan_map.get(net_uuid): + LOG.info(_('port_unbound(): net_uuid %s not' + ' in local_vlan_map'), net_uuid) + return + + lvm = self.local_vlan_map[net_uuid] + + if vif_id in lvm.vif_ports: + vif_port = lvm.vif_ports[vif_id] + self.unbind_extern_tun_port_process(vif_port, lvm.vlan) + self.dvr_agent.unbind_port_from_dvr(vif_port, + local_vlan_id=lvm.vlan) + lvm.vif_ports.pop(vif_id, None) + + if not lvm.vif_ports: + self.reclaim_local_vlan(net_uuid) + + def port_dead(self, port): + '''Once a port has no binding, put it on the "dead vlan". + + :param port: a ovs_lib.VifPort object. + ''' + # Don't kill a port if it's already dead + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != DEAD_VLAN_TAG: + self.int_br.set_db_attribute("Port", port.port_name, "tag", + DEAD_VLAN_TAG) + self.int_br.add_flow(priority=2, in_port=port.ofport, + actions="drop") + + def setup_integration_br(self): + '''Setup the integration bridge. + + Create patch ports and remove all existing flows. + + :param bridge_name: the name of the integration bridge. + :returns: the integration bridge + ''' + # Ensure the integration bridge is created. + # ovs_lib.OVSBridge.create() will run + # ovs-vsctl -- --may-exist add-br BRIDGE_NAME + # which does nothing if bridge already exists. 
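port_dead() above quarantines a port that has lost its binding: it retags the port onto a VLAN nothing else uses (MAX_VLAN_TAG + 1, the DEAD_VLAN_TAG defined near the top of this module) and installs a drop flow for its ofport, skipping both steps if the port is already dead. A minimal sketch of that decision with the OVS calls replaced by plain records:

    MAX_VLAN_TAG = 4094                     # assumed q_const value
    DEAD_VLAN_TAG = str(MAX_VLAN_TAG + 1)   # same sentinel as this module

    def quarantine_port(current_tag, ofport):
        # Returns the (db_update, drop_flow) pair the agent would apply,
        # or None when the port already sits on the dead VLAN.
        if current_tag == DEAD_VLAN_TAG:
            return None
        db_update = ('Port.tag', DEAD_VLAN_TAG)
        drop_flow = dict(priority=2, in_port=ofport, actions='drop')
        return db_update, drop_flow

    print(quarantine_port('7', 12))
    print(quarantine_port(DEAD_VLAN_TAG, 12))   # already dead -> None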
+ self.int_br.create() + self.int_br.set_secure_mode() + + self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) + self.int_br.remove_all_flows() + # switch all traffic using L2 learning + self.int_br.add_flow(priority=1, actions="normal") + # Add a canary flow to int_br to track OVS restarts + self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0, + actions="drop") + + def setup_ancillary_bridges(self, integ_br, tun_br): + '''Setup ancillary bridges - for example br-ex.''' + ovs_bridges = set(ovs_lib.get_bridges(self.root_helper)) + # Remove all known bridges + ovs_bridges.remove(integ_br) + if self.enable_tunneling: + ovs_bridges.remove(tun_br) + br_names = [self.phys_brs[physical_network].br_name for + physical_network in self.phys_brs] + ovs_bridges.difference_update(br_names) + # Filter list of bridges to those that have external + # bridge-id's configured + br_names = [] + for bridge in ovs_bridges: + id = ovs_lib.get_bridge_external_bridge_id(self.root_helper, + bridge) + if id != bridge: + br_names.append(bridge) + ovs_bridges.difference_update(br_names) + ancillary_bridges = [] + for bridge in ovs_bridges: + br = ovs_lib.OVSBridge(bridge, self.root_helper) + LOG.info(_('Adding %s to list of bridges.'), bridge) + ancillary_bridges.append(br) + return ancillary_bridges + + def setup_tunnel_br(self, tun_br=None): + '''Setup the tunnel bridge. + + Creates tunnel bridge, and links it to the integration bridge + using a patch port. + + :param tun_br: the name of the tunnel bridge. + ''' + if not self.tun_br: + self.tun_br = ovs_lib.OVSBridge(tun_br, self.root_helper) + + self.tun_br.reset_bridge() + self.patch_tun_ofport = self.int_br.add_patch_port( + cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) + self.patch_int_ofport = self.tun_br.add_patch_port( + cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) + if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0: + LOG.error(_("Failed to create OVS patch port. Cannot have " + "tunneling enabled on this agent, since this version " + "of OVS does not support tunnels or patch ports. 
" + "Agent terminated!")) + exit(1) + + self.tun_br.remove_all_flows() + # Table 0 (default) will sort incoming traffic depending on in_port + self.tun_br.add_flow(priority=1, + in_port=self.patch_int_ofport, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + self.tun_br.add_flow(priority=0, actions="drop") + if self.arp_responder_enabled: + # ARP broadcast-ed request go to the local ARP_RESPONDER table to + # be locally resolved + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=1, + proto='arp', + dl_dst="ff:ff:ff:ff:ff:ff", + actions=("resubmit(,%s)" % + constants.ARP_RESPONDER)) + # PATCH_LV_TO_TUN table will handle packets coming from patch_int + # unicasts go to table UCAST_TO_TUN where remote addresses are learnt + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.UCAST_TO_TUN) + # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) + # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id + # for each tunnel type, and resubmit to table LEARN_FROM_TUN where + # remote mac addresses will be learnt + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + self.tun_br.add_flow(table=constants.TUN_TABLE[tunnel_type], + priority=0, + actions="drop") + # LEARN_FROM_TUN table will have a single flow using a learn action to + # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac + # addresses (assumes that lvid has already been set by a previous flow) + learned_flow = ("table=%s," + "priority=1," + "hard_timeout=300," + "NXM_OF_VLAN_TCI[0..11]," + "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," + "load:0->NXM_OF_VLAN_TCI[]," + "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," + "output:NXM_OF_IN_PORT[]" % + constants.UCAST_TO_TUN) + # Once remote mac addresses are learnt, output packet to patch_int + self.tun_br.add_flow(table=constants.LEARN_FROM_TUN, + priority=1, + actions="learn(%s),output:%s" % + (learned_flow, self.patch_int_ofport)) + # Egress unicast will be handled in table UCAST_TO_TUN, where remote + # mac addresses will be learned. For now, just add a default flow that + # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them + # as broadcasts/multicasts + self.tun_br.add_flow(table=constants.UCAST_TO_TUN, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + if self.arp_responder_enabled: + # If none of the ARP entries correspond to the requested IP, the + # broadcast-ed packet is resubmitted to the flooding table + self.tun_br.add_flow(table=constants.ARP_RESPONDER, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, + # for now, add a default drop action + self.tun_br.add_flow(table=constants.FLOOD_TO_TUN, + priority=0, + actions="drop") + + self.tun_br.add_flow(table=constants.FLOW_KEY_TABLE, + priority=0, + actions="drop") + + self.setup_flowkey_tun_port('flowkey_tun_port', p_const.TYPE_VXLAN) + + def get_peer_name(self, prefix, name): + """Construct a peer name based on the prefix and name. + + The peer name can not exceed the maximum length allowed for a linux + device. Longer names are hashed to help ensure uniqueness. 
+ """ + if len(prefix + name) <= q_const.DEVICE_NAME_MAX_LEN: + return prefix + name + # We can't just truncate because bridges may be distinguished + # by an ident at the end. A hash over the name should be unique. + # Leave part of the bridge name on for easier identification + hashlen = 6 + namelen = q_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen + new_name = ('%(prefix)s%(truncated)s%(hash)s' % + {'prefix': prefix, 'truncated': name[0:namelen], + 'hash': hashlib.sha1(name).hexdigest()[0:hashlen]}) + LOG.warning(_("Creating an interface named %(name)s exceeds the " + "%(limit)d character limitation. It was shortened to " + "%(new_name)s to fit."), + {'name': name, 'limit': q_const.DEVICE_NAME_MAX_LEN, + 'new_name': new_name}) + return new_name + + def setup_physical_bridges(self, bridge_mappings): + '''Setup the physical network bridges. + + Creates physical network bridges and links them to the + integration bridge using veths. + + :param bridge_mappings: map physical network names to bridge names. + ''' + self.phys_brs = {} + self.int_ofports = {} + self.phys_ofports = {} + ip_wrapper = ip_lib.IPWrapper(self.root_helper) + ovs_bridges = ovs_lib.get_bridges(self.root_helper) + for physical_network, bridge in bridge_mappings.iteritems(): + LOG.info(_("Mapping physical network %(physical_network)s to " + "bridge %(bridge)s"), + {'physical_network': physical_network, + 'bridge': bridge}) + # setup physical bridge + if bridge not in ovs_bridges: + LOG.error(_("Bridge %(bridge)s for physical network " + "%(physical_network)s does not exist. Agent " + "terminated!"), + {'physical_network': physical_network, + 'bridge': bridge}) + sys.exit(1) + br = ovs_lib.OVSBridge(bridge, self.root_helper) + br.remove_all_flows() + br.add_flow(priority=1, actions="normal") + self.phys_brs[physical_network] = br + + # interconnect physical and integration bridges using veth/patchs + int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX, + bridge) + phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX, + bridge) + self.int_br.delete_port(int_if_name) + br.delete_port(phys_if_name) + if self.use_veth_interconnection: + if ip_lib.device_exists(int_if_name, self.root_helper): + ip_lib.IPDevice(int_if_name, + self.root_helper).link.delete() + # Give udev a chance to process its rules here, to avoid + # race conditions between commands launched by udev rules + # and the subsequent call to ip_wrapper.add_veth + utils.execute(['/sbin/udevadm', 'settle', '--timeout=10']) + int_veth, phys_veth = ip_wrapper.add_veth(int_if_name, + phys_if_name) + int_ofport = self.int_br.add_port(int_veth) + phys_ofport = br.add_port(phys_veth) + else: + # Create patch ports without associating them in order to block + # untranslated traffic before association + int_ofport = self.int_br.add_patch_port( + int_if_name, constants.NONEXISTENT_PEER) + phys_ofport = br.add_patch_port( + phys_if_name, constants.NONEXISTENT_PEER) + + self.int_ofports[physical_network] = int_ofport + self.phys_ofports[physical_network] = phys_ofport + + # block all untranslated traffic between bridges + self.int_br.add_flow(priority=2, in_port=int_ofport, + actions="drop") + br.add_flow(priority=2, in_port=phys_ofport, actions="drop") + + if self.use_veth_interconnection: + # enable veth to pass traffic + int_veth.link.set_up() + phys_veth.link.set_up() + if self.veth_mtu: + # set up mtu size for veth interfaces + int_veth.link.set_mtu(self.veth_mtu) + phys_veth.link.set_mtu(self.veth_mtu) + else: + # associate patch ports to pass 
traffic + self.int_br.set_db_attribute('Interface', int_if_name, + 'options:peer', phys_if_name) + br.set_db_attribute('Interface', phys_if_name, + 'options:peer', int_if_name) + + def scan_ports(self, registered_ports, updated_ports=None): + cur_ports = self.int_br.get_vif_port_set() + self.int_br_device_count = len(cur_ports) + port_info = {'current': cur_ports} + if updated_ports is None: + updated_ports = set() + updated_ports.update(self.check_changed_vlans(registered_ports)) + if updated_ports: + # Some updated ports might have been removed in the + # meanwhile, and therefore should not be processed. + # In this case the updated port won't be found among + # current ports. + updated_ports &= cur_ports + if updated_ports: + port_info['updated'] = updated_ports + + # FIXME(salv-orlando): It's not really necessary to return early + # if nothing has changed. + if cur_ports == registered_ports: + # No added or removed ports to set, just return here + return port_info + + port_info['added'] = cur_ports - registered_ports + # Remove all the known ports not found on the integration bridge + port_info['removed'] = registered_ports - cur_ports + return port_info + + def check_changed_vlans(self, registered_ports): + """Return ports which have lost their vlan tag. + + The returned value is a set of port ids of the ports concerned by a + vlan tag loss. + """ + port_tags = self.int_br.get_port_tag_dict() + changed_ports = set() + for lvm in self.local_vlan_map.values(): + for port in registered_ports: + if ( + port in lvm.vif_ports + and lvm.vif_ports[port].port_name in port_tags + and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan + ): + LOG.info( + _("Port '%(port_name)s' has lost " + "its vlan tag '%(vlan_tag)d'!"), + {'port_name': lvm.vif_ports[port].port_name, + 'vlan_tag': lvm.vlan} + ) + changed_ports.add(port) + return changed_ports + + def update_ancillary_ports(self, registered_ports): + ports = set() + for bridge in self.ancillary_brs: + ports |= bridge.get_vif_port_set() + + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def treat_vif_port(self, vif_port, port_id, network_id, network_type, + physical_network, segmentation_id, admin_state_up, + fixed_ips, device_owner, ovs_restarted): + # When this function is called for a port, the port should have + # an OVS ofport configured, as only these ports were considered + # for being treated. 
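scan_ports() above reduces each rpc-loop iteration to set arithmetic: the VIFs currently on the integration bridge versus the ports the agent already knows about, with updated ports intersected against what still exists. A tiny, slightly simplified sketch of that computation (the real method returns early when nothing changed):

    def diff_ports(registered, current, updated=None):
        # Mirrors the port_info dict built by scan_ports(): 'updated'
        # keeps only ports that still exist; 'added'/'removed' are set
        # differences against the registered set.
        info = {'current': set(current)}
        updated = set(updated or ()) & info['current']
        if updated:
            info['updated'] = updated
        info['added'] = info['current'] - set(registered)
        info['removed'] = set(registered) - info['current']
        return info

    print(diff_ports(registered={'p1', 'p2'},
                     current={'p2', 'p3'},
                     updated={'p2', 'p9'}))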
If that does not happen, it is a potential + # error condition of which operators should be aware + if not vif_port.ofport: + LOG.warn(_("VIF port: %s has no ofport configured, and might not " + "be able to transmit"), vif_port.vif_id) + if vif_port: + if admin_state_up: + self.port_bound(vif_port, network_id, network_type, + physical_network, segmentation_id, + fixed_ips, device_owner, ovs_restarted) + else: + self.port_dead(vif_port) + else: + LOG.debug(_("No VIF port for port %s defined on agent."), port_id) + + def setup_flowkey_tun_port(self, port_name, tunnel_type): + ofport = self.tun_br.add_flowkey_tunnel_port(port_name, + tunnel_type, + self.vxlan_udp_port, + self.dont_fragment) + ofport_int = -1 + try: + ofport_int = int(ofport) + except (TypeError, ValueError): + LOG.exception(_("ofport should have a value that can be " + "interpreted as an integer")) + if ofport_int < 0: + LOG.error(_("Failed to set-up %(type)s flow_key tunnel port"), + {'type': tunnel_type}) + return 0 + + self.tun_br_flowkey_ofport = ofport + # Add flow in default table to resubmit to the right + # tunnelling table (lvid will be set in the latter) + self.tun_br.add_flow(priority=1, + in_port=ofport, + actions="resubmit(,%s)" % + constants.TUN_TABLE[tunnel_type]) + + return ofport + + def setup_tunnel_port(self, port_name, remote_ip, tunnel_type): + ofport = self.tun_br.add_tunnel_port(port_name, + remote_ip, + self.local_ip, + tunnel_type, + self.vxlan_udp_port, + self.dont_fragment) + ofport_int = -1 + try: + ofport_int = int(ofport) + except (TypeError, ValueError): + LOG.exception(_("ofport should have a value that can be " + "interpreted as an integer")) + if ofport_int < 0: + LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': tunnel_type, 'ip': remote_ip}) + return 0 + + self.tun_br_ofports[tunnel_type][remote_ip] = ofport + # Add flow in default table to resubmit to the right + # tunnelling table (lvid will be set in the latter) + self.tun_br.add_flow(priority=1, + in_port=ofport, + actions="resubmit(,%s)" % + constants.TUN_TABLE[tunnel_type]) + + ofports = ','.join(self.tun_br_ofports[tunnel_type].values()) + if ofports and not self.l2_pop: + # Update flooding flows to include the new tunnel + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vlan_mapping.network_type == tunnel_type: + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=vlan_mapping.vlan, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (vlan_mapping.segmentation_id, + ofports)) + return ofport + + def cleanup_tunnel_port(self, tun_ofport, tunnel_type): + # Check if this tunnel port is still used + for lvm in self.local_vlan_map.values(): + if tun_ofport in lvm.tun_ofports: + break + # If not, remove it + else: + for remote_ip, ofport in self.tun_br_ofports[tunnel_type].items(): + if ofport == tun_ofport: + port_name = '%s-%s' % (tunnel_type, + self.get_ip_in_hex(remote_ip)) + self.tun_br.delete_port(port_name) + self.tun_br.delete_flows(in_port=ofport) + self.tun_br_ofports[tunnel_type].pop(remote_ip, None) + + def treat_devices_added_or_updated(self, devices, ovs_restarted): + try: + devices_details_list = self.plugin_rpc.get_devices_details_list( + self.context, + devices, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug("Unable to get port details for %(devices)s: %(e)s", + {'devices': devices, 'e': e}) + # resync is needed + return True + for details in devices_details_list: + device = details['device'] + LOG.debug("Processing port: %s", device) + 
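# [Editor's note: illustrative sketch, not part of the original patch.]
# cleanup_tunnel_port() above uses Python's for/else idiom: the 'else'
# branch runs only when the loop completes without 'break', i.e. when no
# local VLAN mapping still references the tunnel ofport, and only then is
# the port deleted.  The same control flow with plain data (values are
# hypothetical):
#
#     in_use_ofports = [{'1', '2'}, {'3'}]   # tun_ofports per local VLAN
#     tun_ofport = '7'
#     for ofports in in_use_ofports:
#         if tun_ofport in ofports:
#             break                           # still referenced, keep it
#     else:
#         print('ofport %s is unused and can be removed' % tun_ofport)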
port = self.int_br.get_vif_port_by_id(device) + if not port: + # The port has disappeared and should not be processed + # There is no need to put the port DOWN in the plugin as + # it never went up in the first place + LOG.info(_("Port %s was not found on the integration bridge " + "and will therefore not be processed"), device) + continue + + if 'port_id' in details: + LOG.info(_("Port %(device)s updated. Details: %(details)s"), + {'device': device, 'details': details}) + self.treat_vif_port(port, details['port_id'], + details['network_id'], + details['network_type'], + details['physical_network'], + details['segmentation_id'], + details['admin_state_up'], + details['fixed_ips'], + details['device_owner'], + ovs_restarted) + # update plugin about port status + if details.get('admin_state_up'): + LOG.debug(_("Setting status for %s to UP"), device) + self.plugin_rpc.update_device_up( + self.context, device, self.agent_id, cfg.CONF.host) + else: + LOG.debug(_("Setting status for %s to DOWN"), device) + self.plugin_rpc.update_device_down( + self.context, device, self.agent_id, cfg.CONF.host) + LOG.info(_("Configuration for device %s completed."), device) + else: + LOG.warn(_("Device %s not defined on plugin"), device) + if (port and port.ofport != -1): + self.port_dead(port) + return False + + def treat_ancillary_devices_added(self, devices): + try: + devices_details_list = self.plugin_rpc.get_devices_details_list( + self.context, + devices, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug("Unable to get port details for " + "%(devices)s: %(e)s", {'devices': devices, 'e': e}) + # resync is needed + return True + + for details in devices_details_list: + device = details['device'] + LOG.info(_("Ancillary Port %s added"), device) + + # update plugin about port status + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + return False + + def treat_devices_removed(self, devices): + resync = False + self.sg_agent.remove_devices_filter(devices) + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + self.port_unbound(device) + return resync + + def treat_ancillary_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + details = self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if details['exists']: + LOG.info(_("Port %s updated."), device) + # Nothing to do regarding local networking + else: + LOG.debug(_("Device %s not defined on plugin"), device) + return resync + + def process_network_ports(self, port_info, ovs_restarted): + resync_a = False + resync_b = False + # TODO(salv-orlando): consider a solution for ensuring notifications + # are processed exactly in the same order in which they were + # received. 
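# [Editor's note: illustrative sketch, not part of the original patch.]
# treat_devices_removed() and treat_ancillary_devices_removed() above share
# one pattern: a failure for a single device does not abort the loop, it
# only flags that a full resync with the plugin is needed.  Reduced to a
# stand-alone helper (notify is a hypothetical callable):
#
#     def notify_all(devices, notify):
#         resync = False
#         for device in devices:
#             try:
#                 notify(device)
#             except Exception:
#                 resync = True    # remember the failure ...
#                 continue         # ... but keep processing the rest
#         return resync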
This is tricky because there are two notification + # sources: the neutron server, and the ovs db monitor process + # If there is an exception while processing security groups ports + # will not be wired anyway, and a resync will be triggered + # TODO(salv-orlando): Optimize avoiding applying filters unnecessarily + # (eg: when there are no IP address changes) + self.sg_agent.setup_port_filters(port_info.get('added', set()), + port_info.get('updated', set())) + # VIF wiring needs to be performed always for 'new' devices. + # For updated ports, re-wiring is not needed in most cases, but needs + # to be performed anyway when the admin state of a device is changed. + # A device might be both in the 'added' and 'updated' + # list at the same time; avoid processing it twice. + devices_added_updated = (port_info.get('added', set()) | + port_info.get('updated', set())) + if devices_added_updated: + start = time.time() + resync_a = self.treat_devices_added_or_updated( + devices_added_updated, ovs_restarted) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" + "treat_devices_added_or_updated completed " + "in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_b = self.treat_devices_removed(port_info['removed']) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" + "treat_devices_removed completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def process_ancillary_network_ports(self, port_info): + resync_a = False + resync_b = False + if 'added' in port_info: + start = time.time() + resync_a = self.treat_ancillary_devices_added(port_info['added']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_added " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_b = self.treat_ancillary_devices_removed( + port_info['removed']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_removed " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def get_ip_in_hex(self, ip_address): + try: + return '%08x' % netaddr.IPAddress(ip_address, version=4) + except Exception: + LOG.warn(_("Unable to create tunnel port. Invalid remote IP: %s"), + ip_address) + return + + def tunnel_sync(self): + resync = False + try: + for tunnel_type in self.tunnel_types: + details = self.plugin_rpc.tunnel_sync(self.context, + self.local_ip, + tunnel_type) + if not self.l2_pop: + tunnels = details['tunnels'] + for tunnel in tunnels: + if self.local_ip != tunnel['ip_address']: + tunnel_id = tunnel.get('id') + # Unlike the OVS plugin, ML2 doesn't return an id + # key. So use ip_address to form port name instead. + # Port name must be <=15 chars, so use shorter hex. 
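# [Editor's note: illustrative sketch, not part of the original patch.]
# get_ip_in_hex() above packs an IPv4 address into eight hex digits so
# that the resulting '<tunnel_type>-<hex>' port name stays within the
# 15-character limit on network device names.  The same encoding using
# only the standard library (ip_to_hex is a hypothetical helper):
#
#     import socket
#     import struct
#
#     def ip_to_hex(ip):
#         return '%08x' % struct.unpack('!I', socket.inet_aton(ip))[0]
#
#     name = 'vxlan-%s' % ip_to_hex('192.168.1.10')   # 'vxlan-c0a8010a'
#     assert len(name) <= 15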
+ remote_ip = tunnel['ip_address'] + remote_ip_hex = self.get_ip_in_hex(remote_ip) + if not tunnel_id and not remote_ip_hex: + continue + tun_name = '%s-%s' % (tunnel_type, + tunnel_id or remote_ip_hex) + self.setup_tunnel_port(tun_name, + tunnel['ip_address'], + tunnel_type) + except Exception as e: + LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"), + {'local_ip': self.local_ip, 'e': e}) + resync = True + return resync + + def _agent_has_updates(self, polling_manager): + return (polling_manager.is_polling_required or + self.updated_ports or + self.sg_agent.firewall_refresh_needed()) + + def _port_info_has_changes(self, port_info): + return (port_info.get('added') or + port_info.get('removed') or + port_info.get('updated')) + + def check_ovs_restart(self): + # Check for the canary flow + canary_flow = self.int_br.dump_flows_for_table(constants.CANARY_TABLE) + return not canary_flow + + def _parse_json_file(self, json_file): + try: + fjson = open(json_file) + json_dict = json.load(fjson) + fjson.close() + except Exception: + LOG.error(_('parse json file failed! json_file= %s'), json_file) + return None + + return json_dict + + def scan_notify_l2_agent_file(self): + fp = os.path.join(cfg.CONF.AGENT.notify_l2_file_path, + cfg.CONF.AGENT.notify_l2_file_name) + return self._parse_json_file(fp) + + def process_extern_port_info(self): + extern_port_info = self.scan_notify_l2_agent_file() + if not extern_port_info: + return + + for port_name, mac_ip_map in extern_port_info.items(): + gpe = self.gateway_port_extern_info.get(port_name, None) + if not gpe: + continue + add_ports = set(mac_ip_map.keys()) - \ + set(gpe.extern_ip_mac_mapping.keys()) + remove_ports = set(gpe.extern_ip_mac_mapping.keys()) - \ + set(mac_ip_map.keys()) + for mac in list(add_ports): + dst_extern_ip = mac_ip_map.get(mac) + segmentation_id = cfg.CONF.AGENT.extern_tun_vni + ofport = self.tun_br_flowkey_ofport + self.tun_br.add_flow( + table=constants.FLOW_KEY_TABLE, + priority=1, + dl_vlan=gpe.local_vlan, + dl_dst=mac, + actions="strip_vlan,set_field:%s->tun_id," + "set_field:%s->tun_dst,set_field:%s->tun_src," + "output:%s" % + (segmentation_id, + dst_extern_ip, + gpe.gateway_ip, + ofport)) + gpe.extern_ip_mac_mapping[mac] = dst_extern_ip + for mac in list(remove_ports): + gpe.extern_ip_mac_mapping.pop(mac, None) + segmentation_id = cfg.CONF.AGENT.extern_tun_vni + ofport = self.tun_br_flowkey_ofport + self.tun_br.delete_flows(table=constants.FLOW_KEY_TABLE, + dl_vlan=gpe.local_vlan, + dl_dst=mac) + + def rpc_loop(self, polling_manager=None): + if not polling_manager: + polling_manager = polling.AlwaysPoll() + + sync = True + ports = set() + updated_ports_copy = set() + ancillary_ports = set() + tunnel_sync = True + ovs_restarted = False + while self.run_daemon_loop: + start = time.time() + port_stats = {'regular': {'added': 0, + 'updated': 0, + 'removed': 0}, + 'ancillary': {'added': 0, + 'removed': 0}} + LOG.debug(_("Agent rpc_loop - iteration:%d started"), + self.iter_num) + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + ancillary_ports.clear() + sync = False + polling_manager.force_polling() + ovs_restarted = self.check_ovs_restart() + if ovs_restarted: + self.setup_integration_br() + self.setup_physical_bridges(self.bridge_mappings) + if self.enable_tunneling: + self.setup_tunnel_br() + tunnel_sync = True + self.dvr_agent.reset_ovs_parameters(self.int_br, + self.tun_br, + self.patch_int_ofport, + self.patch_tun_ofport) + self.dvr_agent.setup_dvr_flows_on_integ_tun_br() + # Notify the 
plugin of tunnel IP + if self.enable_tunneling and tunnel_sync: + LOG.info(_("Agent tunnel out of sync with plugin!")) + try: + tunnel_sync = self.tunnel_sync() + except Exception: + LOG.exception(_("Error while synchronizing tunnels")) + tunnel_sync = True + + self.process_extern_port_info() + if self._agent_has_updates(polling_manager) or ovs_restarted: + try: + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " + "starting polling. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Save updated ports dict to perform rollback in + # case resync would be needed, and then clear + # self.updated_ports. As the greenthread should not yield + # between these two statements, this will be thread-safe + updated_ports_copy = self.updated_ports + self.updated_ports = set() + reg_ports = (set() if ovs_restarted else ports) + port_info = self.scan_ports(reg_ports, updated_ports_copy) + ports = port_info['current'] + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " + "port information retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Secure and wire/unwire VIFs and update their status + # on Neutron server + if (self._port_info_has_changes(port_info) or + self.sg_agent.firewall_refresh_needed() or + ovs_restarted): + LOG.debug(_("Starting to process devices in:%s"), + port_info) + # If treat devices fails - must resync with plugin + sync = self.process_network_ports(port_info, + ovs_restarted) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" + "ports processed. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + port_stats['regular']['added'] = ( + len(port_info.get('added', []))) + port_stats['regular']['updated'] = ( + len(port_info.get('updated', []))) + port_stats['regular']['removed'] = ( + len(port_info.get('removed', []))) + # Treat ancillary devices if they exist + if self.ancillary_brs: + port_info = self.update_ancillary_ports( + ancillary_ports) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" + "ancillary port info retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + if port_info: + rc = self.process_ancillary_network_ports( + port_info) + LOG.debug(_("Agent rpc_loop - iteration:" + "%(iter_num)d - ancillary ports " + "processed. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + ancillary_ports = port_info['current'] + port_stats['ancillary']['added'] = ( + len(port_info.get('added', []))) + port_stats['ancillary']['removed'] = ( + len(port_info.get('removed', []))) + sync = sync | rc + + polling_manager.polling_completed() + except Exception: + LOG.exception(_("Error while processing VIF ports")) + # Put the ports back in self.updated_port + self.updated_ports |= updated_ports_copy + sync = True + + # sleep till end of polling interval + elapsed = (time.time() - start) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d " + "completed. Processed ports statistics: " + "%(port_stats)s. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'port_stats': port_stats, + 'elapsed': elapsed}) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. 
%(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + self.iter_num = self.iter_num + 1 + + def daemon_loop(self): + with polling.get_polling_manager( + self.minimize_polling, + self.root_helper, + self.ovsdb_monitor_respawn_interval) as pm: + + self.rpc_loop(polling_manager=pm) + + def _handle_sigterm(self, signum, frame): + LOG.debug("Agent caught SIGTERM, quitting daemon loop.") + self.run_daemon_loop = False + + +def create_agent_config_map(config): + """Create a map of agent config parameters. + + :param config: an instance of cfg.CONF + :returns: a map of agent configuration parameters + """ + try: + bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings) + except ValueError as e: + raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) + + kwargs = dict( + integ_br=config.OVS.integration_bridge, + tun_br=config.OVS.tunnel_bridge, + local_ip=config.OVS.local_ip, + bridge_mappings=bridge_mappings, + root_helper=config.AGENT.root_helper, + polling_interval=config.AGENT.polling_interval, + minimize_polling=config.AGENT.minimize_polling, + tunnel_types=config.AGENT.tunnel_types, + veth_mtu=config.AGENT.veth_mtu, + enable_distributed_routing=config.AGENT.enable_distributed_routing, + l2_population=config.AGENT.l2_population, + arp_responder=config.AGENT.arp_responder, + use_veth_interconnection=config.OVS.use_veth_interconnection, + ) + + # If enable_tunneling is TRUE, set tunnel_type to default to GRE + if config.OVS.enable_tunneling and not kwargs['tunnel_types']: + kwargs['tunnel_types'] = [p_const.TYPE_GRE] + + # Verify the tunnel_types specified are valid + for tun in kwargs['tunnel_types']: + if tun not in constants.TUNNEL_NETWORK_TYPES: + msg = _('Invalid tunnel type specified: %s'), tun + raise ValueError(msg) + if not kwargs['local_ip']: + msg = _('Tunneling cannot be enabled without a valid local_ip.') + raise ValueError(msg) + + return kwargs + + +def main(): + cfg.CONF.register_opts(ip_lib.OPTS) + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + q_utils.log_opt_values(LOG) + + try: + agent_config = create_agent_config_map(cfg.CONF) + except ValueError as e: + LOG.error(_('%s Agent terminated!'), e) + sys.exit(1) + + is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper'] + if is_xen_compute_host: + # Force ip_lib to always use the root helper to ensure that ip + # commands target xen dom0 rather than domU. + cfg.CONF.set_default('ip_lib_force_root', True) + + agent = OVSNeutronAgent(**agent_config) + signal.signal(signal.SIGTERM, agent._handle_sigterm) + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + agent.daemon_loop() + + +if __name__ == "__main__": + main() diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/common/config.py b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/common/config.py new file mode 100644 index 00000000..e2e73e73 --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/common/config.py @@ -0,0 +1,110 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from neutron.agent.common import config
+from neutron.plugins.openvswitch.common import constants
+
+
+DEFAULT_BRIDGE_MAPPINGS = []
+DEFAULT_VLAN_RANGES = []
+DEFAULT_TUNNEL_RANGES = []
+DEFAULT_TUNNEL_TYPES = []
+
+ovs_opts = [
+    cfg.StrOpt('integration_bridge', default='br-int',
+               help=_("Integration bridge to use")),
+    cfg.BoolOpt('enable_tunneling', default=False,
+                help=_("Enable tunneling support")),
+    cfg.StrOpt('tunnel_bridge', default='br-tun',
+               help=_("Tunnel bridge to use")),
+    cfg.StrOpt('int_peer_patch_port', default='patch-tun',
+               help=_("Peer patch port in integration bridge for tunnel "
+                      "bridge")),
+    cfg.StrOpt('tun_peer_patch_port', default='patch-int',
+               help=_("Peer patch port in tunnel bridge for integration "
+                      "bridge")),
+    cfg.StrOpt('local_ip', default='',
+               help=_("Local IP address of GRE tunnel endpoints.")),
+    cfg.ListOpt('bridge_mappings',
+                default=DEFAULT_BRIDGE_MAPPINGS,
+                help=_("List of <physical_network>:<bridge>")),
+    cfg.StrOpt('tenant_network_type', default='local',
+               help=_("Network type for tenant networks "
+                      "(local, vlan, gre, vxlan, or none)")),
+    cfg.ListOpt('network_vlan_ranges',
+                default=DEFAULT_VLAN_RANGES,
+                help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
+                       "or <physical_network>")),
+    cfg.ListOpt('tunnel_id_ranges',
+                default=DEFAULT_TUNNEL_RANGES,
+                help=_("List of <tun_min>:<tun_max>")),
+    cfg.StrOpt('tunnel_type', default='',
+               help=_("The type of tunnels to use when utilizing tunnels, "
+                      "either 'gre' or 'vxlan'")),
+    cfg.BoolOpt('use_veth_interconnection', default=False,
+                help=_("Use veths instead of patch ports to interconnect the "
+                       "integration bridge to physical bridges")),
+]
+
+agent_opts = [
+    cfg.IntOpt('polling_interval', default=2,
+               help=_("The number of seconds the agent will wait between "
+                      "polling for local device changes.")),
+    cfg.BoolOpt('minimize_polling',
+                default=True,
+                help=_("Minimize polling by monitoring ovsdb for interface "
+                       "changes.")),
+    cfg.IntOpt('ovsdb_monitor_respawn_interval',
+               default=constants.DEFAULT_OVSDBMON_RESPAWN,
+               help=_("The number of seconds to wait before respawning the "
+                      "ovsdb monitor after losing communication with it")),
+    cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
+                help=_("Network types supported by the agent "
+                       "(gre and/or vxlan)")),
+    cfg.IntOpt('vxlan_udp_port', default=constants.VXLAN_UDP_PORT,
+               help=_("The UDP port to use for VXLAN tunnels.")),
+    cfg.IntOpt('veth_mtu',
+               help=_("MTU size of veth interfaces")),
+    cfg.BoolOpt('l2_population', default=False,
+                help=_("Use ml2 l2population mechanism driver to learn "
+                       "remote mac and IPs and improve tunnel scalability")),
+    cfg.BoolOpt('arp_responder', default=False,
+                help=_("Enable local ARP responder if it is supported")),
+    cfg.BoolOpt('dont_fragment', default=True,
+                help=_("Set or un-set the don't fragment (DF) bit on "
+                       "outgoing IP packet carrying GRE/VXLAN tunnel")),
+    cfg.BoolOpt('enable_distributed_routing', default=False,
+                help=_("Make the l2 agent run in dvr mode ")),
+
+    # added by j00209498 ---begin
+    cfg.StrOpt('notify_l2_file_path',
+               default='/var/',
+               help=_('Location of notify_l2_file_path')),
+
cfg.StrOpt('notify_l2_file_name', + default='notify_l2_agent_info.json', + help=_('File name of notify_l2_file')), + cfg.StrOpt('extern_net_port', default='eth8', + help=_("extern_net_port name")), + cfg.IntOpt('extern_tun_vni', default=100000, + help=_("access neutron tenant to use")), + # added by j00209498 ---end +] + + +cfg.CONF.register_opts(ovs_opts, "OVS") +cfg.CONF.register_opts(agent_opts, "AGENT") +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) diff --git a/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/common/constants.py b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/common/constants.py new file mode 100644 index 00000000..7fac65c7 --- /dev/null +++ b/icehouse-patches/neutron/vlan2vlan/neutron/plugins/openvswitch/common/constants.py @@ -0,0 +1,70 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.plugins.common import constants as p_const + + +# Special vlan_id value in ovs_vlan_allocations table indicating flat network +FLAT_VLAN_ID = -1 + +# Topic for tunnel notifications between the plugin and agent +TUNNEL = 'tunnel' + +# Values for network_type +VXLAN_UDP_PORT = 4789 + +# Name prefixes for veth device or patch port pair linking the integration +# bridge with the physical bridge for a physical network +PEER_INTEGRATION_PREFIX = 'int-' +PEER_PHYSICAL_PREFIX = 'phy-' + +# Nonexistent peer used to create patch ports without associating them, it +# allows to define flows before association +NONEXISTENT_PEER = 'nonexistent-peer' + +# The different types of tunnels +TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN] + +# Various tables for DVR use of integration bridge flows +LOCAL_SWITCHING = 0 +DVR_TO_SRC_MAC = 1 +DVR_TO_DST_MAC = 2 + +# Various tables for tunneling flows +DVR_PROCESS = 1 +PATCH_LV_TO_TUN = 2 +GRE_TUN_TO_LV = 3 +VXLAN_TUN_TO_LV = 4 +DVR_NOT_LEARN = 9 +LEARN_FROM_TUN = 10 +UCAST_TO_TUN = 20 +ARP_RESPONDER = 21 +FLOOD_TO_TUN = 22 +CANARY_TABLE = 23 + +FLOW_KEY_TABLE = 24 + +# Map tunnel types to tables number +TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV, + p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV} + +# The default respawn interval for the ovsdb monitor +DEFAULT_OVSDBMON_RESPAWN = 30 + +# Special return value for an invalid OVS ofport +INVALID_OFPORT = '-1' + +# Represent invalid OF Port +OFPORT_INVALID = -1 diff --git a/icehouse-patches/nova/instance_mapping_uuid_patch/README.md b/icehouse-patches/nova/instance_mapping_uuid_patch/README.md new file mode 100644 index 00000000..b4b91dae --- /dev/null +++ b/icehouse-patches/nova/instance_mapping_uuid_patch/README.md @@ -0,0 +1,63 @@ +Nova instance mapping_uuid patch +=============================== +add instance mapping_uuid attribute patch,it will be patched in cascading level's control node + +How can we manage the servers in cascading level? 
To solve this problem, the nova proxy must be able to relate a cascading server to its cascaded server. This is done by adding a mapping_uuid attribute to the instance.
+
+Key modules
+-----------
+
+* adding a mapping_uuid column to the nova instance table, applied when the nova db is synchronized:
+
+    nova\db\sqlalchemy\migrate_repo\versions\234_add_mapping_uuid_column_to_instance.py
+    nova\db\sqlalchemy\models.py
+    nova-2014.1\nova\objects\instance.py
+    nova\network\neutronv2\api.py
+
+* allowing the nova proxy to update the instance mapping_uuid through the conductor
+
+    nova\conductor\manager.py
+
+Requirements
+------------
+* OpenStack Icehouse has been installed
+
+Installation
+------------
+
+We provide two ways to install the instance_mapping_uuid patch code. In this section, we will guide you through installing the instance_mapping_uuid patch.
+
+* **Note:**
+
+    - Make sure you have an existing installation of **OpenStack Icehouse**.
+    - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified:
+
+* **Manual Installation**
+
+    - Make sure you have performed backups properly.
+
+    - Navigate to the local repository and copy the contents of the 'nova' sub-directory to the corresponding places in the existing nova, e.g.
+      ```cp -r $LOCAL_REPOSITORY_DIR/nova $NOVA_PARENT_DIR```
+      (replace the $... with the actual directory names.)
+
+    - Synchronize the nova db.
+      ```
+      mysql -u root -p$MYSQL_PASS -e "DROP DATABASE if exists nova;
+      CREATE DATABASE nova;
+      GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '$NOVA_PASSWORD';
+      GRANT ALL PRIVILEGES ON *.* TO 'nova'@'%' IDENTIFIED BY '$NOVA_PASSWORD';"
+      nova-manage db sync
+      ```
+
+    - Done. The nova proxy should be working with a demo configuration.
+
+* **Automatic Installation**
+
+    - Make sure you have performed backups properly.
+
+    - Navigate to the installation directory and run the installation script.
+      ```
+      cd $LOCAL_REPOSITORY_DIR/installation
+      sudo bash ./install.sh
+      ```
+      (replace the $... with the actual directory name.)
+
diff --git a/icehouse-patches/nova/instance_mapping_uuid_patch/installation/install.sh b/icehouse-patches/nova/instance_mapping_uuid_patch/installation/install.sh
new file mode 100644
index 00000000..b8c58b35
--- /dev/null
+++ b/icehouse-patches/nova/instance_mapping_uuid_patch/installation/install.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# Copyright (c) 2014 Huawei Technologies.
+
+_MYSQL_PASS="1234"
+_NOVA_INSTALL="/usr/lib64/python2.6/site-packages"
+_NOVA_DIR="${_NOVA_INSTALL}/nova"
+# if you did not make changes to the installation files,
+# please do not edit the following directories.
+_CODE_DIR="../nova"
+_BACKUP_DIR="${_NOVA_INSTALL}/.instance_mapping_uuid_patch-installation-backup"
+
+_SCRIPT_LOGFILE="/var/log/instance_mapping_uuid_patch/installation/install.log"
+
+function log()
+{
+    log_path=`dirname ${_SCRIPT_LOGFILE}`
+    if [ ! -d $log_path ] ; then
+        mkdir -p $log_path
+    fi
+    echo "$@"
+    echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE
+}
+
+if [[ ${EUID} -ne 0 ]]; then
+    log "Please run as root."
+    exit 1
+fi
+
+
+cd `dirname $0`
+
+log "checking installation directories..."
+if [ ! -d "${_NOVA_DIR}" ] ; then
+    log "Could not find the nova installation. Please check the variables in the beginning of the script."
+    log "aborted."
+    exit 1
+fi
+
+log "checking previous installation..."
+if [ -d "${_BACKUP_DIR}/nova" ] ; then
+    log "It seems nova-proxy has already been installed!"
+    log "Please check README for solution if this is not true."
+    exit 1
+fi
+
+log "backing up current files that might be overwritten..."
+cp -r "${_NOVA_DIR}/" "${_BACKUP_DIR}/"
+if [ $? -ne 0 ] ; then
+    rm -r "${_BACKUP_DIR}/nova"
+    echo "Error in code backup, aborted."
+    exit 1
+fi
+
+log "copying in new files..."
+cp -r "${_CODE_DIR}" `dirname ${_NOVA_DIR}`
+if [ $? -ne 0 ] ; then
+    log "Error in copying, aborted."
+    log "Recovering original files..."
+    cp -r "${_BACKUP_DIR}/nova" `dirname ${_NOVA_DIR}` && rm -r "${_BACKUP_DIR}/nova"
+    if [ $? -ne 0 ] ; then
+        log "Recovering failed! Please install manually."
+    fi
+    exit 1
+fi
+
+log "syncing nova db..."
+mysql -u root -p$_MYSQL_PASS -e "DROP DATABASE if exists nova;CREATE DATABASE nova;GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'Galax8800';GRANT ALL PRIVILEGES ON *.* TO 'nova'@'%' IDENTIFIED BY 'Galax8800';"
+
+nova-manage db sync
+
+if [ $? -ne 0 ] ; then
+    log "There was an error in syncing the nova db, please run nova-manage db sync manually."
+    exit 1
+fi
+
+log "Completed."
+log "See README to get started."
+exit 0
+
diff --git a/icehouse-patches/nova/instance_mapping_uuid_patch/installation/uninstall.sh b/icehouse-patches/nova/instance_mapping_uuid_patch/installation/uninstall.sh
new file mode 100644
index 00000000..f770fd93
--- /dev/null
+++ b/icehouse-patches/nova/instance_mapping_uuid_patch/installation/uninstall.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# Copyright (c) 2014 Huawei Technologies.
+
+
+# The uninstallation script has not been implemented yet;
+# it will be supplied if needed.
+exit 1
\ No newline at end of file
diff --git a/icehouse-patches/nova/instance_mapping_uuid_patch/nova/conductor/manager.py b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/conductor/manager.py
new file mode 100644
index 00000000..93376bb7
--- /dev/null
+++ b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/conductor/manager.py
@@ -0,0 +1,1116 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Handles database requests from other nova services.""" + +from oslo import messaging +import six + +from nova.api.ec2 import ec2utils +from nova import block_device +from nova.cells import rpcapi as cells_rpcapi +from nova.compute import api as compute_api +from nova.compute import rpcapi as compute_rpcapi +from nova.compute import task_states +from nova.compute import utils as compute_utils +from nova.compute import vm_states +from nova.conductor.tasks import live_migrate +from nova.db import base +from nova import exception +from nova.image import glance +from nova import manager +from nova import network +from nova.network.security_group import openstack_driver +from nova import notifications +from nova.objects import base as nova_object +from nova.objects import instance as instance_obj +from nova.objects import migration as migration_obj +from nova.objects import quotas as quotas_obj +from nova.openstack.common import excutils +from nova.openstack.common.gettextutils import _ +from nova.openstack.common import jsonutils +from nova.openstack.common import log as logging +from nova.openstack.common import timeutils +from nova import quota +from nova.scheduler import rpcapi as scheduler_rpcapi +from nova.scheduler import utils as scheduler_utils + +LOG = logging.getLogger(__name__) + +# Instead of having a huge list of arguments to instance_update(), we just +# accept a dict of fields to update and use this whitelist to validate it. +allowed_updates = ['task_state', 'vm_state', 'expected_task_state', + 'power_state', 'access_ip_v4', 'access_ip_v6', + 'launched_at', 'terminated_at', 'host', 'node', + 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb', + 'instance_type_id', 'root_device_name', 'launched_on', + 'progress', 'vm_mode', 'default_ephemeral_device', + 'default_swap_device', 'root_device_name', + 'system_metadata', 'updated_at', 'mapping_uuid' + ] + +# Fields that we want to convert back into a datetime object. +datetime_fields = ['launched_at', 'terminated_at', 'updated_at'] + + +class ConductorManager(manager.Manager): + + """Mission: Conduct things. + + The methods in the base API for nova-conductor are various proxy operations + performed on behalf of the nova-compute service running on compute nodes. + Compute nodes are not allowed to directly access the database, so this set + of methods allows them to get specific work done without locally accessing + the database. + + The nova-conductor service also exposes an API in the 'compute_task' + namespace. See the ComputeTaskManager class for details. 
+ """ + + target = messaging.Target(version='1.64') + + def __init__(self, *args, **kwargs): + super(ConductorManager, self).__init__(service_name='conductor', + *args, **kwargs) + self.security_group_api = ( + openstack_driver.get_openstack_security_group_driver()) + self._network_api = None + self._compute_api = None + self.compute_task_mgr = ComputeTaskManager() + self.cells_rpcapi = cells_rpcapi.CellsAPI() + self.additional_endpoints.append(self.compute_task_mgr) + self.additional_endpoints.append(_ConductorManagerV2Proxy(self)) + + @property + def network_api(self): + # NOTE(danms): We need to instantiate our network_api on first use + # to avoid the circular dependency that exists between our init + # and network_api's + if self._network_api is None: + self._network_api = network.API() + return self._network_api + + @property + def compute_api(self): + if self._compute_api is None: + self._compute_api = compute_api.API() + return self._compute_api + + def ping(self, context, arg): + # NOTE(russellb) This method can be removed in 2.0 of this API. It is + # now a part of the base rpc API. + return jsonutils.to_primitive({'service': 'conductor', 'arg': arg}) + + @messaging.expected_exceptions(KeyError, ValueError, + exception.InvalidUUID, + exception.InstanceNotFound, + exception.UnexpectedTaskStateError) + def instance_update(self, context, instance_uuid, + updates, service=None): + for key, value in updates.iteritems(): + if key not in allowed_updates: + LOG.error(_("Instance update attempted for " + "'%(key)s' on %(instance_uuid)s"), + {'key': key, 'instance_uuid': instance_uuid}) + raise KeyError("unexpected update keyword '%s'" % key) + if key in datetime_fields and isinstance(value, six.string_types): + updates[key] = timeutils.parse_strtime(value) + + old_ref, instance_ref = self.db.instance_update_and_get_original( + context, instance_uuid, updates) + notifications.send_update(context, old_ref, instance_ref, service) + return jsonutils.to_primitive(instance_ref) + + # NOTE(russellb): This method is now deprecated and can be removed in + # version 2.0 of the RPC API + @messaging.expected_exceptions(exception.InstanceNotFound) + def instance_get(self, context, instance_id): + return jsonutils.to_primitive( + self.db.instance_get(context, instance_id)) + + @messaging.expected_exceptions(exception.InstanceNotFound) + def instance_get_by_uuid(self, context, instance_uuid, + columns_to_join=None): + return jsonutils.to_primitive( + self.db.instance_get_by_uuid(context, instance_uuid, + columns_to_join)) + + # NOTE(hanlind): This method can be removed in v2.0 of the RPC API. 
+ def instance_get_all(self, context): + return jsonutils.to_primitive(self.db.instance_get_all(context)) + + def instance_get_all_by_host(self, context, host, node=None, + columns_to_join=None): + if node is not None: + result = self.db.instance_get_all_by_host_and_node( + context.elevated(), host, node) + else: + result = self.db.instance_get_all_by_host(context.elevated(), host, + columns_to_join) + return jsonutils.to_primitive(result) + + # NOTE(comstud): This method is now deprecated and can be removed in + # version v2.0 of the RPC API + @messaging.expected_exceptions(exception.MigrationNotFound) + def migration_get(self, context, migration_id): + migration_ref = self.db.migration_get(context.elevated(), + migration_id) + return jsonutils.to_primitive(migration_ref) + + # NOTE(comstud): This method is now deprecated and can be removed in + # version v2.0 of the RPC API + def migration_get_unconfirmed_by_dest_compute(self, context, + confirm_window, + dest_compute): + migrations = self.db.migration_get_unconfirmed_by_dest_compute( + context, confirm_window, dest_compute) + return jsonutils.to_primitive(migrations) + + def migration_get_in_progress_by_host_and_node(self, context, + host, node): + migrations = self.db.migration_get_in_progress_by_host_and_node( + context, host, node) + return jsonutils.to_primitive(migrations) + + # NOTE(comstud): This method can be removed in v2.0 of the RPC API. + def migration_create(self, context, instance, values): + values.update({'instance_uuid': instance['uuid'], + 'source_compute': instance['host'], + 'source_node': instance['node']}) + migration_ref = self.db.migration_create(context.elevated(), values) + return jsonutils.to_primitive(migration_ref) + + # NOTE(russellb): This method is now deprecated and can be removed in + # version 2.0 of the RPC API + @messaging.expected_exceptions(exception.MigrationNotFound) + def migration_update(self, context, migration, status): + migration_ref = self.db.migration_update(context.elevated(), + migration['id'], + {'status': status}) + return jsonutils.to_primitive(migration_ref) + + @messaging.expected_exceptions(exception.AggregateHostExists) + def aggregate_host_add(self, context, aggregate, host): + host_ref = self.db.aggregate_host_add(context.elevated(), + aggregate['id'], host) + + return jsonutils.to_primitive(host_ref) + + @messaging.expected_exceptions(exception.AggregateHostNotFound) + def aggregate_host_delete(self, context, aggregate, host): + self.db.aggregate_host_delete(context.elevated(), + aggregate['id'], host) + + # NOTE(russellb): This method is now deprecated and can be removed in + # version 2.0 of the RPC API + @messaging.expected_exceptions(exception.AggregateNotFound) + def aggregate_get(self, context, aggregate_id): + aggregate = self.db.aggregate_get(context.elevated(), aggregate_id) + return jsonutils.to_primitive(aggregate) + + # NOTE(russellb): This method is now deprecated and can be removed in + # version 2.0 of the RPC API + def aggregate_get_by_host(self, context, host, key=None): + aggregates = self.db.aggregate_get_by_host(context.elevated(), + host, key) + return jsonutils.to_primitive(aggregates) + + # NOTE(danms): This method is now deprecated and can be removed in + # version 2.0 of the RPC API + def aggregate_metadata_add(self, context, aggregate, metadata, + set_delete=False): + new_metadata = self.db.aggregate_metadata_add(context.elevated(), + aggregate['id'], + metadata, set_delete) + return jsonutils.to_primitive(new_metadata) + + # NOTE(danms): This method 
is now deprecated and can be removed in + # version 2.0 of the RPC API + @messaging.expected_exceptions(exception.AggregateMetadataNotFound) + def aggregate_metadata_delete(self, context, aggregate, key): + self.db.aggregate_metadata_delete(context.elevated(), + aggregate['id'], key) + + def aggregate_metadata_get_by_host(self, context, host, + key='availability_zone'): + result = self.db.aggregate_metadata_get_by_host(context, host, key) + return jsonutils.to_primitive(result) + + def bw_usage_update(self, context, uuid, mac, start_period, + bw_in=None, bw_out=None, + last_ctr_in=None, last_ctr_out=None, + last_refreshed=None, + update_cells=True): + if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4: + self.db.bw_usage_update(context, uuid, mac, start_period, + bw_in, bw_out, last_ctr_in, last_ctr_out, + last_refreshed, + update_cells=update_cells) + usage = self.db.bw_usage_get(context, uuid, start_period, mac) + return jsonutils.to_primitive(usage) + + # NOTE(russellb) This method can be removed in 2.0 of this API. It is + # deprecated in favor of the method in the base API. + def get_backdoor_port(self, context): + return self.backdoor_port + + # NOTE(danms): This method can be removed in version 2.0 of this API. + def security_group_get_by_instance(self, context, instance): + group = self.db.security_group_get_by_instance(context, + instance['uuid']) + return jsonutils.to_primitive(group) + + # NOTE(danms): This method can be removed in version 2.0 of this API. + def security_group_rule_get_by_security_group(self, context, secgroup): + rules = self.db.security_group_rule_get_by_security_group( + context, secgroup['id']) + return jsonutils.to_primitive(rules, max_depth=4) + + def provider_fw_rule_get_all(self, context): + rules = self.db.provider_fw_rule_get_all(context) + return jsonutils.to_primitive(rules) + + def agent_build_get_by_triple(self, context, hypervisor, os, architecture): + info = self.db.agent_build_get_by_triple(context, hypervisor, os, + architecture) + return jsonutils.to_primitive(info) + + def block_device_mapping_update_or_create(self, context, values, + create=None): + if create is None: + bdm = self.db.block_device_mapping_update_or_create(context, + values) + elif create is True: + bdm = self.db.block_device_mapping_create(context, values) + else: + bdm = self.db.block_device_mapping_update(context, + values['id'], + values) + # NOTE:comstud): 'bdm' is always in the new format, so we + # account for this in cells/messaging.py + self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm, + create=create) + + def block_device_mapping_get_all_by_instance(self, context, instance, + legacy=True): + bdms = self.db.block_device_mapping_get_all_by_instance( + context, instance['uuid']) + if legacy: + bdms = block_device.legacy_mapping(bdms) + return jsonutils.to_primitive(bdms) + + # NOTE(russellb) This method can be removed in 2.0 of this API. It is + # deprecated in favor of the method in the base API. + def block_device_mapping_destroy(self, context, bdms=None, + instance=None, volume_id=None, + device_name=None): + if bdms is not None: + for bdm in bdms: + self.db.block_device_mapping_destroy(context, bdm['id']) + # NOTE(comstud): bdm['id'] will be different in API cell, + # so we must try to destroy by device_name or volume_id. + # We need an instance_uuid in order to do this properly, + # too. + # I hope to clean a lot of this up in the object + # implementation. 
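# [Editor's note: illustrative sketch, not part of the original patch.]
# block_device_mapping_update_or_create() above dispatches on a three-way
# 'create' flag: None means "update or create", True means "create only",
# and anything else means "update only".  The shape of that dispatch in
# isolation (the db_* callables are hypothetical stand-ins):
#
#     def save_bdm(values, create, db_upsert, db_create, db_update):
#         if create is None:
#             return db_upsert(values)
#         elif create is True:
#             return db_create(values)
#         else:
#             return db_update(values['id'], values)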
+ instance_uuid = (bdm['instance_uuid'] or + (instance and instance['uuid'])) + if not instance_uuid: + continue + # Better to be safe than sorry. device_name is not + # NULLable, however it could be an empty string. + if bdm['device_name']: + self.cells_rpcapi.bdm_destroy_at_top( + context, instance_uuid, + device_name=bdm['device_name']) + elif bdm['volume_id']: + self.cells_rpcapi.bdm_destroy_at_top( + context, instance_uuid, + volume_id=bdm['volume_id']) + elif instance is not None and volume_id is not None: + self.db.block_device_mapping_destroy_by_instance_and_volume( + context, instance['uuid'], volume_id) + self.cells_rpcapi.bdm_destroy_at_top( + context, instance['uuid'], volume_id=volume_id) + elif instance is not None and device_name is not None: + self.db.block_device_mapping_destroy_by_instance_and_device( + context, instance['uuid'], device_name) + self.cells_rpcapi.bdm_destroy_at_top( + context, instance['uuid'], device_name=device_name) + else: + # NOTE(danms): This shouldn't happen + raise exception.Invalid(_("Invalid block_device_mapping_destroy" + " invocation")) + + def instance_get_all_by_filters(self, context, filters, sort_key, + sort_dir, columns_to_join=None, + use_slave=False): + result = self.db.instance_get_all_by_filters( + context, filters, sort_key, sort_dir, + columns_to_join=columns_to_join, use_slave=use_slave) + return jsonutils.to_primitive(result) + + # NOTE(hanlind): This method can be removed in v2.0 of the RPC API. + def instance_get_all_hung_in_rebooting(self, context, timeout): + result = self.db.instance_get_all_hung_in_rebooting(context, timeout) + return jsonutils.to_primitive(result) + + def instance_get_active_by_window(self, context, begin, end=None, + project_id=None, host=None): + # Unused, but cannot remove until major RPC version bump + result = self.db.instance_get_active_by_window(context, begin, end, + project_id, host) + return jsonutils.to_primitive(result) + + def instance_get_active_by_window_joined(self, context, begin, end=None, + project_id=None, host=None): + result = self.db.instance_get_active_by_window_joined( + context, begin, end, project_id, host) + return jsonutils.to_primitive(result) + + def instance_destroy(self, context, instance): + result = self.db.instance_destroy(context, instance['uuid']) + return jsonutils.to_primitive(result) + + def instance_info_cache_delete(self, context, instance): + self.db.instance_info_cache_delete(context, instance['uuid']) + + # NOTE(hanlind): This method is now deprecated and can be removed in + # version v2.0 of the RPC API. + def instance_info_cache_update(self, context, instance, values): + self.db.instance_info_cache_update(context, instance['uuid'], + values) + + # NOTE(danms): This method is now deprecated and can be removed in + # version v2.0 of the RPC API. + def instance_type_get(self, context, instance_type_id): + result = self.db.flavor_get(context, instance_type_id) + return jsonutils.to_primitive(result) + + def instance_fault_create(self, context, values): + result = self.db.instance_fault_create(context, values) + return jsonutils.to_primitive(result) + + # NOTE(kerrin): This method can be removed in v2.0 of the RPC API. + def vol_get_usage_by_time(self, context, start_time): + result = self.db.vol_get_usage_by_time(context, start_time) + return jsonutils.to_primitive(result) + + # NOTE(kerrin): The last_refreshed argument is unused by this method + # and can be removed in v2.0 of the RPC API. 
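# [Editor's note: illustrative sketch, not part of the original patch.]
# block_device_mapping_destroy() above chooses its strategy from whichever
# identifiers the caller supplied: an explicit list of BDMs, an
# (instance, volume_id) pair, or an (instance, device_name) pair, and
# treats anything else as an error.  The dispatch reduced to a
# stand-alone function (the returned tuples are just markers):
#
#     def destroy(bdms=None, instance=None, volume_id=None,
#                 device_name=None):
#         if bdms is not None:
#             return [('by_id', bdm['id']) for bdm in bdms]
#         elif instance is not None and volume_id is not None:
#             return [('by_volume', instance['uuid'], volume_id)]
#         elif instance is not None and device_name is not None:
#             return [('by_device', instance['uuid'], device_name)]
#         raise ValueError('invalid block_device_mapping_destroy invocation')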
+ def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req, + wr_bytes, instance, last_refreshed=None, + update_totals=False): + vol_usage = self.db.vol_usage_update(context, vol_id, + rd_req, rd_bytes, + wr_req, wr_bytes, + instance['uuid'], + instance['project_id'], + instance['user_id'], + instance['availability_zone'], + update_totals) + + # We have just updated the database, so send the notification now + self.notifier.info(context, 'volume.usage', + compute_utils.usage_volume_info(vol_usage)) + + @messaging.expected_exceptions(exception.ComputeHostNotFound, + exception.HostBinaryNotFound) + def service_get_all_by(self, context, topic=None, host=None, binary=None): + if not any((topic, host, binary)): + result = self.db.service_get_all(context) + elif all((topic, host)): + if topic == 'compute': + result = self.db.service_get_by_compute_host(context, host) + # FIXME(comstud) Potentially remove this on bump to v2.0 + result = [result] + else: + result = self.db.service_get_by_host_and_topic(context, + host, topic) + elif all((host, binary)): + result = self.db.service_get_by_args(context, host, binary) + elif topic: + result = self.db.service_get_all_by_topic(context, topic) + elif host: + result = self.db.service_get_all_by_host(context, host) + + return jsonutils.to_primitive(result) + + @messaging.expected_exceptions(exception.InstanceActionNotFound) + def action_event_start(self, context, values): + evt = self.db.action_event_start(context, values) + return jsonutils.to_primitive(evt) + + @messaging.expected_exceptions(exception.InstanceActionNotFound, + exception.InstanceActionEventNotFound) + def action_event_finish(self, context, values): + evt = self.db.action_event_finish(context, values) + return jsonutils.to_primitive(evt) + + def service_create(self, context, values): + svc = self.db.service_create(context, values) + return jsonutils.to_primitive(svc) + + @messaging.expected_exceptions(exception.ServiceNotFound) + def service_destroy(self, context, service_id): + self.db.service_destroy(context, service_id) + + def compute_node_create(self, context, values): + result = self.db.compute_node_create(context, values) + return jsonutils.to_primitive(result) + + def compute_node_update(self, context, node, values, prune_stats=False): + # NOTE(belliott) prune_stats is no longer relevant and will be + # ignored + if isinstance(values.get('stats'), dict): + # NOTE(danms): In Icehouse, the 'stats' was changed from a dict + # to a JSON string. If we get a dict-based value, convert it to + # JSON, which the lower layers now expect. 
This can be removed + # in version 2.0 of the RPC API + values['stats'] = jsonutils.dumps(values['stats']) + + result = self.db.compute_node_update(context, node['id'], values) + return jsonutils.to_primitive(result) + + def compute_node_delete(self, context, node): + result = self.db.compute_node_delete(context, node['id']) + return jsonutils.to_primitive(result) + + @messaging.expected_exceptions(exception.ServiceNotFound) + def service_update(self, context, service, values): + svc = self.db.service_update(context, service['id'], values) + return jsonutils.to_primitive(svc) + + def task_log_get(self, context, task_name, begin, end, host, state=None): + result = self.db.task_log_get(context, task_name, begin, end, host, + state) + return jsonutils.to_primitive(result) + + def task_log_begin_task(self, context, task_name, begin, end, host, + task_items=None, message=None): + result = self.db.task_log_begin_task(context.elevated(), task_name, + begin, end, host, task_items, + message) + return jsonutils.to_primitive(result) + + def task_log_end_task(self, context, task_name, begin, end, host, + errors, message=None): + result = self.db.task_log_end_task(context.elevated(), task_name, + begin, end, host, errors, message) + return jsonutils.to_primitive(result) + + def notify_usage_exists(self, context, instance, current_period=False, + ignore_missing_network_data=True, + system_metadata=None, extra_usage_info=None): + compute_utils.notify_usage_exists(self.notifier, context, instance, + current_period, + ignore_missing_network_data, + system_metadata, extra_usage_info) + + def security_groups_trigger_handler(self, context, event, args): + self.security_group_api.trigger_handler(event, context, *args) + + def security_groups_trigger_members_refresh(self, context, group_ids): + self.security_group_api.trigger_members_refresh(context, group_ids) + + def network_migrate_instance_start(self, context, instance, migration): + self.network_api.migrate_instance_start(context, instance, migration) + + def network_migrate_instance_finish(self, context, instance, migration): + self.network_api.migrate_instance_finish(context, instance, migration) + + def quota_commit(self, context, reservations, project_id=None, + user_id=None): + quota.QUOTAS.commit(context, reservations, project_id=project_id, + user_id=user_id) + + def quota_rollback(self, context, reservations, project_id=None, + user_id=None): + quota.QUOTAS.rollback(context, reservations, project_id=project_id, + user_id=user_id) + + def get_ec2_ids(self, context, instance): + ec2_ids = {} + + ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid']) + ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context, + instance['image_ref']) + for image_type in ['kernel', 'ramdisk']: + image_id = instance.get('%s_id' % image_type) + if image_id is not None: + ec2_image_type = ec2utils.image_type(image_type) + ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id, + ec2_image_type) + ec2_ids['%s-id' % image_type] = ec2_id + + return ec2_ids + + # NOTE(danms): This method is now deprecated and can be removed in + # version v2.0 of the RPC API + def compute_stop(self, context, instance, do_cast=True): + # NOTE(mriedem): Clients using an interface before 1.43 will be sending + # dicts so we need to handle that here since compute/api::stop() + # requires an object. 
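# [Editor's note: illustrative sketch, not part of the original patch.]
# compute_stop() here (and compute_confirm_resize() below) keep backward
# compatibility by accepting either a plain dict from older callers or an
# Instance object, converting the former before use.  The general shape of
# such a shim (the Instance class below is a hypothetical stand-in, not
# nova's):
#
#     class Instance(object):
#         def __init__(self, **kwargs):
#             self.__dict__.update(kwargs)
#
#     def ensure_instance_obj(instance):
#         if isinstance(instance, dict):
#             return Instance(**instance)
#         return instance
#
#     inst = ensure_instance_obj({'uuid': 'abc', 'vm_state': 'active'})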
+ if isinstance(instance, dict): + instance = instance_obj.Instance._from_db_object( + context, instance_obj.Instance(), instance) + self.compute_api.stop(context, instance, do_cast) + + # NOTE(comstud): This method is now deprecated and can be removed in + # version v2.0 of the RPC API + def compute_confirm_resize(self, context, instance, migration_ref): + if isinstance(instance, dict): + attrs = ['metadata', 'system_metadata', 'info_cache', + 'security_groups'] + instance = instance_obj.Instance._from_db_object( + context, instance_obj.Instance(), instance, + expected_attrs=attrs) + if isinstance(migration_ref, dict): + migration_ref = migration_obj.Migration._from_db_object( + context.elevated(), migration_ref) + self.compute_api.confirm_resize(context, instance, + migration=migration_ref) + + def compute_unrescue(self, context, instance): + self.compute_api.unrescue(context, instance) + + def _object_dispatch(self, target, method, context, args, kwargs): + """Dispatch a call to an object method. + + This ensures that object methods get called and any exception + that is raised gets wrapped in an ExpectedException for forwarding + back to the caller (without spamming the conductor logs). + """ + try: + # NOTE(danms): Keep the getattr inside the try block since + # a missing method is really a client problem + return getattr(target, method)(context, *args, **kwargs) + except Exception: + raise messaging.ExpectedException() + + def object_class_action(self, context, objname, objmethod, + objver, args, kwargs): + """Perform a classmethod action on an object.""" + objclass = nova_object.NovaObject.obj_class_from_name(objname, + objver) + result = self._object_dispatch(objclass, objmethod, context, + args, kwargs) + # NOTE(danms): The RPC layer will convert to primitives for us, + # but in this case, we need to honor the version the client is + # asking for, so we do it before returning here. + return (result.obj_to_primitive(target_version=objver) + if isinstance(result, nova_object.NovaObject) else result) + + def object_action(self, context, objinst, objmethod, args, kwargs): + """Perform an action on an object.""" + oldobj = objinst.obj_clone() + result = self._object_dispatch(objinst, objmethod, context, + args, kwargs) + updates = dict() + # NOTE(danms): Diff the object with the one passed to us and + # generate a list of changes to forward back + for name, field in objinst.fields.items(): + if not objinst.obj_attr_is_set(name): + # Avoid demand-loading anything + continue + if (not oldobj.obj_attr_is_set(name) or + oldobj[name] != objinst[name]): + updates[name] = field.to_primitive(objinst, name, + objinst[name]) + # This is safe since a field named this would conflict with the + # method anyway + updates['obj_what_changed'] = objinst.obj_what_changed() + return updates, result + + # NOTE(danms): This method is now deprecated and can be removed in + # v2.0 of the RPC API + def compute_reboot(self, context, instance, reboot_type): + self.compute_api.reboot(context, instance, reboot_type) + + def object_backport(self, context, objinst, target_version): + return objinst.obj_to_primitive(target_version=target_version) + + +class ComputeTaskManager(base.Base): + + """Namespace for compute methods. + + This class presents an rpc API for nova-conductor under the 'compute_task' + namespace. The methods here are compute operations that are invoked + by the API service. These methods see the operation to completion, which + may involve coordinating activities on multiple compute nodes. 
+ """ + + target = messaging.Target(namespace='compute_task', version='1.6') + + def __init__(self): + super(ComputeTaskManager, self).__init__() + self.compute_rpcapi = compute_rpcapi.ComputeAPI() + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self.image_service = glance.get_default_image_service() + + @messaging.expected_exceptions(exception.NoValidHost, + exception.ComputeServiceUnavailable, + exception.InvalidHypervisorType, + exception.UnableToMigrateToSelf, + exception.DestinationHypervisorTooOld, + exception.InvalidLocalStorage, + exception.InvalidSharedStorage, + exception.MigrationPreCheckError) + def migrate_server( + self, + context, + instance, + scheduler_hint, + live, + rebuild, + flavor, + block_migration, + disk_over_commit, + reservations=None): + if instance and not isinstance(instance, instance_obj.Instance): + # NOTE(danms): Until v2 of the RPC API, we need to tolerate + # old-world instance objects here + attrs = ['metadata', 'system_metadata', 'info_cache', + 'security_groups'] + instance = instance_obj.Instance._from_db_object( + context, instance_obj.Instance(), instance, + expected_attrs=attrs) + if live and not rebuild and not flavor: + self._live_migrate(context, instance, scheduler_hint, + block_migration, disk_over_commit) + elif not live and not rebuild and flavor: + instance_uuid = instance['uuid'] + with compute_utils.EventReporter(context, self.db, + 'cold_migrate', instance_uuid): + self._cold_migrate(context, instance, flavor, + scheduler_hint['filter_properties'], + reservations) + else: + raise NotImplementedError() + + def _cold_migrate(self, context, instance, flavor, filter_properties, + reservations): + image_ref = instance.image_ref + image = compute_utils.get_image_metadata( + context, self.image_service, image_ref, instance) + + request_spec = scheduler_utils.build_request_spec( + context, image, [instance], instance_type=flavor) + + quotas = quotas_obj.Quotas.from_reservations(context, + reservations, + instance=instance) + try: + hosts = self.scheduler_rpcapi.select_destinations( + context, request_spec, filter_properties) + host_state = hosts[0] + except exception.NoValidHost as ex: + vm_state = instance['vm_state'] + if not vm_state: + vm_state = vm_states.ACTIVE + updates = {'vm_state': vm_state, 'task_state': None} + self._set_vm_state_and_notify(context, 'migrate_server', + updates, ex, request_spec) + quotas.rollback() + + LOG.warning(_("No valid host found for cold migrate"), + instance=instance) + return + + try: + scheduler_utils.populate_filter_properties(filter_properties, + host_state) + # context is not serializable + filter_properties.pop('context', None) + + # TODO(timello): originally, instance_type in request_spec + # on compute.api.resize does not have 'extra_specs', so we + # remove it for now to keep tests backward compatibility. 
+ request_spec['instance_type'].pop('extra_specs') + + (host, node) = (host_state['host'], host_state['nodename']) + self.compute_rpcapi.prep_resize( + context, image, instance, + flavor, host, + reservations, request_spec=request_spec, + filter_properties=filter_properties, node=node) + except Exception as ex: + with excutils.save_and_reraise_exception(): + updates = {'vm_state': instance['vm_state'], + 'task_state': None} + self._set_vm_state_and_notify(context, 'migrate_server', + updates, ex, request_spec) + quotas.rollback() + + def _set_vm_state_and_notify(self, context, method, updates, ex, + request_spec): + scheduler_utils.set_vm_state_and_notify( + context, 'compute_task', method, updates, + ex, request_spec, self.db) + + def _live_migrate(self, context, instance, scheduler_hint, + block_migration, disk_over_commit): + destination = scheduler_hint.get("host") + try: + live_migrate.execute(context, instance, destination, + block_migration, disk_over_commit) + except (exception.NoValidHost, + exception.ComputeServiceUnavailable, + exception.InvalidHypervisorType, + exception.UnableToMigrateToSelf, + exception.DestinationHypervisorTooOld, + exception.InvalidLocalStorage, + exception.InvalidSharedStorage, + exception.HypervisorUnavailable, + exception.MigrationPreCheckError) as ex: + with excutils.save_and_reraise_exception(): + # TODO(johngarbutt) - eventually need instance actions here + request_spec = {'instance_properties': { + 'uuid': instance['uuid'], }, + } + scheduler_utils.set_vm_state_and_notify( + context, + 'compute_task', + 'migrate_server', + dict( + vm_state=instance['vm_state'], + task_state=None, + expected_task_state=task_states.MIGRATING, + ), + ex, + request_spec, + self.db) + except Exception as ex: + LOG.error(_('Migration of instance %(instance_id)s to host' + ' %(dest)s unexpectedly failed.'), + {'instance_id': instance['uuid'], 'dest': destination}, + exc_info=True) + raise exception.MigrationError(reason=ex) + + def build_instances( + self, + context, + instances, + image, + filter_properties, + admin_password, + injected_files, + requested_networks, + security_groups, + block_device_mapping, + legacy_bdm=True): + request_spec = scheduler_utils.build_request_spec(context, image, + instances) + # NOTE(alaski): For compatibility until a new scheduler method is used. 
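+ # Fold the block device mappings and security groups into the request spec, then hand the build off to the scheduler's run_instance RPC.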
+ request_spec.update({'block_device_mapping': block_device_mapping, + 'security_group': security_groups}) + self.scheduler_rpcapi.run_instance( + context, + request_spec=request_spec, + admin_password=admin_password, + injected_files=injected_files, + requested_networks=requested_networks, + is_first_time=True, + filter_properties=filter_properties, + legacy_bdm_in_spec=legacy_bdm) + + def _get_image(self, context, image_id): + if not image_id: + return None + return self.image_service.show(context, image_id) + + def _delete_image(self, context, image_id): + (image_service, image_id) = glance.get_remote_image_service(context, + image_id) + return image_service.delete(context, image_id) + + def _schedule_instances(self, context, image, filter_properties, + *instances): + request_spec = scheduler_utils.build_request_spec(context, image, + instances) + # dict(host='', nodename='', limits='') + hosts = self.scheduler_rpcapi.select_destinations( + context, + request_spec, + filter_properties) + return hosts + + def unshelve_instance(self, context, instance): + sys_meta = instance.system_metadata + + if instance.vm_state == vm_states.SHELVED: + instance.task_state = task_states.POWERING_ON + instance.save(expected_task_state=task_states.UNSHELVING) + self.compute_rpcapi.start_instance(context, instance) + snapshot_id = sys_meta.get('shelved_image_id') + if snapshot_id: + self._delete_image(context, snapshot_id) + elif instance.vm_state == vm_states.SHELVED_OFFLOADED: + try: + with compute_utils.EventReporter(context, self.db, + 'get_image_info', instance.uuid): + image = self._get_image(context, + sys_meta['shelved_image_id']) + except exception.ImageNotFound: + with excutils.save_and_reraise_exception(): + LOG.error(_('Unshelve attempted but vm_state not SHELVED ' + 'or SHELVED_OFFLOADED'), instance=instance) + instance.vm_state = vm_states.ERROR + instance.save() + + try: + with compute_utils.EventReporter(context, self.db, + 'schedule_instances', + instance.uuid): + filter_properties = {} + hosts = self._schedule_instances(context, image, + filter_properties, + instance) + host_state = hosts[0] + scheduler_utils.populate_filter_properties( + filter_properties, host_state) + (host, node) = (host_state['host'], host_state['nodename']) + self.compute_rpcapi.unshelve_instance( + context, instance, host, image=image, + filter_properties=filter_properties, node=node) + except exception.NoValidHost as ex: + instance.task_state = None + instance.save() + LOG.warning(_("No valid host found for unshelve instance"), + instance=instance) + return + else: + LOG.error(_('Unshelve attempted but vm_state not SHELVED or ' + 'SHELVED_OFFLOADED'), instance=instance) + instance.vm_state = vm_states.ERROR + instance.save() + return + + for key in ['shelved_at', 'shelved_image_id', 'shelved_host']: + if key in sys_meta: + del(sys_meta[key]) + instance.system_metadata = sys_meta + instance.save() + + +class _ConductorManagerV2Proxy(object): + + target = messaging.Target(version='2.0') + + def __init__(self, manager): + self.manager = manager + + def instance_update(self, context, instance_uuid, updates, + service): + return self.manager.instance_update(context, instance_uuid, updates, + service) + + def instance_get_by_uuid(self, context, instance_uuid, + columns_to_join): + return self.manager.instance_get_by_uuid(context, instance_uuid, + columns_to_join) + + def migration_get_in_progress_by_host_and_node(self, context, + host, node): + return self.manager.migration_get_in_progress_by_host_and_node( + context, + 
host, + node) + + def aggregate_host_add(self, context, aggregate, host): + return self.manager.aggregate_host_add(context, aggregate, host) + + def aggregate_host_delete(self, context, aggregate, host): + return self.manager.aggregate_host_delete(context, aggregate, host) + + def aggregate_metadata_get_by_host(self, context, host, key): + return self.manager.aggregate_metadata_get_by_host(context, host, key) + + def bw_usage_update(self, context, uuid, mac, start_period, + bw_in, bw_out, last_ctr_in, last_ctr_out, + last_refreshed, update_cells): + return self.manager.bw_usage_update( + context, + uuid, + mac, + start_period, + bw_in, + bw_out, + last_ctr_in, + last_ctr_out, + last_refreshed, + update_cells) + + def provider_fw_rule_get_all(self, context): + return self.manager.provider_fw_rule_get_all(context) + + def agent_build_get_by_triple(self, context, hypervisor, os, architecture): + return self.manager.agent_build_get_by_triple(context, hypervisor, os, + architecture) + + def block_device_mapping_update_or_create(self, context, values, create): + return self.manager.block_device_mapping_update_or_create( + context, + values, + create) + + def block_device_mapping_get_all_by_instance(self, context, instance, + legacy): + return self.manager.block_device_mapping_get_all_by_instance( + context, + instance, + legacy) + + def instance_get_all_by_filters(self, context, filters, sort_key, + sort_dir, columns_to_join, use_slave): + return self.manager.instance_get_all_by_filters( + context, + filters, + sort_key, + sort_dir, + columns_to_join, + use_slave) + + def instance_get_active_by_window_joined(self, context, begin, end, + project_id, host): + return self.manager.instance_get_active_by_window_joined( + context, + begin, + end, + project_id, + host) + + def instance_destroy(self, context, instance): + return self.manager.instance_destroy(context, instance) + + def instance_info_cache_delete(self, context, instance): + return self.manager.instance_info_cache_delete(context, instance) + + def vol_get_usage_by_time(self, context, start_time): + return self.manager.vol_get_usage_by_time(context, start_time) + + def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req, + wr_bytes, instance, last_refreshed, update_totals): + return self.manager.vol_usage_update( + context, + vol_id, + rd_req, + rd_bytes, + wr_req, + wr_bytes, + instance, + last_refreshed, + update_totals) + + def service_get_all_by(self, context, topic, host, binary): + return self.manager.service_get_all_by(context, topic, host, binary) + + def instance_get_all_by_host(self, context, host, node, columns_to_join): + return self.manager.instance_get_all_by_host(context, host, node, + columns_to_join) + + def instance_fault_create(self, context, values): + return self.manager.instance_fault_create(context, values) + + def action_event_start(self, context, values): + return self.manager.action_event_start(context, values) + + def action_event_finish(self, context, values): + return self.manager.action_event_finish(context, values) + + def service_create(self, context, values): + return self.manager.service_create(context, values) + + def service_destroy(self, context, service_id): + return self.manager.service_destroy(context, service_id) + + def compute_node_create(self, context, values): + return self.manager.compute_node_create(context, values) + + def compute_node_update(self, context, node, values): + return self.manager.compute_node_update(context, node, values) + + def compute_node_delete(self, context, 
node): + return self.manager.compute_node_delete(context, node) + + def service_update(self, context, service, values): + return self.manager.service_update(context, service, values) + + def task_log_get(self, context, task_name, begin, end, host, state): + return self.manager.task_log_get(context, task_name, begin, end, host, + state) + + def task_log_begin_task(self, context, task_name, begin, end, host, + task_items, message): + return self.manager.task_log_begin_task(context, task_name, begin, end, + host, task_items, message) + + def task_log_end_task(self, context, task_name, begin, end, host, errors, + message): + return self.manager.task_log_end_task(context, task_name, begin, end, + host, errors, message) + + def notify_usage_exists(self, context, instance, current_period, + ignore_missing_network_data, + system_metadata, extra_usage_info): + return self.manager.notify_usage_exists( + context, + instance, + current_period, + ignore_missing_network_data, + system_metadata, + extra_usage_info) + + def security_groups_trigger_handler(self, context, event, args): + return self.manager.security_groups_trigger_handler(context, event, + args) + + def security_groups_trigger_members_refresh(self, context, group_ids): + return self.manager.security_groups_trigger_members_refresh(context, + group_ids) + + def network_migrate_instance_start(self, context, instance, migration): + return self.manager.network_migrate_instance_start(context, instance, + migration) + + def network_migrate_instance_finish(self, context, instance, migration): + return self.manager.network_migrate_instance_finish(context, instance, + migration) + + def quota_commit(self, context, reservations, project_id, user_id): + return self.manager.quota_commit(context, reservations, project_id, + user_id) + + def quota_rollback(self, context, reservations, project_id, user_id): + return self.manager.quota_rollback(context, reservations, project_id, + user_id) + + def get_ec2_ids(self, context, instance): + return self.manager.get_ec2_ids(context, instance) + + def compute_unrescue(self, context, instance): + return self.manager.compute_unrescue(context, instance) + + def object_class_action(self, context, objname, objmethod, objver, + args, kwargs): + return self.manager.object_class_action(context, objname, objmethod, + objver, args, kwargs) + + def object_action(self, context, objinst, objmethod, args, kwargs): + return self.manager.object_action(context, objinst, objmethod, args, + kwargs) + + def object_backport(self, context, objinst, target_version): + return self.manager.object_backport(context, objinst, target_version) diff --git a/icehouse-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/234_add_mapping_uuid_column_to_instance.py b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/234_add_mapping_uuid_column_to_instance.py new file mode 100644 index 00000000..5c0db160 --- /dev/null +++ b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/234_add_mapping_uuid_column_to_instance.py @@ -0,0 +1,20 @@ +from sqlalchemy import Column, String, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + mapping_uuid = Column('mapping_uuid', + String(length=36)) + instances.create_column(mapping_uuid) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', 
meta, autoload=True) + mapping_uuid = instances.columns.mapping_uuid + mapping_uuid.drop() diff --git a/icehouse-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/models.py b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/models.py new file mode 100644 index 00000000..68ce1a11 --- /dev/null +++ b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/models.py @@ -0,0 +1,1470 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models for nova data. +""" + +from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema +from sqlalchemy.dialects.mysql import MEDIUMTEXT +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float +from sqlalchemy.orm import relationship, backref, object_mapper +from oslo.config import cfg + +from nova.db.sqlalchemy import types +from nova.openstack.common.db.sqlalchemy import models +from nova.openstack.common import timeutils + +CONF = cfg.CONF +BASE = declarative_base() + + +def MediumText(): + return Text().with_variant(MEDIUMTEXT(), 'mysql') + + +class NovaBase(models.SoftDeleteMixin, + models.TimestampMixin, + models.ModelBase): + metadata = None + + def save(self, session=None): + from nova.db.sqlalchemy import api + + if session is None: + session = api.get_session() + + super(NovaBase, self).save(session=session) + + +class Service(BASE, NovaBase): + + """Represents a running service on a host.""" + + __tablename__ = 'services' + __table_args__ = ( + schema.UniqueConstraint("host", "topic", "deleted", + name="uniq_services0host0topic0deleted"), + schema.UniqueConstraint("host", "binary", "deleted", + name="uniq_services0host0binary0deleted") + ) + + id = Column(Integer, primary_key=True) + host = Column(String(255)) # , ForeignKey('hosts.id')) + binary = Column(String(255)) + topic = Column(String(255)) + report_count = Column(Integer, nullable=False, default=0) + disabled = Column(Boolean, default=False) + disabled_reason = Column(String(255)) + + +class ComputeNode(BASE, NovaBase): + + """Represents a running compute service on a host.""" + + __tablename__ = 'compute_nodes' + __table_args__ = () + id = Column(Integer, primary_key=True) + service_id = Column(Integer, ForeignKey('services.id'), nullable=False) + service = relationship(Service, + backref=backref('compute_node'), + foreign_keys=service_id, + primaryjoin='and_(' + 'ComputeNode.service_id == Service.id,' + 'ComputeNode.deleted == 0)') + + vcpus = Column(Integer, nullable=False) + memory_mb = Column(Integer, nullable=False) + local_gb = Column(Integer, nullable=False) + vcpus_used = Column(Integer, nullable=False) + memory_mb_used = Column(Integer, nullable=False) + local_gb_used = 
Column(Integer, nullable=False) + hypervisor_type = Column(MediumText(), nullable=False) + hypervisor_version = Column(Integer, nullable=False) + hypervisor_hostname = Column(String(255)) + + # Free Ram, amount of activity (resize, migration, boot, etc) and + # the number of running VM's are a good starting point for what's + # important when making scheduling decisions. + free_ram_mb = Column(Integer) + free_disk_gb = Column(Integer) + current_workload = Column(Integer) + running_vms = Column(Integer) + + # Note(masumotok): Expected Strings example: + # + # '{"arch":"x86_64", + # "model":"Nehalem", + # "topology":{"sockets":1, "threads":2, "cores":3}, + # "features":["tdtscp", "xtpr"]}' + # + # Points are "json translatable" and it must have all dictionary keys + # above, since it is copied from tag of getCapabilities() + # (See libvirt.virtConnection). + cpu_info = Column(MediumText(), nullable=False) + disk_available_least = Column(Integer) + host_ip = Column(types.IPAddress()) + supported_instances = Column(Text) + metrics = Column(Text) + + # Note(yongli): json string PCI Stats + # '{"vendor_id":"8086", "product_id":"1234", "count":3 }' + pci_stats = Column(Text) + + # extra_resources is a json string containing arbitrary + # data about additional resources. + extra_resources = Column(Text) + + # json-encode string containing compute node statistics + stats = Column(Text, default='{}') + + +class Certificate(BASE, NovaBase): + + """Represents a x509 certificate.""" + __tablename__ = 'certificates' + __table_args__ = ( + Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'), + Index('certificates_user_id_deleted_idx', 'user_id', 'deleted') + ) + id = Column(Integer, primary_key=True) + + user_id = Column(String(255)) + project_id = Column(String(255)) + file_name = Column(String(255)) + + +class Instance(BASE, NovaBase): + + """Represents a guest VM.""" + __tablename__ = 'instances' + __table_args__ = ( + Index('uuid', 'uuid', unique=True), + Index('project_id', 'project_id'), + Index('instances_host_deleted_idx', + 'host', 'deleted'), + Index('instances_reservation_id_idx', + 'reservation_id'), + Index('instances_terminated_at_launched_at_idx', + 'terminated_at', 'launched_at'), + Index('instances_uuid_deleted_idx', + 'uuid', 'deleted'), + Index('instances_task_state_updated_at_idx', + 'task_state', 'updated_at'), + Index('instances_host_node_deleted_idx', + 'host', 'node', 'deleted'), + Index('instances_host_deleted_cleaned_idx', + 'host', 'deleted', 'cleaned'), + ) + injected_files = [] + + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + try: + base_name = CONF.instance_name_template % self.id + except TypeError: + # Support templates like "uuid-%(uuid)s", etc. + info = {} + # NOTE(russellb): Don't use self.iteritems() here, as it will + # result in infinite recursion on the name property. + for column in iter(object_mapper(self).columns): + key = column.name + # prevent recursion if someone specifies %(name)s + # %(name)s will not be valid. 
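+ # Skip 'name' so formatting the template cannot recurse back into this property.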
+ if key == 'name': + continue + info[key] = self[key] + try: + base_name = CONF.instance_name_template % info + except KeyError: + base_name = self.uuid + return base_name + + @property + def _extra_keys(self): + return ['name'] + + user_id = Column(String(255)) + project_id = Column(String(255)) + + image_ref = Column(String(255)) + kernel_id = Column(String(255)) + ramdisk_id = Column(String(255)) + hostname = Column(String(255)) + + launch_index = Column(Integer) + key_name = Column(String(255)) + key_data = Column(MediumText()) + + power_state = Column(Integer) + vm_state = Column(String(255)) + task_state = Column(String(255)) + + memory_mb = Column(Integer) + vcpus = Column(Integer) + root_gb = Column(Integer) + ephemeral_gb = Column(Integer) + ephemeral_key_uuid = Column(String(36)) + + # This is not related to hostname, above. It refers + # to the nova node. + host = Column(String(255)) # , ForeignKey('hosts.id')) + # To identify the "ComputeNode" which the instance resides in. + # This equals to ComputeNode.hypervisor_hostname. + node = Column(String(255)) + + # *not* flavorid, this is the internal primary_key + instance_type_id = Column(Integer) + + user_data = Column(MediumText()) + + reservation_id = Column(String(255)) + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + availability_zone = Column(String(255)) + + # User editable field for display in user-facing UIs + display_name = Column(String(255)) + display_description = Column(String(255)) + + # To remember on which host an instance booted. + # An instance may have moved to another host by live migration. + launched_on = Column(MediumText()) + + # NOTE(jdillaman): locked deprecated in favor of locked_by, + # to be removed in Icehouse + locked = Column(Boolean) + locked_by = Column(Enum('owner', 'admin')) + + os_type = Column(String(255)) + architecture = Column(String(255)) + vm_mode = Column(String(255)) + uuid = Column(String(36)) + mapping_uuid = Column(String(36)) + + root_device_name = Column(String(255)) + default_ephemeral_device = Column(String(255)) + default_swap_device = Column(String(255)) + config_drive = Column(String(255)) + + # User editable field meant to represent what ip should be used + # to connect to the instance + access_ip_v4 = Column(types.IPAddress()) + access_ip_v6 = Column(types.IPAddress()) + + auto_disk_config = Column(Boolean()) + progress = Column(Integer) + + # EC2 instance_initiated_shutdown_terminate + # True: -> 'terminate' + # False: -> 'stop' + # Note(maoy): currently Nova will always stop instead of terminate + # no matter what the flag says. So we set the default to False. + shutdown_terminate = Column(Boolean(), default=False) + + # EC2 disable_api_termination + disable_terminate = Column(Boolean(), default=False) + + # OpenStack compute cell name. 
This will only be set at the top of + # the cells tree and it'll be a full cell name such as 'api!hop1!hop2' + cell_name = Column(String(255)) + internal_id = Column(Integer) + + # Records whether an instance has been deleted from disk + cleaned = Column(Integer, default=0) + + +class InstanceInfoCache(BASE, NovaBase): + + """Represents a cache of information about an instance + """ + __tablename__ = 'instance_info_caches' + __table_args__ = ( + schema.UniqueConstraint( + "instance_uuid", + name="uniq_instance_info_caches0instance_uuid"),) + id = Column(Integer, primary_key=True, autoincrement=True) + + # text column used for storing a json object of network data for api + network_info = Column(MediumText()) + + instance_uuid = Column(String(36), ForeignKey('instances.uuid'), + nullable=False) + instance = relationship(Instance, + backref=backref('info_cache', uselist=False), + foreign_keys=instance_uuid, + primaryjoin=instance_uuid == Instance.uuid) + + +class InstanceTypes(BASE, NovaBase): + + """Represents possible flavors for instances. + + Note: instance_type and flavor are synonyms and the term instance_type is + deprecated and in the process of being removed. + """ + __tablename__ = "instance_types" + + __table_args__ = ( + schema.UniqueConstraint("flavorid", "deleted", + name="uniq_instance_types0flavorid0deleted"), + schema.UniqueConstraint("name", "deleted", + name="uniq_instance_types0name0deleted") + ) + + # Internal only primary key/id + id = Column(Integer, primary_key=True) + name = Column(String(255)) + memory_mb = Column(Integer, nullable=False) + vcpus = Column(Integer, nullable=False) + root_gb = Column(Integer) + ephemeral_gb = Column(Integer) + # Public facing id will be renamed public_id + flavorid = Column(String(255)) + swap = Column(Integer, nullable=False, default=0) + rxtx_factor = Column(Float, default=1) + vcpu_weight = Column(Integer) + disabled = Column(Boolean, default=False) + is_public = Column(Boolean, default=True) + + +class Volume(BASE, NovaBase): + + """Represents a block storage device that can be attached to a VM.""" + __tablename__ = 'volumes' + __table_args__ = ( + Index('volumes_instance_uuid_idx', 'instance_uuid'), + ) + id = Column(String(36), primary_key=True, nullable=False) + deleted = Column(String(36), default="") + + @property + def name(self): + return CONF.volume_name_template % self.id + + ec2_id = Column(String(255)) + user_id = Column(String(255)) + project_id = Column(String(255)) + + snapshot_id = Column(String(36)) + + host = Column(String(255)) + size = Column(Integer) + availability_zone = Column(String(255)) + instance_uuid = Column(String(36)) + mountpoint = Column(String(255)) + attach_time = Column(DateTime) + status = Column(String(255)) # TODO(vish): enum? + attach_status = Column(String(255)) # TODO(vish): enum + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + provider_location = Column(String(256)) + provider_auth = Column(String(256)) + + volume_type_id = Column(Integer) + + +class Quota(BASE, NovaBase): + + """Represents a single quota override for a project. + + If there is no row for a given project id and resource, then the + default for the quota class is used. If there is no row for a + given quota class and resource, then the default for the + deployment is used. If the row is present but the hard limit is + Null, then the resource is unlimited. 
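+ Per-user overrides within a project are stored in ProjectUserQuota rather than here.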
+ """ + + __tablename__ = 'quotas' + __table_args__ = ( + schema.UniqueConstraint("project_id", "resource", "deleted", + name="uniq_quotas0project_id0resource0deleted" + ), + ) + id = Column(Integer, primary_key=True) + + project_id = Column(String(255)) + + resource = Column(String(255), nullable=False) + hard_limit = Column(Integer) + + +class ProjectUserQuota(BASE, NovaBase): + + """Represents a single quota override for a user with in a project.""" + + __tablename__ = 'project_user_quotas' + uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted" + __table_args__ = ( + schema.UniqueConstraint("user_id", "project_id", "resource", "deleted", + name=uniq_name), + Index('project_user_quotas_project_id_deleted_idx', + 'project_id', 'deleted'), + Index('project_user_quotas_user_id_deleted_idx', + 'user_id', 'deleted') + ) + id = Column(Integer, primary_key=True, nullable=False) + + project_id = Column(String(255), nullable=False) + user_id = Column(String(255), nullable=False) + + resource = Column(String(255), nullable=False) + hard_limit = Column(Integer) + + +class QuotaClass(BASE, NovaBase): + + """Represents a single quota override for a quota class. + + If there is no row for a given quota class and resource, then the + default for the deployment is used. If the row is present but the + hard limit is Null, then the resource is unlimited. + """ + + __tablename__ = 'quota_classes' + __table_args__ = ( + Index('ix_quota_classes_class_name', 'class_name'), + ) + id = Column(Integer, primary_key=True) + + class_name = Column(String(255)) + + resource = Column(String(255)) + hard_limit = Column(Integer) + + +class QuotaUsage(BASE, NovaBase): + + """Represents the current usage for a given resource.""" + + __tablename__ = 'quota_usages' + __table_args__ = ( + Index('ix_quota_usages_project_id', 'project_id'), + ) + id = Column(Integer, primary_key=True) + + project_id = Column(String(255)) + user_id = Column(String(255)) + resource = Column(String(255), nullable=False) + + in_use = Column(Integer, nullable=False) + reserved = Column(Integer, nullable=False) + + @property + def total(self): + return self.in_use + self.reserved + + until_refresh = Column(Integer) + + +class Reservation(BASE, NovaBase): + + """Represents a resource reservation for quotas.""" + + __tablename__ = 'reservations' + __table_args__ = ( + Index('ix_reservations_project_id', 'project_id'), + Index('reservations_uuid_idx', 'uuid'), + ) + id = Column(Integer, primary_key=True, nullable=False) + uuid = Column(String(36), nullable=False) + + usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) + + project_id = Column(String(255)) + user_id = Column(String(255)) + resource = Column(String(255)) + + delta = Column(Integer, nullable=False) + expire = Column(DateTime) + + usage = relationship( + "QuotaUsage", + foreign_keys=usage_id, + primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' + 'QuotaUsage.deleted == 0)') + + +class Snapshot(BASE, NovaBase): + + """Represents a block storage device that can be attached to a VM.""" + __tablename__ = 'snapshots' + __table_args__ = () + id = Column(String(36), primary_key=True, nullable=False) + deleted = Column(String(36), default="") + + @property + def name(self): + return CONF.snapshot_name_template % self.id + + @property + def volume_name(self): + return CONF.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(String(36), nullable=False) + status = 
Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + scheduled_at = Column(DateTime) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + +class BlockDeviceMapping(BASE, NovaBase): + + """Represents block device mapping that is defined by EC2.""" + __tablename__ = "block_device_mapping" + __table_args__ = ( + Index('snapshot_id', 'snapshot_id'), + Index('volume_id', 'volume_id'), + Index('block_device_mapping_instance_uuid_device_name_idx', + 'instance_uuid', 'device_name'), + Index('block_device_mapping_instance_uuid_volume_id_idx', + 'instance_uuid', 'volume_id'), + Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'), + # TODO(sshturm) Should be dropped. `virtual_name` was dropped + # in 186 migration, + # Duplicates `block_device_mapping_instance_uuid_device_name_idx` + # index. + Index("block_device_mapping_instance_uuid_virtual_name" + "_device_name_idx", 'instance_uuid', 'device_name'), + ) + id = Column(Integer, primary_key=True, autoincrement=True) + + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + instance = relationship(Instance, + backref=backref('block_device_mapping'), + foreign_keys=instance_uuid, + primaryjoin='and_(BlockDeviceMapping.' + 'instance_uuid==' + 'Instance.uuid,' + 'BlockDeviceMapping.deleted==' + '0)') + + source_type = Column(String(255)) + destination_type = Column(String(255)) + guest_format = Column(String(255)) + device_type = Column(String(255)) + disk_bus = Column(String(255)) + + boot_index = Column(Integer) + + device_name = Column(String(255)) + + # default=False for compatibility of the existing code. + # With EC2 API, + # default True for ami specified device. + # default False for created with other timing. + # TODO(sshturm) add default in db + delete_on_termination = Column(Boolean, default=False) + + snapshot_id = Column(String(36)) + + volume_id = Column(String(36)) + volume_size = Column(Integer) + + image_id = Column(String(36)) + + # for no device to suppress devices. 
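+ # When set, this mapping suppresses a device rather than attaching a volume, snapshot or image.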
+ no_device = Column(Boolean) + + connection_info = Column(MediumText()) + + +class IscsiTarget(BASE, NovaBase): + + """Represents an iscsi target for a given host.""" + __tablename__ = 'iscsi_targets' + __table_args__ = ( + Index('iscsi_targets_volume_id_fkey', 'volume_id'), + Index('iscsi_targets_host_idx', 'host'), + Index('iscsi_targets_host_volume_id_deleted_idx', 'host', 'volume_id', + 'deleted') + ) + id = Column(Integer, primary_key=True, nullable=False) + target_num = Column(Integer) + host = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id')) + volume = relationship(Volume, + backref=backref('iscsi_target', uselist=False), + foreign_keys=volume_id, + primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' + 'IscsiTarget.deleted==0)') + + +class SecurityGroupInstanceAssociation(BASE, NovaBase): + __tablename__ = 'security_group_instance_association' + __table_args__ = ( + Index('security_group_instance_association_instance_uuid_idx', + 'instance_uuid'), + ) + id = Column(Integer, primary_key=True, nullable=False) + security_group_id = Column(Integer, ForeignKey('security_groups.id')) + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + + +class SecurityGroup(BASE, NovaBase): + + """Represents a security group.""" + __tablename__ = 'security_groups' + __table_args__ = ( + Index('uniq_security_groups0project_id0name0deleted', 'project_id', + 'name', 'deleted'), + ) + id = Column(Integer, primary_key=True) + + name = Column(String(255)) + description = Column(String(255)) + user_id = Column(String(255)) + project_id = Column(String(255)) + + instances = relationship(Instance, + secondary="security_group_instance_association", + primaryjoin='and_(' + 'SecurityGroup.id == ' + 'SecurityGroupInstanceAssociation.security_group_id,' + 'SecurityGroupInstanceAssociation.deleted == 0,' + 'SecurityGroup.deleted == 0)', + secondaryjoin='and_(' + 'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,' + # (anthony) the condition below shouldn't be necessary now that the + # association is being marked as deleted. However, removing this + # may cause existing deployments to choke, so I'm leaving it + 'Instance.deleted == 0)', + backref='security_groups') + + +class SecurityGroupIngressRule(BASE, NovaBase): + + """Represents a rule in a security group.""" + __tablename__ = 'security_group_rules' + __table_args__ = () + id = Column(Integer, primary_key=True) + + parent_group_id = Column(Integer, ForeignKey('security_groups.id')) + parent_group = relationship( + "SecurityGroup", + backref="rules", + foreign_keys=parent_group_id, + primaryjoin='and_(' + 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' + 'SecurityGroupIngressRule.deleted == 0)') + + protocol = Column(String(255)) + from_port = Column(Integer) + to_port = Column(Integer) + cidr = Column(types.CIDR()) + + # Note: This is not the parent SecurityGroup. It's SecurityGroup we're + # granting access for. 
+ group_id = Column(Integer, ForeignKey('security_groups.id')) + grantee_group = relationship( + "SecurityGroup", + foreign_keys=group_id, + primaryjoin='and_(' + 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' + 'SecurityGroupIngressRule.deleted == 0)') + + +class SecurityGroupIngressDefaultRule(BASE, NovaBase): + __tablename__ = 'security_group_default_rules' + __table_args__ = () + id = Column(Integer, primary_key=True, nullable=False) + protocol = Column(String(5)) # "tcp", "udp" or "icmp" + from_port = Column(Integer) + to_port = Column(Integer) + cidr = Column(types.CIDR()) + + +class ProviderFirewallRule(BASE, NovaBase): + + """Represents a rule in a security group.""" + __tablename__ = 'provider_fw_rules' + __table_args__ = () + id = Column(Integer, primary_key=True, nullable=False) + + protocol = Column(String(5)) # "tcp", "udp", or "icmp" + from_port = Column(Integer) + to_port = Column(Integer) + cidr = Column(types.CIDR()) + + +class KeyPair(BASE, NovaBase): + + """Represents a public key pair for ssh.""" + __tablename__ = 'key_pairs' + __table_args__ = ( + schema.UniqueConstraint("user_id", "name", "deleted", + name="uniq_key_pairs0user_id0name0deleted"), + ) + id = Column(Integer, primary_key=True, nullable=False) + + name = Column(String(255)) + + user_id = Column(String(255)) + + fingerprint = Column(String(255)) + public_key = Column(MediumText()) + + +class Migration(BASE, NovaBase): + + """Represents a running host-to-host migration.""" + __tablename__ = 'migrations' + __table_args__ = ( + Index('migrations_instance_uuid_and_status_idx', 'instance_uuid', + 'status'), + Index('migrations_by_host_nodes_and_status_idx', 'deleted', + 'source_compute', 'dest_compute', 'source_node', 'dest_node', + 'status'), + ) + id = Column(Integer, primary_key=True, nullable=False) + # NOTE(tr3buchet): the ____compute variables are instance['host'] + source_compute = Column(String(255)) + dest_compute = Column(String(255)) + # nodes are equivalent to a compute node's 'hypervisor_hostname' + source_node = Column(String(255)) + dest_node = Column(String(255)) + # NOTE(tr3buchet): dest_host, btw, is an ip address + dest_host = Column(String(255)) + old_instance_type_id = Column(Integer()) + new_instance_type_id = Column(Integer()) + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + # TODO(_cerberus_): enum + status = Column(String(255)) + + instance = relationship("Instance", foreign_keys=instance_uuid, + primaryjoin='and_(Migration.instance_uuid == ' + 'Instance.uuid, Instance.deleted == ' + '0)') + + +class Network(BASE, NovaBase): + + """Represents a network.""" + __tablename__ = 'networks' + __table_args__ = ( + schema.UniqueConstraint("vlan", "deleted", + name="uniq_networks0vlan0deleted"), + Index('networks_bridge_deleted_idx', 'bridge', 'deleted'), + Index('networks_host_idx', 'host'), + Index('networks_project_id_deleted_idx', 'project_id', 'deleted'), + Index('networks_uuid_project_id_deleted_idx', 'uuid', + 'project_id', 'deleted'), + Index('networks_vlan_deleted_idx', 'vlan', 'deleted'), + Index('networks_cidr_v6_idx', 'cidr_v6') + ) + + id = Column(Integer, primary_key=True, nullable=False) + label = Column(String(255)) + + injected = Column(Boolean, default=False) + cidr = Column(types.CIDR()) + cidr_v6 = Column(types.CIDR()) + multi_host = Column(Boolean, default=False) + + gateway_v6 = Column(types.IPAddress()) + netmask_v6 = Column(types.IPAddress()) + netmask = Column(types.IPAddress()) + bridge = Column(String(255)) + bridge_interface = 
Column(String(255)) + gateway = Column(types.IPAddress()) + broadcast = Column(types.IPAddress()) + dns1 = Column(types.IPAddress()) + dns2 = Column(types.IPAddress()) + + vlan = Column(Integer) + vpn_public_address = Column(types.IPAddress()) + vpn_public_port = Column(Integer) + vpn_private_address = Column(types.IPAddress()) + dhcp_start = Column(types.IPAddress()) + + rxtx_base = Column(Integer) + + project_id = Column(String(255)) + priority = Column(Integer) + host = Column(String(255)) # , ForeignKey('hosts.id')) + uuid = Column(String(36)) + + +class VirtualInterface(BASE, NovaBase): + + """Represents a virtual interface on an instance.""" + __tablename__ = 'virtual_interfaces' + __table_args__ = ( + schema.UniqueConstraint( + "address", "deleted", name="uniq_virtual_interfaces0address0deleted"), Index( + 'network_id', 'network_id'), Index( + 'virtual_interfaces_instance_uuid_fkey', 'instance_uuid'), ) + id = Column(Integer, primary_key=True, nullable=False) + address = Column(String(255)) + network_id = Column(Integer) + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + uuid = Column(String(36)) + + +# TODO(vish): can these both come from the same baseclass? +class FixedIp(BASE, NovaBase): + + """Represents a fixed ip for an instance.""" + __tablename__ = 'fixed_ips' + __table_args__ = ( + schema.UniqueConstraint( + "address", "deleted", name="uniq_fixed_ips0address0deleted"), + Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'), + Index('network_id', 'network_id'), + Index('address', 'address'), + Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'), + Index('fixed_ips_host_idx', 'host'), + Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host', + 'deleted'), + Index('fixed_ips_address_reserved_network_id_deleted_idx', + 'address', 'reserved', 'network_id', 'deleted'), + Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted', + 'allocated') + ) + id = Column(Integer, primary_key=True) + address = Column(types.IPAddress()) + network_id = Column(Integer) + virtual_interface_id = Column(Integer) + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + # associated means that a fixed_ip has its instance_id column set + # allocated means that a fixed_ip has its virtual_interface_id column set + # TODO(sshturm) add default in db + allocated = Column(Boolean, default=False) + # leased means dhcp bridge has leased the ip + # TODO(sshturm) add default in db + leased = Column(Boolean, default=False) + # TODO(sshturm) add default in db + reserved = Column(Boolean, default=False) + host = Column(String(255)) + network = relationship(Network, + backref=backref('fixed_ips'), + foreign_keys=network_id, + primaryjoin='and_(' + 'FixedIp.network_id == Network.id,' + 'FixedIp.deleted == 0,' + 'Network.deleted == 0)') + instance = relationship(Instance, + foreign_keys=instance_uuid, + primaryjoin='and_(' + 'FixedIp.instance_uuid == Instance.uuid,' + 'FixedIp.deleted == 0,' + 'Instance.deleted == 0)') + + +class FloatingIp(BASE, NovaBase): + + """Represents a floating ip that dynamically forwards to a fixed ip.""" + __tablename__ = 'floating_ips' + __table_args__ = ( + schema.UniqueConstraint("address", "deleted", + name="uniq_floating_ips0address0deleted"), + Index('fixed_ip_id', 'fixed_ip_id'), + Index('floating_ips_host_idx', 'host'), + Index('floating_ips_project_id_idx', 'project_id'), + Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx', + 'pool', 'deleted', 'fixed_ip_id', 'project_id') + ) + id = Column(Integer, 
primary_key=True) + address = Column(types.IPAddress()) + fixed_ip_id = Column(Integer) + project_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) + auto_assigned = Column(Boolean, default=False) + # TODO(sshturm) add default in db + pool = Column(String(255)) + interface = Column(String(255)) + fixed_ip = relationship(FixedIp, + backref=backref('floating_ips'), + foreign_keys=fixed_ip_id, + primaryjoin='and_(' + 'FloatingIp.fixed_ip_id == FixedIp.id,' + 'FloatingIp.deleted == 0,' + 'FixedIp.deleted == 0)') + + +class DNSDomain(BASE, NovaBase): + + """Represents a DNS domain with availability zone or project info.""" + __tablename__ = 'dns_domains' + __table_args__ = ( + Index('project_id', 'project_id'), + Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'), + ) + deleted = Column(Boolean, default=False) + domain = Column(String(255), primary_key=True) + scope = Column(String(255)) + availability_zone = Column(String(255)) + project_id = Column(String(255)) + + +class ConsolePool(BASE, NovaBase): + + """Represents pool of consoles on the same physical node.""" + __tablename__ = 'console_pools' + __table_args__ = ( + schema.UniqueConstraint( + "host", "console_type", "compute_host", "deleted", + name="uniq_console_pools0host0console_type0compute_host0deleted"), + ) + id = Column(Integer, primary_key=True) + address = Column(types.IPAddress()) + username = Column(String(255)) + password = Column(String(255)) + console_type = Column(String(255)) + public_hostname = Column(String(255)) + host = Column(String(255)) + compute_host = Column(String(255)) + + +class Console(BASE, NovaBase): + + """Represents a console session for an instance.""" + __tablename__ = 'consoles' + __table_args__ = ( + Index('consoles_instance_uuid_idx', 'instance_uuid'), + ) + id = Column(Integer, primary_key=True) + instance_name = Column(String(255)) + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + password = Column(String(255)) + port = Column(Integer) + pool_id = Column(Integer, ForeignKey('console_pools.id')) + pool = relationship(ConsolePool, backref=backref('consoles')) + + +class InstanceMetadata(BASE, NovaBase): + + """Represents a user-provided metadata key/value pair for an instance.""" + __tablename__ = 'instance_metadata' + __table_args__ = ( + Index('instance_metadata_instance_uuid_idx', 'instance_uuid'), + ) + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + instance = relationship(Instance, backref="metadata", + foreign_keys=instance_uuid, + primaryjoin='and_(' + 'InstanceMetadata.instance_uuid == ' + 'Instance.uuid,' + 'InstanceMetadata.deleted == 0)') + + +class InstanceSystemMetadata(BASE, NovaBase): + + """Represents a system-owned metadata key/value pair for an instance.""" + __tablename__ = 'instance_system_metadata' + __table_args__ = () + id = Column(Integer, primary_key=True) + key = Column(String(255), nullable=False) + value = Column(String(255)) + instance_uuid = Column(String(36), + ForeignKey('instances.uuid'), + nullable=False) + + primary_join = ('and_(InstanceSystemMetadata.instance_uuid == ' + 'Instance.uuid, InstanceSystemMetadata.deleted == 0)') + instance = relationship(Instance, backref="system_metadata", + foreign_keys=instance_uuid, + primaryjoin=primary_join) + + +class InstanceTypeProjects(BASE, NovaBase): + + """Represent projects associated instance_types.""" + __tablename__ = 
"instance_type_projects" + __table_args__ = (schema.UniqueConstraint( + "instance_type_id", "project_id", "deleted", + name="uniq_instance_type_projects0instance_type_id0project_id0deleted" + ), + ) + id = Column(Integer, primary_key=True) + instance_type_id = Column(Integer, ForeignKey('instance_types.id'), + nullable=False) + project_id = Column(String(255)) + + instance_type = relationship( + InstanceTypes, + backref="projects", + foreign_keys=instance_type_id, + primaryjoin='and_(' + 'InstanceTypeProjects.instance_type_id == InstanceTypes.id,' + 'InstanceTypeProjects.deleted == 0)') + + +class InstanceTypeExtraSpecs(BASE, NovaBase): + + """Represents additional specs as key/value pairs for an instance_type.""" + __tablename__ = 'instance_type_extra_specs' + __table_args__ = ( + Index('instance_type_extra_specs_instance_type_id_key_idx', + 'instance_type_id', 'key'), + schema.UniqueConstraint( + "instance_type_id", "key", "deleted", + name=("uniq_instance_type_extra_specs0" + "instance_type_id0key0deleted") + ), + ) + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_type_id = Column(Integer, ForeignKey('instance_types.id'), + nullable=False) + instance_type = relationship( + InstanceTypes, + backref="extra_specs", + foreign_keys=instance_type_id, + primaryjoin='and_(' + 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,' + 'InstanceTypeExtraSpecs.deleted == 0)') + + +class Cell(BASE, NovaBase): + + """Represents parent and child cells of this cell. Cells can + have multiple parents and children, so there could be any number + of entries with is_parent=True or False + """ + __tablename__ = 'cells' + __table_args__ = (schema.UniqueConstraint( + "name", "deleted", name="uniq_cells0name0deleted" + ), + ) + id = Column(Integer, primary_key=True) + # Name here is the 'short name' of a cell. 
For instance: 'child1' + name = Column(String(255)) + api_url = Column(String(255)) + + transport_url = Column(String(255), nullable=False) + + weight_offset = Column(Float(), default=0.0) + weight_scale = Column(Float(), default=1.0) + is_parent = Column(Boolean()) + + +class AggregateHost(BASE, NovaBase): + + """Represents a host that is member of an aggregate.""" + __tablename__ = 'aggregate_hosts' + __table_args__ = (schema.UniqueConstraint( + "host", "aggregate_id", "deleted", + name="uniq_aggregate_hosts0host0aggregate_id0deleted" + ), + ) + id = Column(Integer, primary_key=True, autoincrement=True) + host = Column(String(255)) + aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) + + +class AggregateMetadata(BASE, NovaBase): + + """Represents a metadata key/value pair for an aggregate.""" + __tablename__ = 'aggregate_metadata' + __table_args__ = ( + schema.UniqueConstraint( + "aggregate_id", + "key", + "deleted", + name="uniq_aggregate_metadata0aggregate_id0key0deleted"), + Index( + 'aggregate_metadata_key_idx', + 'key'), + ) + id = Column(Integer, primary_key=True) + key = Column(String(255), nullable=False) + value = Column(String(255), nullable=False) + aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) + + +class Aggregate(BASE, NovaBase): + + """Represents a cluster of hosts that exists in this zone.""" + __tablename__ = 'aggregates' + __table_args__ = () + id = Column(Integer, primary_key=True, autoincrement=True) + name = Column(String(255)) + _hosts = relationship(AggregateHost, + primaryjoin='and_(' + 'Aggregate.id == AggregateHost.aggregate_id,' + 'AggregateHost.deleted == 0,' + 'Aggregate.deleted == 0)') + + _metadata = relationship(AggregateMetadata, + primaryjoin='and_(' + 'Aggregate.id == AggregateMetadata.aggregate_id,' + 'AggregateMetadata.deleted == 0,' + 'Aggregate.deleted == 0)') + + @property + def _extra_keys(self): + return ['hosts', 'metadetails', 'availability_zone'] + + @property + def hosts(self): + return [h.host for h in self._hosts] + + @property + def metadetails(self): + return dict([(m.key, m.value) for m in self._metadata]) + + @property + def availability_zone(self): + if 'availability_zone' not in self.metadetails: + return None + return self.metadetails['availability_zone'] + + +class AgentBuild(BASE, NovaBase): + + """Represents an agent build.""" + __tablename__ = 'agent_builds' + __table_args__ = ( + Index( + 'agent_builds_hypervisor_os_arch_idx', + 'hypervisor', + 'os', + 'architecture'), + schema.UniqueConstraint( + "hypervisor", + "os", + "architecture", + "deleted", + name="uniq_agent_builds0hypervisor0os0architecture0deleted"), + ) + id = Column(Integer, primary_key=True) + hypervisor = Column(String(255)) + os = Column(String(255)) + architecture = Column(String(255)) + version = Column(String(255)) + url = Column(String(255)) + md5hash = Column(String(255)) + + +class BandwidthUsage(BASE, NovaBase): + + """Cache for instance bandwidth usage data pulled from the hypervisor.""" + __tablename__ = 'bw_usage_cache' + __table_args__ = ( + Index('bw_usage_cache_uuid_start_period_idx', 'uuid', + 'start_period'), + ) + id = Column(Integer, primary_key=True, nullable=False) + uuid = Column(String(36)) + mac = Column(String(255)) + start_period = Column(DateTime, nullable=False) + last_refreshed = Column(DateTime) + bw_in = Column(BigInteger) + bw_out = Column(BigInteger) + last_ctr_in = Column(BigInteger) + last_ctr_out = Column(BigInteger) + + +class VolumeUsage(BASE, NovaBase): + + """Cache for 
volume usage data pulled from the hypervisor.""" + __tablename__ = 'volume_usage_cache' + __table_args__ = () + id = Column(Integer, primary_key=True, nullable=False) + volume_id = Column(String(36), nullable=False) + instance_uuid = Column(String(36)) + project_id = Column(String(36)) + user_id = Column(String(36)) + availability_zone = Column(String(255)) + tot_last_refreshed = Column(DateTime) + tot_reads = Column(BigInteger, default=0) + tot_read_bytes = Column(BigInteger, default=0) + tot_writes = Column(BigInteger, default=0) + tot_write_bytes = Column(BigInteger, default=0) + curr_last_refreshed = Column(DateTime) + curr_reads = Column(BigInteger, default=0) + curr_read_bytes = Column(BigInteger, default=0) + curr_writes = Column(BigInteger, default=0) + curr_write_bytes = Column(BigInteger, default=0) + + +class S3Image(BASE, NovaBase): + + """Compatibility layer for the S3 image service talking to Glance.""" + __tablename__ = 's3_images' + __table_args__ = () + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class VolumeIdMapping(BASE, NovaBase): + + """Compatibility layer for the EC2 volume service.""" + __tablename__ = 'volume_id_mappings' + __table_args__ = () + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class SnapshotIdMapping(BASE, NovaBase): + + """Compatibility layer for the EC2 snapshot service.""" + __tablename__ = 'snapshot_id_mappings' + __table_args__ = () + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class InstanceFault(BASE, NovaBase): + __tablename__ = 'instance_faults' + __table_args__ = ( + Index('instance_faults_host_idx', 'host'), + Index('instance_faults_instance_uuid_deleted_created_at_idx', + 'instance_uuid', 'deleted', 'created_at') + ) + + id = Column(Integer, primary_key=True, nullable=False) + instance_uuid = Column(String(36), + ForeignKey('instances.uuid')) + code = Column(Integer(), nullable=False) + message = Column(String(255)) + details = Column(MediumText()) + host = Column(String(255)) + + +class InstanceAction(BASE, NovaBase): + + """Track client actions on an instance. + + The intention is that there will only be one of these per user request. A + lookup by (instance_uuid, request_id) should always return a single result. 
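+ Individual steps taken while handling the action are recorded separately as InstanceActionEvent rows linked by action_id.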
+ """ + __tablename__ = 'instance_actions' + __table_args__ = ( + Index('instance_uuid_idx', 'instance_uuid'), + Index('request_id_idx', 'request_id') + ) + + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + action = Column(String(255)) + instance_uuid = Column(String(36), + ForeignKey('instances.uuid')) + request_id = Column(String(255)) + user_id = Column(String(255)) + project_id = Column(String(255)) + start_time = Column(DateTime, default=timeutils.utcnow) + finish_time = Column(DateTime) + message = Column(String(255)) + + +class InstanceActionEvent(BASE, NovaBase): + + """Track events that occur during an InstanceAction.""" + __tablename__ = 'instance_actions_events' + __table_args__ = () + + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + event = Column(String(255)) + action_id = Column(Integer, ForeignKey('instance_actions.id')) + start_time = Column(DateTime, default=timeutils.utcnow) + finish_time = Column(DateTime) + result = Column(String(255)) + traceback = Column(Text) + host = Column(String(255)) + details = Column(Text) + + +class InstanceIdMapping(BASE, NovaBase): + + """Compatibility layer for the EC2 instance service.""" + __tablename__ = 'instance_id_mappings' + __table_args__ = ( + Index('ix_instance_id_mappings_uuid', 'uuid'), + ) + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class TaskLog(BASE, NovaBase): + + """Audit log for background periodic tasks.""" + __tablename__ = 'task_log' + __table_args__ = ( + schema.UniqueConstraint( + 'task_name', 'host', 'period_beginning', 'period_ending', + name="uniq_task_log0task_name0host0period_beginning0period_ending" + ), + Index('ix_task_log_period_beginning', 'period_beginning'), + Index('ix_task_log_host', 'host'), + Index('ix_task_log_period_ending', 'period_ending'), + ) + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + task_name = Column(String(255), nullable=False) + state = Column(String(255), nullable=False) + host = Column(String(255), nullable=False) + period_beginning = Column(DateTime, default=timeutils.utcnow, + nullable=False) + period_ending = Column(DateTime, default=timeutils.utcnow, + nullable=False) + message = Column(String(255), nullable=False) + task_items = Column(Integer(), default=0) + errors = Column(Integer(), default=0) + + +class InstanceGroupMember(BASE, NovaBase): + + """Represents the members for an instance group.""" + __tablename__ = 'instance_group_member' + __table_args__ = ( + Index('instance_group_member_instance_idx', 'instance_id'), + ) + id = Column(Integer, primary_key=True, nullable=False) + instance_id = Column(String(255)) + group_id = Column(Integer, ForeignKey('instance_groups.id'), + nullable=False) + + +class InstanceGroupPolicy(BASE, NovaBase): + + """Represents the policy type for an instance group.""" + __tablename__ = 'instance_group_policy' + __table_args__ = ( + Index('instance_group_policy_policy_idx', 'policy'), + ) + id = Column(Integer, primary_key=True, nullable=False) + policy = Column(String(255)) + group_id = Column(Integer, ForeignKey('instance_groups.id'), + nullable=False) + + +class InstanceGroupMetadata(BASE, NovaBase): + + """Represents a key/value pair for an instance group.""" + __tablename__ = 'instance_group_metadata' + __table_args__ = ( + Index('instance_group_metadata_key_idx', 'key'), + ) + id = Column(Integer, primary_key=True, nullable=False) + key = Column(String(255)) + value = 
Column(String(255)) + group_id = Column(Integer, ForeignKey('instance_groups.id'), + nullable=False) + + +class InstanceGroup(BASE, NovaBase): + + """Represents an instance group. + + A group will maintain a collection of instances and the relationship + between them. + """ + + __tablename__ = 'instance_groups' + __table_args__ = ( + schema.UniqueConstraint("uuid", "deleted", + name="uniq_instance_groups0uuid0deleted"), + ) + + id = Column(Integer, primary_key=True, autoincrement=True) + user_id = Column(String(255)) + project_id = Column(String(255)) + uuid = Column(String(36), nullable=False) + name = Column(String(255)) + _policies = relationship( + InstanceGroupPolicy, + primaryjoin='and_(' + 'InstanceGroup.id == InstanceGroupPolicy.group_id,' + 'InstanceGroupPolicy.deleted == 0,' + 'InstanceGroup.deleted == 0)') + _metadata = relationship( + InstanceGroupMetadata, + primaryjoin='and_(' + 'InstanceGroup.id == InstanceGroupMetadata.group_id,' + 'InstanceGroupMetadata.deleted == 0,' + 'InstanceGroup.deleted == 0)') + _members = relationship(InstanceGroupMember, primaryjoin='and_(' + 'InstanceGroup.id == InstanceGroupMember.group_id,' + 'InstanceGroupMember.deleted == 0,' + 'InstanceGroup.deleted == 0)') + + @property + def policies(self): + return [p.policy for p in self._policies] + + @property + def metadetails(self): + return dict((m.key, m.value) for m in self._metadata) + + @property + def members(self): + return [m.instance_id for m in self._members] + + +class PciDevice(BASE, NovaBase): + + """Represents a PCI host device that can be passed through to instances. + """ + __tablename__ = 'pci_devices' + __table_args__ = ( + Index('ix_pci_devices_compute_node_id_deleted', + 'compute_node_id', 'deleted'), + Index('ix_pci_devices_instance_uuid_deleted', + 'instance_uuid', 'deleted'), + schema.UniqueConstraint( + "compute_node_id", "address", "deleted", + name="uniq_pci_devices0compute_node_id0address0deleted") + ) + id = Column(Integer, primary_key=True) + + compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'), + nullable=False) + + # physical address of device domain:bus:slot.func (0000:09:01.1) + address = Column(String(12), nullable=False) + + vendor_id = Column(String(4), nullable=False) + product_id = Column(String(4), nullable=False) + dev_type = Column(String(8), nullable=False) + dev_id = Column(String(255)) + + # label is abstract device name, that is used to unify devices with the + # same functionality with different addresses or host. + label = Column(String(255), nullable=False) + + status = Column(String(36), nullable=False) + + extra_info = Column(Text) + + instance_uuid = Column(String(36)) + instance = relationship(Instance, backref="pci_devices", + foreign_keys=instance_uuid, + primaryjoin='and_(' + 'PciDevice.instance_uuid == Instance.uuid,' + 'PciDevice.deleted == 0)') diff --git a/icehouse-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/api.py b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/api.py new file mode 100644 index 00000000..480401a6 --- /dev/null +++ b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/api.py @@ -0,0 +1,1250 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved +# Copyright (c) 2012 NEC Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import time + +from neutronclient.common import exceptions as neutron_client_exc +from oslo.config import cfg +import six + +from nova.compute import flavors +from nova import conductor +from nova.db import base +from nova import exception +from nova.network import api as network_api +from nova.network import model as network_model +from nova.network import neutronv2 +from nova.network.neutronv2 import constants +from nova.network.security_group import openstack_driver +from nova.openstack.common import excutils +from nova.openstack.common.gettextutils import _ +from nova.openstack.common import jsonutils +from nova.openstack.common import log as logging +from nova.openstack.common import uuidutils + +neutron_opts = [ + cfg.StrOpt('neutron_url', + default='http://127.0.0.1:9696', + help='URL for connecting to neutron'), + cfg.IntOpt('neutron_url_timeout', + default=30, + help='Timeout value for connecting to neutron in seconds'), + cfg.StrOpt('neutron_admin_username', + help='Username for connecting to neutron in admin context'), + cfg.StrOpt('neutron_admin_password', + help='Password for connecting to neutron in admin context', + secret=True), + cfg.StrOpt('neutron_admin_tenant_id', + help='Tenant id for connecting to neutron in admin context'), + cfg.StrOpt('neutron_admin_tenant_name', + help='Tenant name for connecting to neutron in admin context. ' + 'This option is mutually exclusive with ' + 'neutron_admin_tenant_id. 
Note that with Keystone V3 ' + 'tenant names are only unique within a domain.'), + cfg.StrOpt('neutron_region_name', + help='Region name for connecting to neutron in admin context'), + cfg.StrOpt('neutron_admin_auth_url', + default='http://localhost:5000/v2.0', + help='Authorization URL for connecting to neutron in admin ' + 'context'), + cfg.BoolOpt('neutron_api_insecure', + default=False, + help='If set, ignore any SSL validation issues'), + cfg.StrOpt('neutron_auth_strategy', + default='keystone', + help='Authorization strategy for connecting to ' + 'neutron in admin context'), + # TODO(berrange) temporary hack until Neutron can pass over the + # name of the OVS bridge it is configured with + cfg.StrOpt('neutron_ovs_bridge', + default='br-int', + help='Name of Integration Bridge used by Open vSwitch'), + cfg.IntOpt('neutron_extension_sync_interval', + default=600, + help='Number of seconds before querying neutron for' + ' extensions'), + cfg.StrOpt('neutron_ca_certificates_file', + help='Location of CA certificates file to use for ' + 'neutron client requests.'), +] + +CONF = cfg.CONF +CONF.register_opts(neutron_opts) +CONF.import_opt('default_floating_pool', 'nova.network.floating_ips') +CONF.import_opt('flat_injected', 'nova.network.manager') +LOG = logging.getLogger(__name__) + +refresh_cache = network_api.refresh_cache +update_instance_info_cache = network_api.update_instance_cache_with_nw_info + + +class API(base.Base): + + """API for interacting with the neutron 2.x API.""" + _sentinel = object() + + def __init__(self): + super(API, self).__init__() + self.last_neutron_extension_sync = None + self.extensions = {} + self.conductor_api = conductor.API() + self.security_group_api = ( + openstack_driver.get_openstack_security_group_driver()) + + def setup_networks_on_host(self, context, instance, host=None, + teardown=False): + """Setup or teardown the network structures.""" + + def _get_available_networks(self, context, project_id, + net_ids=None, neutron=None): + """Return a network list available for the tenant. + The list contains networks owned by the tenant and public networks. + If net_ids specified, it searches networks with requested IDs only. + """ + if not neutron: + neutron = neutronv2.get_client(context) + + if net_ids: + # If user has specified to attach instance only to specific + # networks then only add these to **search_opts. This search will + # also include 'shared' networks. + search_opts = {'id': net_ids} + nets = neutron.list_networks(**search_opts).get('networks', []) + else: + # (1) Retrieve non-public network list owned by the tenant. + search_opts = {'tenant_id': project_id, 'shared': False} + nets = neutron.list_networks(**search_opts).get('networks', []) + # (2) Retrieve public network list. + search_opts = {'shared': True} + nets += neutron.list_networks(**search_opts).get('networks', []) + + _ensure_requested_network_ordering( + lambda x: x['id'], + nets, + net_ids) + + if not context.is_admin: + for net in nets: + # Perform this check here rather than in validate_networks to + # ensure the check is performed everytime allocate_for_instance + # is invoked + if net.get('router:external'): + raise exception.ExternalNetworkAttachForbidden( + network_uuid=net['id']) + + return nets + + def _create_port(self, port_client, instance, network_id, port_req_body, + fixed_ip=None, security_group_ids=None, + available_macs=None, dhcp_opts=None): + """Attempts to create a port for the instance on the given network. 
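+ Fixed IP, MAC address, security groups and DHCP options are applied
+ when provided; on failure the exception propagates so the caller can
+ clean up any ports it already created.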
+ + :param port_client: The client to use to create the port. + :param instance: Create the port for the given instance. + :param network_id: Create the port on the given network. + :param port_req_body: Pre-populated port request. Should have the + device_id, device_owner, and any required neutron extension values. + :param fixed_ip: Optional fixed IP to use from the given network. + :param security_group_ids: Optional list of security group IDs to + apply to the port. + :param available_macs: Optional set of available MAC addresses to use. + :param dhcp_opts: Optional DHCP options. + :returns: ID of the created port. + :raises PortLimitExceeded: If neutron fails with an OverQuota error. + """ + try: + if fixed_ip: + port_req_body['port']['fixed_ips'] = [{'ip_address': fixed_ip}] + port_req_body['port']['network_id'] = network_id + port_req_body['port']['admin_state_up'] = True + port_req_body['port']['tenant_id'] = instance['project_id'] + if security_group_ids: + port_req_body['port']['security_groups'] = security_group_ids + if available_macs is not None: + if not available_macs: + raise exception.PortNotFree( + instance=instance['display_name']) + mac_address = available_macs.pop() + port_req_body['port']['mac_address'] = mac_address + if dhcp_opts is not None: + port_req_body['port']['extra_dhcp_opts'] = dhcp_opts + port_id = port_client.create_port(port_req_body)['port']['id'] + LOG.debug(_('Successfully created port: %s') % port_id, + instance=instance) + return port_id + except neutron_client_exc.NeutronClientException as e: + # NOTE(mriedem): OverQuota in neutron is a 409 + if e.status_code == 409: + LOG.warning(_('Neutron error: quota exceeded')) + raise exception.PortLimitExceeded() + with excutils.save_and_reraise_exception(): + LOG.exception(_('Neutron error creating port on network %s'), + network_id, instance=instance) + + def allocate_for_instance(self, context, instance, **kwargs): + """Allocate network resources for the instance. + + :param requested_networks: optional value containing + network_id, fixed_ip, and port_id + :param security_groups: security groups to allocate for instance + :param macs: None or a set of MAC addresses that the instance + should use. macs is supplied by the hypervisor driver (contrast + with requested_networks which is user supplied). + NB: NeutronV2 currently assigns hypervisor supplied MAC addresses + to arbitrary networks, which requires openflow switches to + function correctly if more than one network is being used with + the bare metal hypervisor (which is the only one known to limit + MAC addresses). + :param dhcp_options: None or a set of key/value pairs that should + determine the DHCP BOOTP response, eg. for PXE booting an instance + configured with the baremetal hypervisor. It is expected that these + are already formatted for the neutron v2 api. + See nova/virt/driver.py:dhcp_options_for_instance for an example. + """ + hypervisor_macs = kwargs.get('macs', None) + available_macs = None + if hypervisor_macs is not None: + # Make a copy we can mutate: records macs that have not been used + # to create a port on a network. If we find a mac with a + # pre-allocated port we also remove it from this set. 
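+ # The copy is shared across every port created below; once it is
+ # exhausted, _create_port() raises PortNotFree for the next network.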
+ available_macs = set(hypervisor_macs) + neutron = neutronv2.get_client(context) + LOG.debug(_('allocate_for_instance() for %s'), + instance['display_name']) + if not instance['project_id']: + msg = _('empty project id for instance %s') + raise exception.InvalidInput( + reason=msg % instance['display_name']) + requested_networks = kwargs.get('requested_networks') + dhcp_opts = kwargs.get('dhcp_options', None) + ports = {} + fixed_ips = {} + net_ids = [] + if requested_networks: + for network_id, fixed_ip, port_id in requested_networks: + if port_id: + port = neutron.show_port(port_id)['port'] + if port.get('device_id'): + raise exception.PortInUse(port_id=port_id) + if hypervisor_macs is not None: + if port['mac_address'] not in hypervisor_macs: + raise exception.PortNotUsable( + port_id=port_id, + instance=instance['display_name']) + else: + # Don't try to use this MAC if we need to create a + # port on the fly later. Identical MACs may be + # configured by users into multiple ports so we + # discard rather than popping. + available_macs.discard(port['mac_address']) + network_id = port['network_id'] + ports[network_id] = port + elif fixed_ip and network_id: + fixed_ips[network_id] = fixed_ip + if network_id: + net_ids.append(network_id) + + nets = self._get_available_networks(context, instance['project_id'], + net_ids) + + if not nets: + LOG.warn(_("No network configured!"), instance=instance) + return network_model.NetworkInfo([]) + + security_groups = kwargs.get('security_groups', []) + security_group_ids = [] + + # TODO(arosen) Should optimize more to do direct query for security + # group if len(security_groups) == 1 + if len(security_groups): + search_opts = {'tenant_id': instance['project_id']} + user_security_groups = neutron.list_security_groups( + **search_opts).get('security_groups') + + for security_group in security_groups: + name_match = None + uuid_match = None + for user_security_group in user_security_groups: + if user_security_group['name'] == security_group: + if name_match: + raise exception.NoUniqueMatch( + _("Multiple security groups found matching" + " '%s'. Use an ID to be more specific.") % + security_group) + + name_match = user_security_group['id'] + if user_security_group['id'] == security_group: + uuid_match = user_security_group['id'] + + # If a user names the security group the same as + # another's security groups uuid, the name takes priority. + if not name_match and not uuid_match: + raise exception.SecurityGroupNotFound( + security_group_id=security_group) + elif name_match: + security_group_ids.append(name_match) + elif uuid_match: + security_group_ids.append(uuid_match) + + touched_port_ids = [] + created_port_ids = [] + ports_in_requested_order = [] + for network in nets: + # If security groups are requested on an instance then the + # network must has a subnet associated with it. Some plugins + # implement the port-security extension which requires + # 'port_security_enabled' to be True for security groups. + # That is why True is returned if 'port_security_enabled' + # is not found. 
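+ # A requested security group on a network without subnets therefore
+ # results in SecurityGroupCannotBeApplied being raised below.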
+ if (security_groups and not ( + network['subnets'] + and network.get('port_security_enabled', True))): + + raise exception.SecurityGroupCannotBeApplied() + network_id = network['id'] + zone = 'compute:%s' % instance['availability_zone'] + port_req_body = {'port': {'device_id': instance['uuid'], + 'device_owner': zone}} + try: + port = ports.get(network_id) + self._populate_neutron_extension_values(context, instance, + port_req_body) + # Requires admin creds to set port bindings + port_client = (neutron if not + self._has_port_binding_extension(context) else + neutronv2.get_client(context, admin=True)) + if port: + if 'binding:profile' in port: + port_req_body['port']['binding:profile'] = \ + port['binding:profile'] + port_client.update_port(port['id'], port_req_body) + touched_port_ids.append(port['id']) + ports_in_requested_order.append(port['id']) + else: + created_port = self._create_port( + port_client, instance, network_id, + port_req_body, fixed_ips.get(network_id), + security_group_ids, available_macs, dhcp_opts) + created_port_ids.append(created_port) + ports_in_requested_order.append(created_port) + except Exception: + with excutils.save_and_reraise_exception(): + for port_id in touched_port_ids: + try: + port_req_body = {'port': {'device_id': None}} + # Requires admin creds to set port bindings + if self._has_port_binding_extension(context): + port_req_body['port']['binding:host_id'] = None + port_client = neutronv2.get_client( + context, admin=True) + else: + port_client = neutron + port_client.update_port(port_id, port_req_body) + except Exception: + msg = _("Failed to update port %s") + LOG.exception(msg, port_id) + + for port_id in created_port_ids: + try: + neutron.delete_port(port_id) + except Exception: + msg = _("Failed to delete port %s") + LOG.exception(msg, port_id) + + nw_info = self.get_instance_nw_info(context, instance, networks=nets, + port_ids=ports_in_requested_order) + # NOTE(danms): Only return info about ports we created in this run. + # In the initial allocation case, this will be everything we created, + # and in later runs will only be what was created that time. Thus, + # this only affects the attach case, not the original use for this + # method. + return network_model.NetworkInfo([port for port in nw_info + if port['id'] in created_port_ids + + touched_port_ids]) + + def _refresh_neutron_extensions_cache(self, context): + """Refresh the neutron extensions cache when necessary.""" + if (not self.last_neutron_extension_sync or + ((time.time() - self.last_neutron_extension_sync) + >= CONF.neutron_extension_sync_interval)): + neutron = neutronv2.get_client(context) + extensions_list = neutron.list_extensions()['extensions'] + self.last_neutron_extension_sync = time.time() + self.extensions.clear() + self.extensions = dict((ext['name'], ext) + for ext in extensions_list) + + def _has_port_binding_extension(self, context, refresh_cache=False): + if refresh_cache: + self._refresh_neutron_extensions_cache(context) + return constants.PORTBINDING_EXT in self.extensions + + def _populate_neutron_extension_values(self, context, instance, + port_req_body): + """Populate neutron extension values for the instance. + + If the extension contains nvp-qos then get the rxtx_factor. 
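+ If the port binding extension is loaded, binding:host_id is also
+ set to the instance's host.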
+ """ + self._refresh_neutron_extensions_cache(context) + if 'nvp-qos' in self.extensions: + flavor = flavors.extract_flavor(instance) + rxtx_factor = flavor.get('rxtx_factor') + port_req_body['port']['rxtx_factor'] = rxtx_factor + if self._has_port_binding_extension(context): + port_req_body['port']['binding:host_id'] = instance.get('host') + + def deallocate_for_instance(self, context, instance, **kwargs): + """Deallocate all network resources related to the instance.""" + LOG.debug(_('deallocate_for_instance() for %s'), + instance['display_name']) + search_opts = {'device_id': instance['uuid']} + neutron = neutronv2.get_client(context) + data = neutron.list_ports(**search_opts) + ports = [port['id'] for port in data.get('ports', [])] + + requested_networks = kwargs.get('requested_networks') or {} + ports_to_skip = [port_id for nets, fips, port_id in requested_networks] + ports = set(ports) - set(ports_to_skip) + + for port in ports: + try: + neutron.delete_port(port) + except neutronv2.exceptions.NeutronClientException as e: + if e.status_code == 404: + LOG.warning(_("Port %s does not exist"), port) + else: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to delete neutron port %s"), + port) + + # NOTE(arosen): This clears out the network_cache only if the instance + # hasn't already been deleted. This is needed when an instance fails to + # launch and is rescheduled onto another compute node. If the instance + # has already been deleted this call does nothing. + update_instance_info_cache(self, context, instance, + network_model.NetworkInfo([])) + + def allocate_port_for_instance(self, context, instance, port_id, + network_id=None, requested_ip=None): + """Allocate a port for the instance.""" + return self.allocate_for_instance( + context, instance, requested_networks=[ + (network_id, requested_ip, port_id)]) + + def deallocate_port_for_instance(self, context, instance, port_id): + """Remove a specified port from the instance. + + Return network information for the instance + """ + try: + neutronv2.get_client(context).delete_port(port_id) + except Exception: + LOG.exception(_("Failed to delete neutron port %s") % + port_id) + + return self.get_instance_nw_info(context, instance) + + def list_ports(self, context, **search_opts): + """List ports for the client based on search options.""" + return neutronv2.get_client(context).list_ports(**search_opts) + + def show_port(self, context, port_id): + """Return the port for the client given the port id.""" + return neutronv2.get_client(context).show_port(port_id) + + @refresh_cache + def get_instance_nw_info(self, context, instance, networks=None, + port_ids=None, use_slave=False): + """Return network information for specified instance + and update cache. + """ + # NOTE(geekinutah): It would be nice if use_slave had us call + # special APIs that pummeled slaves instead of + # the master. For now we just ignore this arg. + result = self._get_instance_nw_info(context, instance, networks, + port_ids) + return result + + def _get_instance_nw_info(self, context, instance, networks=None, + port_ids=None): + # keep this caching-free version of the get_instance_nw_info method + # because it is used by the caching logic itself. 
+ LOG.debug(_('get_instance_nw_info() for %s'), instance['display_name']) + nw_info = self._build_network_info_model(context, instance, networks, + port_ids) + return network_model.NetworkInfo.hydrate(nw_info) + + def _gather_port_ids_and_networks(self, context, instance, networks=None, + port_ids=None): + """Return an instance's complete list of port_ids and networks.""" + + if ((networks is None and port_ids is not None) or + (port_ids is None and networks is not None)): + message = ("This method needs to be called with either " + "networks=None and port_ids=None or port_ids and " + " networks as not none.") + raise exception.NovaException(message=message) + + # Unfortunately, this is sometimes in unicode and sometimes not + if isinstance(instance['info_cache']['network_info'], six.text_type): + ifaces = jsonutils.loads(instance['info_cache']['network_info']) + else: + ifaces = instance['info_cache']['network_info'] + + # This code path is only done when refreshing the network_cache + if port_ids is None: + port_ids = [iface['id'] for iface in ifaces] + net_ids = [iface['network']['id'] for iface in ifaces] + + if networks is None: + networks = self._get_available_networks(context, + instance['project_id'], + net_ids) + # an interface was added/removed from instance. + else: + # Since networks does not contain the existing networks on the + # instance we use their values from the cache and add it. + networks = networks + [ + {'id': iface['network']['id'], + 'name': iface['network']['label'], + 'tenant_id': iface['network']['meta']['tenant_id']} + for iface in ifaces] + + # Include existing interfaces so they are not removed from the db. + port_ids = [iface['id'] for iface in ifaces] + port_ids + + return networks, port_ids + + @refresh_cache + def add_fixed_ip_to_instance(self, context, instance, network_id): + """Add a fixed ip to the instance from specified network.""" + search_opts = {'network_id': network_id} + data = neutronv2.get_client(context).list_subnets(**search_opts) + ipam_subnets = data.get('subnets', []) + if not ipam_subnets: + raise exception.NetworkNotFoundForInstance( + instance_id=instance['uuid']) + + zone = 'compute:%s' % instance['availability_zone'] + search_opts = {'device_id': instance['uuid'], + 'device_owner': zone, + 'network_id': network_id} + data = neutronv2.get_client(context).list_ports(**search_opts) + ports = data['ports'] + for p in ports: + for subnet in ipam_subnets: + fixed_ips = p['fixed_ips'] + fixed_ips.append({'subnet_id': subnet['id']}) + port_req_body = {'port': {'fixed_ips': fixed_ips}} + try: + neutronv2.get_client(context).update_port(p['id'], + port_req_body) + return + except Exception as ex: + msg = _("Unable to update port %(portid)s on subnet " + "%(subnet_id)s with failure: %(exception)s") + LOG.debug(msg, {'portid': p['id'], + 'subnet_id': subnet['id'], + 'exception': ex}) + + raise exception.NetworkNotFoundForInstance( + instance_id=instance['uuid']) + + @refresh_cache + def remove_fixed_ip_from_instance(self, context, instance, address): + """Remove a fixed ip from the instance.""" + zone = 'compute:%s' % instance['availability_zone'] + search_opts = {'device_id': instance['uuid'], + 'device_owner': zone, + 'fixed_ips': 'ip_address=%s' % address} + data = neutronv2.get_client(context).list_ports(**search_opts) + ports = data['ports'] + for p in ports: + fixed_ips = p['fixed_ips'] + new_fixed_ips = [] + for fixed_ip in fixed_ips: + if fixed_ip['ip_address'] != address: + new_fixed_ips.append(fixed_ip) + port_req_body = {'port': 
{'fixed_ips': new_fixed_ips}} + try: + neutronv2.get_client(context).update_port(p['id'], + port_req_body) + except Exception as ex: + msg = _("Unable to update port %(portid)s with" + " failure: %(exception)s") + LOG.debug(msg, {'portid': p['id'], 'exception': ex}) + return + + raise exception.FixedIpNotFoundForSpecificInstance( + instance_uuid=instance['uuid'], ip=address) + + def validate_networks(self, context, requested_networks, num_instances): + """Validate that the tenant can use the requested networks. + + Return the number of instances than can be successfully allocated + with the requested network configuration. + """ + LOG.debug(_('validate_networks() for %s'), + requested_networks) + + neutron = neutronv2.get_client(context) + ports_needed_per_instance = 0 + + if not requested_networks: + nets = self._get_available_networks(context, context.project_id, + neutron=neutron) + if len(nets) > 1: + # Attaching to more than one network by default doesn't + # make sense, as the order will be arbitrary and the guest OS + # won't know which to configure + msg = _("Multiple possible networks found, use a Network " + "ID to be more specific.") + raise exception.NetworkAmbiguous(msg) + else: + ports_needed_per_instance = 1 + + else: + net_ids = [] + + for (net_id, _i, port_id) in requested_networks: + if port_id: + try: + port = neutron.show_port(port_id).get('port') + except neutronv2.exceptions.NeutronClientException as e: + if e.status_code == 404: + port = None + else: + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to access port %s"), + port_id) + if not port: + raise exception.PortNotFound(port_id=port_id) + if port.get('device_id', None): + raise exception.PortInUse(port_id=port_id) + if not port.get('fixed_ips'): + raise exception.PortRequiresFixedIP(port_id=port_id) + net_id = port['network_id'] + else: + ports_needed_per_instance += 1 + + if net_id in net_ids: + raise exception.NetworkDuplicated(network_id=net_id) + net_ids.append(net_id) + + # Now check to see if all requested networks exist + nets = self._get_available_networks(context, + context.project_id, net_ids, + neutron=neutron) + for net in nets: + if not net.get('subnets'): + raise exception.NetworkRequiresSubnet( + network_uuid=net['id']) + + if len(nets) != len(net_ids): + requsted_netid_set = set(net_ids) + returned_netid_set = set([net['id'] for net in nets]) + lostid_set = requsted_netid_set - returned_netid_set + id_str = '' + for _id in lostid_set: + id_str = id_str and id_str + ', ' + _id or _id + raise exception.NetworkNotFound(network_id=id_str) + + # Note(PhilD): Ideally Nova would create all required ports as part of + # network validation, but port creation requires some details + # from the hypervisor. So we just check the quota and return + # how many of the requested number of instances can be created + + ports = neutron.list_ports(tenant_id=context.project_id)['ports'] + quotas = neutron.show_quota(tenant_id=context.project_id)['quota'] + if quotas.get('port') == -1: + # Unlimited Port Quota + return num_instances + else: + free_ports = quotas.get('port') - len(ports) + ports_needed = ports_needed_per_instance * num_instances + if free_ports >= ports_needed: + return num_instances + else: + return free_ports // ports_needed_per_instance + + def _get_instance_uuids_by_ip(self, context, address): + """Retrieve instance uuids associated with the given ip address. + + :returns: A list of dicts containing the uuids keyed by 'instance_uuid' + e.g. [{'instance_uuid': uuid}, ...] 
+ """ + search_opts = {"fixed_ips": 'ip_address=%s' % address} + data = neutronv2.get_client(context).list_ports(**search_opts) + ports = data.get('ports', []) + return [{'instance_uuid': port['device_id']} for port in ports + if port['device_id']] + + def get_instance_uuids_by_ip_filter(self, context, filters): + """Return a list of dicts in the form of + [{'instance_uuid': uuid}] that matched the ip filter. + """ + # filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.') + ip = filters.get('ip') + # we remove ^$\ in the ip filer + if ip[0] == '^': + ip = ip[1:] + if ip[-1] == '$': + ip = ip[:-1] + ip = ip.replace('\\.', '.') + return self._get_instance_uuids_by_ip(context, ip) + + def _get_port_id_by_fixed_address(self, client, + instance, address): + """Return port_id from a fixed address.""" + zone = 'compute:%s' % instance['availability_zone'] + search_opts = {'device_id': instance['uuid'], + 'device_owner': zone} + data = client.list_ports(**search_opts) + ports = data['ports'] + port_id = None + for p in ports: + for ip in p['fixed_ips']: + if ip['ip_address'] == address: + port_id = p['id'] + break + if not port_id: + raise exception.FixedIpNotFoundForAddress(address=address) + return port_id + + @refresh_cache + def associate_floating_ip(self, context, instance, + floating_address, fixed_address, + affect_auto_assigned=False): + """Associate a floating ip with a fixed ip.""" + + # Note(amotoki): 'affect_auto_assigned' is not respected + # since it is not used anywhere in nova code and I could + # find why this parameter exists. + + client = neutronv2.get_client(context) + port_id = self._get_port_id_by_fixed_address(client, instance, + fixed_address) + fip = self._get_floating_ip_by_address(client, floating_address) + param = {'port_id': port_id, + 'fixed_ip_address': fixed_address} + client.update_floatingip(fip['id'], {'floatingip': param}) + + if fip['port_id']: + port = client.show_port(fip['port_id'])['port'] + orig_instance_uuid = port['device_id'] + + msg_dict = dict(address=floating_address, + instance_id=orig_instance_uuid) + LOG.info(_('re-assign floating IP %(address)s from ' + 'instance %(instance_id)s') % msg_dict) + orig_instance = self.db.instance_get_by_uuid(context, + orig_instance_uuid) + + # purge cached nw info for the original instance + update_instance_info_cache(self, context, orig_instance) + + def get_all(self, context): + """Get all networks for client.""" + client = neutronv2.get_client(context) + networks = client.list_networks().get('networks') + for network in networks: + network['label'] = network['name'] + return networks + + def get(self, context, network_uuid): + """Get specific network for client.""" + client = neutronv2.get_client(context) + network = client.show_network(network_uuid).get('network') or {} + network['label'] = network['name'] + return network + + def delete(self, context, network_uuid): + """Delete a network for client.""" + raise NotImplementedError() + + def disassociate(self, context, network_uuid): + """Disassociate a network for client.""" + raise NotImplementedError() + + def associate(self, context, network_uuid, host=_sentinel, + project=_sentinel): + """Associate a network for client.""" + raise NotImplementedError() + + def get_fixed_ip(self, context, id): + """Get a fixed ip from the id.""" + raise NotImplementedError() + + def get_fixed_ip_by_address(self, context, address): + """Return instance uuids given an address.""" + uuid_maps = self._get_instance_uuids_by_ip(context, address) + if len(uuid_maps) == 
1: + return uuid_maps[0] + elif not uuid_maps: + raise exception.FixedIpNotFoundForAddress(address=address) + else: + raise exception.FixedIpAssociatedWithMultipleInstances( + address=address) + + def _setup_net_dict(self, client, network_id): + if not network_id: + return {} + pool = client.show_network(network_id)['network'] + return {pool['id']: pool} + + def _setup_port_dict(self, client, port_id): + if not port_id: + return {} + port = client.show_port(port_id)['port'] + return {port['id']: port} + + def _setup_pools_dict(self, client): + pools = self._get_floating_ip_pools(client) + return dict([(i['id'], i) for i in pools]) + + def _setup_ports_dict(self, client, project_id=None): + search_opts = {'tenant_id': project_id} if project_id else {} + ports = client.list_ports(**search_opts)['ports'] + return dict([(p['id'], p) for p in ports]) + + def get_floating_ip(self, context, id): + """Return floating ip object given the floating ip id.""" + client = neutronv2.get_client(context) + try: + fip = client.show_floatingip(id)['floatingip'] + except neutronv2.exceptions.NeutronClientException as e: + if e.status_code == 404: + raise exception.FloatingIpNotFound(id=id) + else: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Unable to access floating IP %s'), id) + pool_dict = self._setup_net_dict(client, + fip['floating_network_id']) + port_dict = self._setup_port_dict(client, fip['port_id']) + return self._format_floating_ip_model(fip, pool_dict, port_dict) + + def _get_floating_ip_pools(self, client, project_id=None): + search_opts = {constants.NET_EXTERNAL: True} + if project_id: + search_opts.update({'tenant_id': project_id}) + data = client.list_networks(**search_opts) + return data['networks'] + + def get_floating_ip_pools(self, context): + """Return floating ip pools.""" + client = neutronv2.get_client(context) + pools = self._get_floating_ip_pools(client) + return [{'name': n['name'] or n['id']} for n in pools] + + def _format_floating_ip_model(self, fip, pool_dict, port_dict): + pool = pool_dict[fip['floating_network_id']] + result = {'id': fip['id'], + 'address': fip['floating_ip_address'], + 'pool': pool['name'] or pool['id'], + 'project_id': fip['tenant_id'], + # In Neutron v2, an exact fixed_ip_id does not exist. + 'fixed_ip_id': fip['port_id'], + } + # In Neutron v2 API fixed_ip_address and instance uuid + # (= device_id) are known here, so pass it as a result. 
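+ # Example result shape (values illustrative):
+ # {'id': <fip id>, 'address': '172.24.4.3', 'pool': 'public',
+ # 'project_id': <tenant id>, 'fixed_ip_id': <port id>,
+ # 'fixed_ip': {'address': '10.0.0.3'}, 'instance': {'uuid': <uuid>}}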
+ result['fixed_ip'] = {'address': fip['fixed_ip_address']} + if fip['port_id']: + instance_uuid = port_dict[fip['port_id']]['device_id'] + result['instance'] = {'uuid': instance_uuid} + else: + result['instance'] = None + return result + + def get_floating_ip_by_address(self, context, address): + """Return a floating ip given an address.""" + client = neutronv2.get_client(context) + fip = self._get_floating_ip_by_address(client, address) + pool_dict = self._setup_net_dict(client, + fip['floating_network_id']) + port_dict = self._setup_port_dict(client, fip['port_id']) + return self._format_floating_ip_model(fip, pool_dict, port_dict) + + def get_floating_ips_by_project(self, context): + client = neutronv2.get_client(context) + project_id = context.project_id + fips = client.list_floatingips(tenant_id=project_id)['floatingips'] + pool_dict = self._setup_pools_dict(client) + port_dict = self._setup_ports_dict(client, project_id) + return [self._format_floating_ip_model(fip, pool_dict, port_dict) + for fip in fips] + + def get_floating_ips_by_fixed_address(self, context, fixed_address): + raise NotImplementedError() + + def get_instance_id_by_floating_address(self, context, address): + """Return the instance id a floating ip's fixed ip is allocated to.""" + client = neutronv2.get_client(context) + fip = self._get_floating_ip_by_address(client, address) + if not fip['port_id']: + return None + port = client.show_port(fip['port_id'])['port'] + return port['device_id'] + + def get_vifs_by_instance(self, context, instance): + raise NotImplementedError() + + def get_vif_by_mac_address(self, context, mac_address): + raise NotImplementedError() + + def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id): + search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'} + if uuidutils.is_uuid_like(name_or_id): + search_opts.update({'id': name_or_id}) + else: + search_opts.update({'name': name_or_id}) + data = client.list_networks(**search_opts) + nets = data['networks'] + + if len(nets) == 1: + return nets[0]['id'] + elif len(nets) == 0: + raise exception.FloatingIpPoolNotFound() + else: + msg = (_("Multiple floating IP pools matches found for name '%s'") + % name_or_id) + raise exception.NovaException(message=msg) + + def allocate_floating_ip(self, context, pool=None): + """Add a floating ip to a project from a pool.""" + client = neutronv2.get_client(context) + pool = pool or CONF.default_floating_pool + pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool) + + # TODO(amotoki): handle exception during create_floatingip() + # At this timing it is ensured that a network for pool exists. + # quota error may be returned. 
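+ # Address-exhaustion errors from neutron are translated into
+ # NoMoreFloatingIps below; any other error propagates unchanged.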
+ param = {'floatingip': {'floating_network_id': pool_id}} + try: + fip = client.create_floatingip(param) + except (neutron_client_exc.IpAddressGenerationFailureClient, + neutron_client_exc.ExternalIpAddressExhaustedClient) as e: + raise exception.NoMoreFloatingIps(unicode(e)) + return fip['floatingip']['floating_ip_address'] + + def _get_floating_ip_by_address(self, client, address): + """Get floatingip from floating ip address.""" + if not address: + raise exception.FloatingIpNotFoundForAddress(address=address) + data = client.list_floatingips(floating_ip_address=address) + fips = data['floatingips'] + if len(fips) == 0: + raise exception.FloatingIpNotFoundForAddress(address=address) + elif len(fips) > 1: + raise exception.FloatingIpMultipleFoundForAddress(address=address) + return fips[0] + + def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port): + """Get floatingips from fixed ip and port.""" + try: + data = client.list_floatingips(fixed_ip_address=fixed_ip, + port_id=port) + # If a neutron plugin does not implement the L3 API a 404 from + # list_floatingips will be raised. + except neutronv2.exceptions.NeutronClientException as e: + if e.status_code == 404: + return [] + with excutils.save_and_reraise_exception(): + LOG.exception(_('Unable to access floating IP %(fixed_ip)s ' + 'for port %(port_id)s'), + {'fixed_ip': fixed_ip, 'port_id': port}) + return data['floatingips'] + + def release_floating_ip(self, context, address, + affect_auto_assigned=False): + """Remove a floating ip with the given address from a project.""" + + # Note(amotoki): We cannot handle a case where multiple pools + # have overlapping IP address range. In this case we cannot use + # 'address' as a unique key. + # This is a limitation of the current nova. + + # Note(amotoki): 'affect_auto_assigned' is not respected + # since it is not used anywhere in nova code and I could + # find why this parameter exists. + + client = neutronv2.get_client(context) + fip = self._get_floating_ip_by_address(client, address) + if fip['port_id']: + raise exception.FloatingIpAssociated(address=address) + client.delete_floatingip(fip['id']) + + @refresh_cache + def disassociate_floating_ip(self, context, instance, address, + affect_auto_assigned=False): + """Disassociate a floating ip from the instance.""" + + # Note(amotoki): 'affect_auto_assigned' is not respected + # since it is not used anywhere in nova code and I could + # find why this parameter exists. + + client = neutronv2.get_client(context) + fip = self._get_floating_ip_by_address(client, address) + client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}}) + + def migrate_instance_start(self, context, instance, migration): + """Start to migrate the network of an instance.""" + # NOTE(wenjianhn): just pass to make migrate instance doesn't + # raise for now. 
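+ # Port bindings are moved to the destination host in
+ # migrate_instance_finish() instead.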
+ pass + + def migrate_instance_finish(self, context, instance, migration): + """Finish migrating the network of an instance.""" + if not self._has_port_binding_extension(context, refresh_cache=True): + return + neutron = neutronv2.get_client(context, admin=True) + search_opts = {'device_id': instance['uuid'], + 'tenant_id': instance['project_id']} + data = neutron.list_ports(**search_opts) + ports = data['ports'] + for p in ports: + port_req_body = {'port': {'binding:host_id': + migration['dest_compute']}} + try: + neutron.update_port(p['id'], port_req_body) + except Exception: + with excutils.save_and_reraise_exception(): + msg = _("Unable to update host of port %s") + LOG.exception(msg, p['id']) + + def add_network_to_project(self, context, project_id, network_uuid=None): + """Force add a network to the project.""" + raise NotImplementedError() + + def _nw_info_get_ips(self, client, port): + network_IPs = [] + for fixed_ip in port['fixed_ips']: + fixed = network_model.FixedIP(address=fixed_ip['ip_address']) + floats = self._get_floating_ips_by_fixed_and_port( + client, fixed_ip['ip_address'], port['id']) + for ip in floats: + fip = network_model.IP(address=ip['floating_ip_address'], + type='floating') + fixed.add_floating_ip(fip) + network_IPs.append(fixed) + return network_IPs + + def _nw_info_get_subnets(self, context, port, network_IPs): + subnets = self._get_subnets_from_port(context, port) + for subnet in subnets: + subnet['ips'] = [fixed_ip for fixed_ip in network_IPs + if fixed_ip.is_in_subnet(subnet)] + return subnets + + def _nw_info_build_network(self, port, networks, subnets): + network_name = None + for net in networks: + if port['network_id'] == net['id']: + network_name = net['name'] + tenant_id = net['tenant_id'] + break + else: + tenant_id = port['tenant_id'] + LOG.warning(_("Network %(id)s not matched with the tenants " + "network! The ports tenant %(tenant_id)s will be " + "used."), + {'id': port['network_id'], 'tenant_id': tenant_id}) + + bridge = None + ovs_interfaceid = None + # Network model metadata + should_create_bridge = None + vif_type = port.get('binding:vif_type') + # TODO(berrange) Neutron should pass the bridge name + # in another binding metadata field + if vif_type == network_model.VIF_TYPE_OVS: + bridge = CONF.neutron_ovs_bridge + ovs_interfaceid = port['id'] + elif vif_type == network_model.VIF_TYPE_BRIDGE: + bridge = "brq" + port['network_id'] + should_create_bridge = True + + if bridge is not None: + bridge = bridge[:network_model.NIC_NAME_LEN] + + network = network_model.Network( + id=port['network_id'], + bridge=bridge, + injected=CONF.flat_injected, + label=network_name, + tenant_id=tenant_id + ) + network['subnets'] = subnets + port_profile = port.get('binding:profile') + if port_profile: + physical_network = port_profile.get('physical_network') + if physical_network: + network['physical_network'] = physical_network + + if should_create_bridge is not None: + network['should_create_bridge'] = should_create_bridge + return network, ovs_interfaceid + + def _build_network_info_model(self, context, instance, networks=None, + port_ids=None): + """Return list of ordered VIFs attached to instance. + + :param context - request context. + :param instance - instance we are returning network info for. + :param networks - List of networks being attached to an instance. + If value is None this value will be populated + from the existing cached value. + :param port_ids - List of port_ids that are being attached to an + instance in order of attachment. 
If value is None + this value will be populated from the existing + cached value. + """ + + search_opts = {'tenant_id': instance['project_id'], + 'device_id': instance['uuid'], } + client = neutronv2.get_client(context, admin=True) + data = client.list_ports(**search_opts) + + current_neutron_ports = data.get('ports', []) + networks, port_ids = self._gather_port_ids_and_networks( + context, instance, networks, port_ids) + nw_info = network_model.NetworkInfo() + + current_neutron_port_map = {} + for current_neutron_port in current_neutron_ports: + current_neutron_port_map[current_neutron_port['id']] = ( + current_neutron_port) + + for port_id in port_ids: + current_neutron_port = current_neutron_port_map.get(port_id) + if current_neutron_port: + vif_active = False + if (current_neutron_port['admin_state_up'] is False + or current_neutron_port['status'] == 'ACTIVE'): + vif_active = True + + network_IPs = self._nw_info_get_ips(client, + current_neutron_port) + subnets = self._nw_info_get_subnets(context, + current_neutron_port, + network_IPs) + + devname = "tap" + current_neutron_port['id'] + devname = devname[:network_model.NIC_NAME_LEN] + + network, ovs_interfaceid = ( + self._nw_info_build_network(current_neutron_port, + networks, subnets)) + + nw_info.append(network_model.VIF( + id=current_neutron_port['id'], + address=current_neutron_port['mac_address'], + network=network, + type=current_neutron_port.get('binding:vif_type'), + details=current_neutron_port.get('binding:vif_details'), + ovs_interfaceid=ovs_interfaceid, + devname=devname, + active=vif_active)) + + return nw_info + + def _get_subnets_from_port(self, context, port): + """Return the subnets for a given port.""" + + fixed_ips = port['fixed_ips'] + # No fixed_ips for the port means there is no subnet associated + # with the network the port is created on. + # Since list_subnets(id=[]) returns all subnets visible for the + # current tenant, returned subnets may contain subnets which is not + # related to the port. To avoid this, the method returns here. + if not fixed_ips: + return [] + search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]} + data = neutronv2.get_client(context).list_subnets(**search_opts) + ipam_subnets = data.get('subnets', []) + subnets = [] + + for subnet in ipam_subnets: + subnet_dict = {'cidr': subnet['cidr'], + 'gateway': network_model.IP( + address=subnet['gateway_ip'], + type='gateway'), + } + + # attempt to populate DHCP server field + search_opts = {'network_id': subnet['network_id'], + 'device_owner': 'network:dhcp'} + data = neutronv2.get_client(context).list_ports(**search_opts) + dhcp_ports = data.get('ports', []) + for p in dhcp_ports: + for ip_pair in p['fixed_ips']: + if ip_pair['subnet_id'] == subnet['id']: + subnet_dict['dhcp_server'] = ip_pair['ip_address'] + break + + subnet_object = network_model.Subnet(**subnet_dict) + for dns in subnet.get('dns_nameservers', []): + subnet_object.add_dns( + network_model.IP(address=dns, type='dns')) + + # TODO(gongysh) get the routes for this subnet + subnets.append(subnet_object) + return subnets + + def get_dns_domains(self, context): + """Return a list of available dns domains. + + These can be used to create DNS entries for floating ips. 
+ """ + raise NotImplementedError() + + def add_dns_entry(self, context, address, name, dns_type, domain): + """Create specified DNS entry for address.""" + raise NotImplementedError() + + def modify_dns_entry(self, context, name, address, domain): + """Create specified DNS entry for address.""" + raise NotImplementedError() + + def delete_dns_entry(self, context, name, domain): + """Delete the specified dns entry.""" + raise NotImplementedError() + + def delete_dns_domain(self, context, domain): + """Delete the specified dns domain.""" + raise NotImplementedError() + + def get_dns_entries_by_address(self, context, address, domain): + """Get entries for address and domain.""" + raise NotImplementedError() + + def get_dns_entries_by_name(self, context, name, domain): + """Get entries for name and domain.""" + raise NotImplementedError() + + def create_private_dns_domain(self, context, domain, availability_zone): + """Create a private DNS domain with nova availability zone.""" + raise NotImplementedError() + + def create_public_dns_domain(self, context, domain, project=None): + """Create a private DNS domain with optional nova project.""" + raise NotImplementedError() + + +def _ensure_requested_network_ordering(accessor, unordered, preferred): + """Sort a list with respect to the preferred network ordering.""" + if preferred: + unordered.sort(key=lambda i: preferred.index(accessor(i))) diff --git a/icehouse-patches/nova/instance_mapping_uuid_patch/nova/objects/instance.py b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/objects/instance.py new file mode 100644 index 00000000..17abda76 --- /dev/null +++ b/icehouse-patches/nova/instance_mapping_uuid_patch/nova/objects/instance.py @@ -0,0 +1,739 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.cells import opts as cells_opts +from nova.cells import rpcapi as cells_rpcapi +from nova.compute import flavors +from nova import db +from nova import exception +from nova import notifications +from nova.objects import base +from nova.objects import fields +from nova.objects import flavor as flavor_obj +from nova.objects import instance_fault +from nova.objects import instance_info_cache +from nova.objects import pci_device +from nova.objects import security_group +from nova.openstack.common.gettextutils import _ +from nova.openstack.common import log as logging +from nova.openstack.common import timeutils +from nova import utils + +from oslo.config import cfg + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +# List of fields that can be joined in DB layer. 
+_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata', + 'info_cache', 'security_groups', + 'pci_devices'] +# These are fields that are optional but don't translate to db columns +_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault'] + +# These are fields that can be specified as expected_attrs +INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS + + _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS) +# These are fields that most query calls load by default +INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata', + 'info_cache', 'security_groups'] + + +def _expected_cols(expected_attrs): + """Return expected_attrs that are columns needing joining.""" + if not expected_attrs: + return expected_attrs + return [attr for attr in expected_attrs + if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS] + + +class Instance(base.NovaPersistentObject, base.NovaObject): + # Version 1.0: Initial version + # Version 1.1: Added info_cache + # Version 1.2: Added security_groups + # Version 1.3: Added expected_vm_state and admin_state_reset to + # save() + # Version 1.4: Added locked_by and deprecated locked + # Version 1.5: Added cleaned + # Version 1.6: Added pci_devices + # Version 1.7: String attributes updated to support unicode + # Version 1.8: 'security_groups' and 'pci_devices' cannot be None + # Version 1.9: Make uuid a non-None real string + # Version 1.10: Added use_slave to refresh and get_by_uuid + # Version 1.11: Update instance from database during destroy + # Version 1.12: Added ephemeral_key_uuid + # Version 1.13: Added delete_metadata_key() + VERSION = '1.13' + + fields = { + 'id': fields.IntegerField(), + + 'user_id': fields.StringField(nullable=True), + 'project_id': fields.StringField(nullable=True), + + 'image_ref': fields.StringField(nullable=True), + 'kernel_id': fields.StringField(nullable=True), + 'ramdisk_id': fields.StringField(nullable=True), + 'hostname': fields.StringField(nullable=True), + + 'launch_index': fields.IntegerField(nullable=True), + 'key_name': fields.StringField(nullable=True), + 'key_data': fields.StringField(nullable=True), + + 'power_state': fields.IntegerField(nullable=True), + 'vm_state': fields.StringField(nullable=True), + 'task_state': fields.StringField(nullable=True), + + 'memory_mb': fields.IntegerField(nullable=True), + 'vcpus': fields.IntegerField(nullable=True), + 'root_gb': fields.IntegerField(nullable=True), + 'ephemeral_gb': fields.IntegerField(nullable=True), + 'ephemeral_key_uuid': fields.UUIDField(nullable=True), + + 'host': fields.StringField(nullable=True), + 'node': fields.StringField(nullable=True), + + 'instance_type_id': fields.IntegerField(nullable=True), + + 'user_data': fields.StringField(nullable=True), + + 'reservation_id': fields.StringField(nullable=True), + + 'scheduled_at': fields.DateTimeField(nullable=True), + 'launched_at': fields.DateTimeField(nullable=True), + 'terminated_at': fields.DateTimeField(nullable=True), + + 'availability_zone': fields.StringField(nullable=True), + + 'display_name': fields.StringField(nullable=True), + 'display_description': fields.StringField(nullable=True), + + 'launched_on': fields.StringField(nullable=True), + + # NOTE(jdillaman): locked deprecated in favor of locked_by, + # to be removed in Icehouse + 'locked': fields.BooleanField(default=False), + 'locked_by': fields.StringField(nullable=True), + + 'os_type': fields.StringField(nullable=True), + 'architecture': fields.StringField(nullable=True), + 'vm_mode': fields.StringField(nullable=True), + 'uuid': fields.UUIDField(), + 'mapping_uuid': 
fields.UUIDField(nullable=True), + + 'root_device_name': fields.StringField(nullable=True), + 'default_ephemeral_device': fields.StringField(nullable=True), + 'default_swap_device': fields.StringField(nullable=True), + 'config_drive': fields.StringField(nullable=True), + + 'access_ip_v4': fields.IPV4AddressField(nullable=True), + 'access_ip_v6': fields.IPV6AddressField(nullable=True), + + 'auto_disk_config': fields.BooleanField(default=False), + 'progress': fields.IntegerField(nullable=True), + + 'shutdown_terminate': fields.BooleanField(default=False), + 'disable_terminate': fields.BooleanField(default=False), + + 'cell_name': fields.StringField(nullable=True), + + 'metadata': fields.DictOfStringsField(), + 'system_metadata': fields.DictOfNullableStringsField(), + + 'info_cache': fields.ObjectField('InstanceInfoCache', + nullable=True), + + 'security_groups': fields.ObjectField('SecurityGroupList'), + + 'fault': fields.ObjectField('InstanceFault', nullable=True), + + 'cleaned': fields.BooleanField(default=False), + + 'pci_devices': fields.ObjectField('PciDeviceList', nullable=True), + } + + obj_extra_fields = ['name'] + + def __init__(self, *args, **kwargs): + super(Instance, self).__init__(*args, **kwargs) + self._reset_metadata_tracking() + + def _reset_metadata_tracking(self, fields=None): + if fields is None or 'system_metadata' in fields: + self._orig_system_metadata = (dict(self.system_metadata) if + 'system_metadata' in self else {}) + if fields is None or 'metadata' in fields: + self._orig_metadata = (dict(self.metadata) if + 'metadata' in self else {}) + + def obj_reset_changes(self, fields=None): + super(Instance, self).obj_reset_changes(fields) + self._reset_metadata_tracking(fields=fields) + + def obj_what_changed(self): + changes = super(Instance, self).obj_what_changed() + if 'metadata' in self and self.metadata != self._orig_metadata: + changes.add('metadata') + if 'system_metadata' in self and (self.system_metadata != + self._orig_system_metadata): + changes.add('system_metadata') + return changes + + @classmethod + def _obj_from_primitive(cls, context, objver, primitive): + self = super(Instance, cls)._obj_from_primitive(context, objver, + primitive) + self._reset_metadata_tracking() + return self + + def obj_make_compatible(self, primitive, target_version): + target_version = (int(target_version.split('.')[0]), + int(target_version.split('.')[1])) + unicode_attributes = ['user_id', 'project_id', 'image_ref', + 'kernel_id', 'ramdisk_id', 'hostname', + 'key_name', 'key_data', 'host', 'node', + 'user_data', 'availability_zone', + 'display_name', 'display_description', + 'launched_on', 'locked_by', 'os_type', + 'architecture', 'vm_mode', 'root_device_name', + 'default_ephemeral_device', + 'default_swap_device', 'config_drive', + 'cell_name'] + if target_version < (1, 10) and 'info_cache' in primitive: + # NOTE(danms): Instance <= 1.9 (havana) had info_cache 1.4 + self.info_cache.obj_make_compatible(primitive['info_cache'], + '1.4') + primitive['info_cache']['nova_object.version'] = '1.4' + if target_version < (1, 7): + # NOTE(danms): Before 1.7, we couldn't handle unicode in + # string fields, so squash it here + for field in [x for x in unicode_attributes if x in primitive + and primitive[x] is not None]: + primitive[field] = primitive[field].encode('ascii', 'replace') + if target_version < (1, 6): + # NOTE(danms): Before 1.6 there was no pci_devices list + if 'pci_devices' in primitive: + del primitive['pci_devices'] + + @property + def name(self): + try: + base_name = 
CONF.instance_name_template % self.id + except TypeError: + # Support templates like "uuid-%(uuid)s", etc. + info = {} + # NOTE(russellb): Don't use self.iteritems() here, as it will + # result in infinite recursion on the name property. + for key in self.fields: + if key == 'name': + # NOTE(danms): prevent recursion + continue + elif not self.obj_attr_is_set(key): + # NOTE(danms): Don't trigger lazy-loads + continue + info[key] = self[key] + try: + base_name = CONF.instance_name_template % info + except KeyError: + base_name = self.uuid + return base_name + + @staticmethod + def _from_db_object(context, instance, db_inst, expected_attrs=None): + """Method to help with migration to objects. + + Converts a database entity to a formal object. + """ + if expected_attrs is None: + expected_attrs = [] + # Most of the field names match right now, so be quick + for field in instance.fields: + if field in INSTANCE_OPTIONAL_ATTRS: + continue + elif field == 'deleted': + instance.deleted = db_inst['deleted'] == db_inst['id'] + elif field == 'cleaned': + instance.cleaned = db_inst['cleaned'] == 1 + else: + instance[field] = db_inst[field] + + if 'metadata' in expected_attrs: + instance['metadata'] = utils.instance_meta(db_inst) + if 'system_metadata' in expected_attrs: + instance['system_metadata'] = utils.instance_sys_meta(db_inst) + if 'fault' in expected_attrs: + instance['fault'] = ( + instance_fault.InstanceFault.get_latest_for_instance( + context, instance.uuid)) + + if 'pci_devices' in expected_attrs: + pci_devices = base.obj_make_list( + context, pci_device.PciDeviceList(), + pci_device.PciDevice, db_inst['pci_devices']) + instance['pci_devices'] = pci_devices + if 'info_cache' in expected_attrs: + if db_inst['info_cache'] is None: + instance.info_cache = None + elif not instance.obj_attr_is_set('info_cache'): + # TODO(danms): If this ever happens on a backlevel instance + # passed to us by a backlevel service, things will break + instance.info_cache = instance_info_cache.InstanceInfoCache() + if instance.info_cache is not None: + instance_info_cache.InstanceInfoCache._from_db_object( + context, instance.info_cache, db_inst['info_cache']) + if 'security_groups' in expected_attrs: + sec_groups = base.obj_make_list( + context, security_group.SecurityGroupList(), + security_group.SecurityGroup, db_inst['security_groups']) + instance['security_groups'] = sec_groups + + instance._context = context + instance.obj_reset_changes() + return instance + + @base.remotable_classmethod + def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False): + if expected_attrs is None: + expected_attrs = ['info_cache', 'security_groups'] + columns_to_join = _expected_cols(expected_attrs) + db_inst = db.instance_get_by_uuid(context, uuid, + columns_to_join=columns_to_join, + use_slave=use_slave) + return cls._from_db_object(context, cls(), db_inst, + expected_attrs) + + @base.remotable_classmethod + def get_by_id(cls, context, inst_id, expected_attrs=None): + if expected_attrs is None: + expected_attrs = ['info_cache', 'security_groups'] + columns_to_join = _expected_cols(expected_attrs) + db_inst = db.instance_get(context, inst_id, + columns_to_join=columns_to_join) + return cls._from_db_object(context, cls(), db_inst, + expected_attrs) + + @base.remotable + def create(self, context): + if self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='create', + reason='already created') + updates = self.obj_get_changes() + updates.pop('id', None) + expected_attrs = [attr for attr in 
INSTANCE_DEFAULT_FIELDS + if attr in updates] + if 'security_groups' in updates: + updates['security_groups'] = [x.name for x in + updates['security_groups']] + if 'info_cache' in updates: + updates['info_cache'] = { + 'network_info': updates['info_cache'].network_info.json() + } + db_inst = db.instance_create(context, updates) + Instance._from_db_object(context, self, db_inst, expected_attrs) + + @base.remotable + def destroy(self, context): + if not self.obj_attr_is_set('id'): + raise exception.ObjectActionError(action='destroy', + reason='already destroyed') + if not self.obj_attr_is_set('uuid'): + raise exception.ObjectActionError(action='destroy', + reason='no uuid') + if not self.obj_attr_is_set('host') or not self.host: + # NOTE(danms): If our host is not set, avoid a race + constraint = db.constraint(host=db.equal_any(None)) + else: + constraint = None + + try: + db_inst = db.instance_destroy(context, self.uuid, + constraint=constraint) + Instance._from_db_object(context, self, db_inst) + except exception.ConstraintNotMet: + raise exception.ObjectActionError(action='destroy', + reason='host changed') + delattr(self, base.get_attrname('id')) + + def _save_info_cache(self, context): + self.info_cache.save(context) + + def _save_security_groups(self, context): + for secgroup in self.security_groups: + secgroup.save(context) + self.security_groups.obj_reset_changes() + + def _save_fault(self, context): + # NOTE(danms): I don't think we need to worry about this, do we? + pass + + def _save_pci_devices(self, context): + # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker + # permitted to update the DB. all change to devices from here will + # be dropped. + pass + + @base.remotable + def save(self, context, expected_vm_state=None, + expected_task_state=None, admin_state_reset=False): + """Save updates to this instance + + Column-wise updates will be made based on the result of + self.what_changed(). If expected_task_state is provided, + it will be checked against the in-database copy of the + instance before updates are made. + :param context: Security context + :param expected_task_state: Optional tuple of valid task states + for the instance to be in. + :param expected_vm_state: Optional tuple of valid vm states + for the instance to be in. + :param admin_state_reset: True if admin API is forcing setting + of task_state/vm_state. + """ + + cell_type = cells_opts.get_cell_type() + if cell_type == 'api' and self.cell_name: + # NOTE(comstud): We need to stash a copy of ourselves + # before any updates are applied. When we call the save + # methods on nested objects, we will lose any changes to + # them. But we need to make sure child cells can tell + # what is changed. + # + # We also need to nuke any updates to vm_state and task_state + # unless admin_state_reset is True. compute cells are + # authoritative for their view of vm_state and task_state. 
+ stale_instance = self.obj_clone() + + def _handle_cell_update_from_api(): + cells_api = cells_rpcapi.CellsAPI() + cells_api.instance_update_from_api(context, stale_instance, + expected_vm_state, + expected_task_state, + admin_state_reset) + else: + stale_instance = None + + updates = {} + changes = self.obj_what_changed() + for field in self.fields: + if (self.obj_attr_is_set(field) and + isinstance(self[field], base.NovaObject)): + try: + getattr(self, '_save_%s' % field)(context) + except AttributeError: + LOG.exception(_('No save handler for %s') % field, + instance=self) + elif field in changes: + updates[field] = self[field] + + if not updates: + if stale_instance: + _handle_cell_update_from_api() + return + + # Cleaned needs to be turned back into an int here + if 'cleaned' in updates: + if updates['cleaned']: + updates['cleaned'] = 1 + else: + updates['cleaned'] = 0 + + if expected_task_state is not None: + if (self.VERSION == '1.9' and + expected_task_state == 'image_snapshot'): + # NOTE(danms): Icehouse introduced a pending state which + # Havana doesn't know about. If we're an old instance, + # tolerate the pending state as well + expected_task_state = [ + expected_task_state, 'image_snapshot_pending'] + updates['expected_task_state'] = expected_task_state + if expected_vm_state is not None: + updates['expected_vm_state'] = expected_vm_state + + expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS + if self.obj_attr_is_set(attr)] + # NOTE(alaski): We need to pull system_metadata for the + # notification.send_update() below. If we don't there's a KeyError + # when it tries to extract the flavor. + if 'system_metadata' not in expected_attrs: + expected_attrs.append('system_metadata') + old_ref, inst_ref = db.instance_update_and_get_original( + context, self.uuid, updates, update_cells=False, + columns_to_join=_expected_cols(expected_attrs)) + + if stale_instance: + _handle_cell_update_from_api() + elif cell_type == 'compute': + cells_api = cells_rpcapi.CellsAPI() + cells_api.instance_update_at_top(context, inst_ref) + + self._from_db_object(context, self, inst_ref, expected_attrs) + notifications.send_update(context, old_ref, inst_ref) + self.obj_reset_changes() + + @base.remotable + def refresh(self, context, use_slave=False): + extra = [field for field in INSTANCE_OPTIONAL_ATTRS + if self.obj_attr_is_set(field)] + current = self.__class__.get_by_uuid(context, uuid=self.uuid, + expected_attrs=extra, + use_slave=use_slave) + # NOTE(danms): We orphan the instance copy so we do not unexpectedly + # trigger a lazy-load (which would mean we failed to calculate the + # expected_attrs properly) + current._context = None + + for field in self.fields: + if self.obj_attr_is_set(field): + if field == 'info_cache': + self.info_cache.refresh() + # NOTE(danms): Make sure this shows up as touched + self.info_cache = self.info_cache + elif self[field] != current[field]: + self[field] = current[field] + self.obj_reset_changes() + + def obj_load_attr(self, attrname): + if attrname not in INSTANCE_OPTIONAL_ATTRS: + raise exception.ObjectActionError( + action='obj_load_attr', + reason='attribute %s not lazy-loadable' % attrname) + if not self._context: + raise exception.OrphanedObjectError(method='obj_load_attr', + objtype=self.obj_name()) + + LOG.debug(_("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s"), + {'attr': attrname, + 'name': self.obj_name(), + 'uuid': self.uuid, + }) + # FIXME(comstud): This should be optimized to only load the attr. 
+ instance = self.__class__.get_by_uuid(self._context, + uuid=self.uuid, + expected_attrs=[attrname]) + + # NOTE(danms): Never allow us to recursively-load + if instance.obj_attr_is_set(attrname): + self[attrname] = instance[attrname] + else: + raise exception.ObjectActionError( + action='obj_load_attr', + reason='loading %s requires recursion' % attrname) + + def get_flavor(self, namespace=None): + prefix = ('%s_' % namespace) if namespace is not None else '' + + db_flavor = flavors.extract_flavor(self, prefix) + flavor = flavor_obj.Flavor() + for key in flavors.system_metadata_flavor_props: + flavor[key] = db_flavor[key] + return flavor + + def set_flavor(self, flavor, namespace=None): + prefix = ('%s_' % namespace) if namespace is not None else '' + + self.system_metadata = flavors.save_flavor_info( + self.system_metadata, flavor, prefix) + self.save() + + def delete_flavor(self, namespace): + self.system_metadata = flavors.delete_flavor_info( + self.system_metadata, "%s_" % namespace) + self.save() + + @base.remotable + def delete_metadata_key(self, context, key): + """Optimized metadata delete method. + + This provides a more efficient way to delete a single metadata + key, instead of just calling instance.save(). This should be called + with the key still present in self.metadata, which it will update + after completion. + """ + db.instance_metadata_delete(context, self.uuid, key) + md_was_changed = 'metadata' in self.obj_what_changed() + del self.metadata[key] + self._orig_metadata.pop(key, None) + instance_dict = base.obj_to_primitive(self) + notifications.send_update(context, instance_dict, instance_dict) + if not md_was_changed: + self.obj_reset_changes(['metadata']) + + +def _make_instance_list(context, inst_list, db_inst_list, expected_attrs): + get_fault = expected_attrs and 'fault' in expected_attrs + inst_faults = {} + if get_fault: + # Build an instance_uuid:latest-fault mapping + expected_attrs.remove('fault') + instance_uuids = [inst['uuid'] for inst in db_inst_list] + faults = instance_fault.InstanceFaultList.get_by_instance_uuids( + context, instance_uuids) + for fault in faults: + if fault.instance_uuid not in inst_faults: + inst_faults[fault.instance_uuid] = fault + + inst_list.objects = [] + for db_inst in db_inst_list: + inst_obj = Instance._from_db_object(context, Instance(), db_inst, + expected_attrs=expected_attrs) + if get_fault: + inst_obj.fault = inst_faults.get(inst_obj.uuid, None) + inst_list.objects.append(inst_obj) + inst_list.obj_reset_changes() + return inst_list + + +class InstanceList(base.ObjectListBase, base.NovaObject): + # Version 1.0: Initial version + # Version 1.1: Added use_slave to get_by_host + # Instance <= version 1.9 + # Version 1.2: Instance <= version 1.11 + # Version 1.3: Added use_slave to get_by_filters + # Version 1.4: Instance <= version 1.12 + # Version 1.5: Added method get_active_by_window_joined. 
+ # Version 1.6: Instance <= version 1.13 + VERSION = '1.6' + + fields = { + 'objects': fields.ListOfObjectsField('Instance'), + } + child_versions = { + '1.1': '1.9', + # NOTE(danms): Instance was at 1.9 before we added this + '1.2': '1.11', + '1.3': '1.11', + '1.4': '1.12', + '1.5': '1.12', + '1.6': '1.13', + } + + @base.remotable_classmethod + def get_by_filters(cls, context, filters, + sort_key='created_at', sort_dir='desc', limit=None, + marker=None, expected_attrs=None, use_slave=False): + db_inst_list = db.instance_get_all_by_filters( + context, filters, sort_key, sort_dir, limit=limit, marker=marker, + columns_to_join=_expected_cols(expected_attrs), + use_slave=use_slave) + return _make_instance_list(context, cls(), db_inst_list, + expected_attrs) + + @base.remotable_classmethod + def get_by_host(cls, context, host, expected_attrs=None, use_slave=False): + db_inst_list = db.instance_get_all_by_host( + context, host, columns_to_join=_expected_cols(expected_attrs), + use_slave=use_slave) + return _make_instance_list(context, cls(), db_inst_list, + expected_attrs) + + @base.remotable_classmethod + def get_by_host_and_node(cls, context, host, node, expected_attrs=None): + db_inst_list = db.instance_get_all_by_host_and_node( + context, host, node) + return _make_instance_list(context, cls(), db_inst_list, + expected_attrs) + + @base.remotable_classmethod + def get_by_host_and_not_type(cls, context, host, type_id=None, + expected_attrs=None): + db_inst_list = db.instance_get_all_by_host_and_not_type( + context, host, type_id=type_id) + return _make_instance_list(context, cls(), db_inst_list, + expected_attrs) + + @base.remotable_classmethod + def get_hung_in_rebooting(cls, context, reboot_window, + expected_attrs=None): + db_inst_list = db.instance_get_all_hung_in_rebooting(context, + reboot_window) + return _make_instance_list(context, cls(), db_inst_list, + expected_attrs) + + @base.remotable_classmethod + def _get_active_by_window_joined(cls, context, begin, end=None, + project_id=None, host=None, + expected_attrs=None): + # NOTE(mriedem): We need to convert the begin/end timestamp strings + # to timezone-aware datetime objects for the DB API call. + begin = timeutils.parse_isotime(begin) + end = timeutils.parse_isotime(end) if end else None + db_inst_list = db.instance_get_active_by_window_joined(context, + begin, + end, + project_id, + host) + return _make_instance_list(context, cls(), db_inst_list, + expected_attrs) + + @classmethod + def get_active_by_window_joined(cls, context, begin, end=None, + project_id=None, host=None, + expected_attrs=None): + """Get instances and joins active during a certain time window. + + :param context: nova request context + :param begin: datetime for the start of the time window + :param end: datetime for the end of the time window + :param project_id: used to filter instances by project + :param host: used to filter instances on a given compute host + :param expected_attrs: list of related fields that can be joined + in the database layer when querying for instances + :returns: InstanceList + """ + # NOTE(mriedem): We have to convert the datetime objects to string + # primitives for the remote call. 
+        begin = timeutils.isotime(begin)
+        end = timeutils.isotime(end) if end else None
+        return cls._get_active_by_window_joined(context, begin, end,
+                                                project_id, host,
+                                                expected_attrs)
+
+    @base.remotable_classmethod
+    def get_by_security_group_id(cls, context, security_group_id):
+        db_secgroup = db.security_group_get(
+            context, security_group_id,
+            columns_to_join=['instances.info_cache',
+                             'instances.system_metadata'])
+        return _make_instance_list(context, cls(), db_secgroup['instances'],
+                                   ['info_cache', 'system_metadata'])
+
+    @classmethod
+    def get_by_security_group(cls, context, security_group):
+        return cls.get_by_security_group_id(context, security_group.id)
+
+    def fill_faults(self):
+        """Batch query the database for our instances' faults.
+
+        :returns: A list of instance uuids for which faults were found.
+        """
+        uuids = [inst.uuid for inst in self]
+        faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
+            self._context, uuids)
+        faults_by_uuid = {}
+        for fault in faults:
+            if fault.instance_uuid not in faults_by_uuid:
+                faults_by_uuid[fault.instance_uuid] = fault
+
+        for instance in self:
+            if instance.uuid in faults_by_uuid:
+                instance.fault = faults_by_uuid[instance.uuid]
+            else:
+                # NOTE(danms): Otherwise the caller will cause a lazy-load
+                # when checking it, and we know there are none
+                instance.fault = None
+            instance.obj_reset_changes(['fault'])
+
+        return faults_by_uuid.keys()
diff --git a/minimal_setup.png b/minimal_setup.png
new file mode 100644
index 0000000000000000000000000000000000000000..931a74180c6f500ab1f938cde7727def17f34a42
GIT binary patch
literal 102649
[base85-encoded PNG image data omitted]
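
Note: the optional-attribute handling in the instance object hunk above (the
_INSTANCE_OPTIONAL_* lists and _expected_cols()) can be exercised in isolation.
The sketch below copies those two lists and the filter function verbatim from
the hunk; the demo call at the bottom and its values are illustrative only.

# Standalone sketch of the expected_attrs filtering used by Instance.
# The lists and _expected_cols() mirror the patched module; the demo is
# illustrative only.

_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
                                    'info_cache', 'security_groups',
                                    'pci_devices']
# Optional fields that do not map to database columns.
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault']


def _expected_cols(expected_attrs):
    """Return expected_attrs that are columns needing joining."""
    if not expected_attrs:
        return expected_attrs
    return [attr for attr in expected_attrs
            if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]


if __name__ == '__main__':
    # 'fault' is optional but has no column, so it is not joined here;
    # it is loaded separately (see _from_db_object in the hunk above).
    print(_expected_cols(['metadata', 'fault', 'info_cache']))
    # -> ['metadata', 'info_cache']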
zH#Dk7W+zmyqpo5bEgoP46!n0brKk%MCNwr6br1~XSEu}>w*+7aQW%D%QRn~e@t?mQ zv6ZNlr7IP++RBNTAPbwV67x0 z2x;dtYIDE!8&u%biV4%HS;Yoib`whhjX!%JV#BNQeE3L$DVz8=sQ-EpGrM8PpgU&) zCY&g>k~L4iq5j7HCvyUm8UfmTccqz%hp4RltWGud8$IDt`lsorL(_0~q$c{%{RgIP z1Q&QS0EmXmd#ceTLvK|y(^R}{{;K?qM+S=uOt?dnWUUuu-2bo*JFCF*#7kHa zj*#4s$W>cV;erabm$Ai5aOrwDS-WlKTkLJXtLx(X1*`d#Mrc&TZaf1QpGd{iL!TsW zS*>td%KBK+g!}r|jzx-OnP0ipUc>@|LFH^weB)M*x*0(&tmA?LKD*f)OUeSDj$=AM zvS5!tns0Jso|u0Dyd~<$w27u@GwRzN`QNNoCncZaD&2 z8?VtMh_&S6c|{i(Zgo_BDXT5T)v#SOXD*_r+4fp$O_f3mx`2uA zMSX&6jf40yoL_)PA5UK<$vI9}IW10M1WqQxidW%DyVOsHV;=W&`*=>3Va)BvSo8~9 z{I?=2f5@nLn+@;-7lr|XSJ!;+3@wY~xpJR|i56F}`|ELs?W25Gcs!uen}Q&x7OPj2 zKuKr_@Md0l0N`zAGDLhK`JhT+q>eb8z=V}K>Y1v(mihGDGWShj-U<+iaORT-WOzwh zvVDxn=CK%BeZ?V_-~@1LqHrrBKpV=*kPJ1KUkUVnAG?Ipz2y4-4jij&OV)zRyPbRG zl09kNjUI2{Q}>)>w@8tY%JnwroE4azk_Khf5D%#vQSeCI2?|Yv(6#9V@4@+L1-xicRrJT7dT0?FO+aZ33?9BVH6+r z(1JP)R`)9lWCTmmYv>a$D8K)#!^px?ubL_l{z5A*)pR&@s?4p%*F1Y`5^Ojv&?rJ| zv%#KlqinL9@3Giw=*V&N_8ifIl|u`yC2FY46q63D8b{5FUk{#_*bu@!^;hBmyh9D( zNILrLA1j!D2A-yvm1AJk(g3jWx4ThSR1>v-GDHjb%htw!+FA{=?X?&)0B*@?D*sje`@faG8N1GzP5*nM;{V!h2^k#|D#t0~;^^-I z>W`_t7x`HWKZ72g1siaGGqql46#*z`^d01@?e33<&~$jt z%iUj(1Rk`?Nu*`DguEm5{C0_QnjqwR4mRovSLG@VK-p-3yU}KGEi8qV2jqAa(qXnU zkwr5$SJ*nckBu(ty7yaVD`)d5^E@)O79nh9qdhHMB|-`^>Jh;b#7?W*`+AFKF4thd zdmv24Ee28>FaO%_m{gbr(){-4``wA(hu;PLP$wjrb7o8Bs1s!)-$( z0grw`MR9j$SxI6ux(+IKwseQo z(=vy@c83`RkON{h*isD)RRlT!#^sHcyDAwVsGRf>5OkJg{P}Q-&KKyB)?bZ;(mYmZ zB5KqvpcR^?Z*TC7BEWT z!Cv{jqq}~ZW=mXo&ft$uyZT24pGn}ex;|ew@5R|9`J%PAlUH-pJ(Rl|cUmFilXmCt z_jCbC+=|{~j}J(f;HH-=N|v5ljoW@;e?dXIg88p9<#ZyRPoBYkfs(`%NAy zn>B6k8##w+t&2p;8)&;N`KN+WC04?Eb%>s@CXHG!0}^+1VUY`dG`&5$dLzkxjJIRQ-fLRLm>wG$Q^S z%i1Q)D_KzE{iT8yf)vCE2;4&CkO`q;E7h>=rSvvaS1J2l z$siMlf_UR-SrZ9rG5vzk(xwT{4J+xU&$2u8H~l0@tUQG(Q&dqNIWu!QeNeF{p1rx@ zIh<5%Yhc-p48z?YV$`RG)MoCFvd)Mu#~2r*AK`Ek$MWA-`X2DQZVikFq3wQX*`%Gd zBPMXsTzb)*(8%{=omNK(A6J`zm<-OZ=iFGB#}2s1ITxC!>d=1w)REFj+FqbPRp zu^TIYK5oww@&XuxZ*Mo4tVeX`Ns!B#S9*^#N2u$!Gx*-cs>n6_T1knC;J?if~6OL1QPOhuM>os}Q z9Y$VH*foch3Q`(Wlpt(JG?xNPOY*xDqaB?ur&aQvr#n{|ilcvG)bg(kpFBaP8WTIH z&6>{u@4COI>N4dRPw8P-*N@5piF;NDlpBvIjkjDLww?3=Ys)!+titVm{B^9!BpVv5 zYLmC1EKp1@);gW{)K)yFD!>MHQ>w-nNQp3QlPnM5&1y`1t`RRVQixe`)<_5<9L4=f zb3smGv1M@2>%Gv}Z_qNsq~rG8S{6AM^YzYLVk?C$^vUdO@l4IJT+M15`S&9aqao&b5^>h@QB^U+j%d*H&-nQAijsq7B!jhkX zj$2WhKni$hFbB8~H~;3CZK!zdrXGbMW(P*M-4T(T(Ep*`P*O}fzmNil9vnF(z9!9| zr#NJB{CEAb1pi%YwH=0ruofXc`DG!Axb_0btZ7?0Lr>h6grlR}bbvv&-KxUf4fkyy-|F`5MZv z52Ogat~n@Mp1|E_fqw?pYn8Aq79XL4|3aDM@emDw06az5TsbuYYef=$QfL}72^}wu z2Pk#wwqD(u82-;H*9q$ET#R<}2pw$Dy8`(|&H$#*CCZ*2%UXu-rnshmgBs}%rN5YA z=MK=8nPMoKagO5VMNU;7e)0|haQK^(L06ZHXzC%r)Kv{1VCv|#A%BAcBXvJY)EU0EcvAf}qqsYr;DrznFgCxd1H<*Yy)W4b@M86`wRH0DB@$J(MziLM0X zVIH(!2$XPcyi^(_5GjByi;PNdMgr?xzm#%}unglPvjGP~M5;N{aSw1a+Ek|i0hk;N zMFcCxb!QRv5D27K9kE6?!lwj*AVr zXR5TutHc(PV{A80`IbN;avUqGQo7;6(b#2nII!rGsK#vN^g|9pfove?x-p6y*a|F! 
zu&*2c*a|Ff$gUfI`d8!t|7ZNeqYW!+nd}!M`j2=6K;>T~ENJ2b zUYhLDerXH6tGQ`iD9hPxD{>J%La=0&Q~)WY&gzziAuirQ2wcTckV3+7TpkqbYq*aTvqA+rlcNS~0274y?yM z<%UW|!sb1_iOm-z%>`S@wG9pdH+5= ztVsR5t-51N+}scBnjvFuw%;O~GW0ao?G4GQ&%cfwmf_}1>ntwzyFfqeR<}{SeZYxk zTY`&Q7rE$chS}5%k!zV1=XNHm$geBAFoc_M6L@{(B)0cHe-}xnLtlG(Rm%%E{H)Nd_l2 zxEGjCU^nV1bvTa`c9>ywf$YjAO>O4-IlA@)F2TsBB&$*X-**!m!Q%AjpajbAP zc@?-7Tz-yw^Cb)C!lT}KGD614^=|Q&)xuZoH?9I~!F|$E05Vf-;5rescTTf^x zvqLb-ywNI@=W8eE2fH@XlizACTUFA-k3)AJ zT*Cu~irtGI%c>aDnNK#)c5%^^`H_i-EonQCC&n-am^||wUA@SGOmI1AZN#>(vn$>A za3h2f2ix|e=;{sJ(|MaJYN>~yCgAjzyBRN7Q7v=5@M82- z@s6Gy$Tw%qft$^`W( z-dsAq;*&}&`+_e&UsKCFM}SY#!hUi@62N_Ey6al|L2Vu6q~7 zz!ZPO>r#!iKKrYqm2$k#f$8vO*41M4N>XcT*Wn5ztrh>^D}Q2gnW0=OiTJjUc&mSfgmP1C6v4< z-&5L^EKd5Nb z-uqW!zg}Y>D3YAKg=W;jk+8s+=N0V1dz${-Rl#NB?w^Lf1H>zGvKkO{M-$NQO42OL zn_QrdFOW#fO!ZFqq2(IZEtZ&O1+!Y}|KPX5mdxiAfKv=o1RFBYsTy@A8&ocPy{A{n zI{UPL@0dFePc-Kj8F7RkC#-GMsErN;fS^{_ASkF0GlA^6y6N1C5`|!d70=2erMFaS zfN9}7Fy3!*OUQm%w5V+`X&!Cf4>7Ek^CPsT#ZS+fdjn7IO50!GFKx=(sIp_&t&RwP z^O%-3od8gHoBDGn(lwsIME*p-97Fp0S`W-P)@r(FRQv!#HPk;Vct7c|Ws1Qob*skefmxtdF&`TZ(G2<56W66? z57WuL86e%h4#*-oOId-fK(^=J_mK)o3@&2%A2Vye@^=x(ekrQDv#{f{Pr%<#xZ81q zk24(=a^LeVGmu&7m%e%!315<~H_QV;{^noyuTaB3#UcK?8+)&l-v1ckp#K!%)R$8M z-lR@Lm-utAEqGbdKLke)uq{F#WZw8se#pd;8R&SPe+_k-fy`e1KQ=V}N0A)MsG6H6 zIf4wE=iDcmWP-zv`f{)8Tc*k!y;u#&7xdW7VXRygMbCOE?P-P3IoR0cI(u)CLOFRo z>#%lQ4{QIL_-HZAvyNZtHlP*>!RwoFIdt_Pwt=%TF>ugLEuxS9@le5?l&U)J=W$31 zQ*u|8F5r>dP2kt^ zfkZX&{KViM$*4T1bSq{NCXl>G9#o2DkhkE*#_>ypOUIQMtqoVn`mrZGc{$mnHsVwCZAHEW8W7p@6UqplFdnCJ zLR2H9zFv0%UZrhy5Rjwm_I?F)HZ z4Y7>i>NGh;Rh^B&ouOA(y-M$RYf4gx9^l(8JSOPF&Mc9neu;b(vy3xoR9A)hQJwr` z@;5D>}Y+sPZ+o3c7))0@?Ae3CthYN5@mdW z_6w%?7kWZQYyv$~#E*?`@BO%I8XeV@09(3WBf*_*4!$>gKU+snIr8W zh)BoXeW+Zhq6g>AvAwpRl@W~CmVJAqc6;*Xu0_vLaym>W%h!Gc@42^i{C1L6(=M`P z9d10omAXHdxq4dsxr7W47*$SSe{(#h{13kJd`*mivtD_a^zO%iuRt^o*>BvuA8T`^UQ;hWakv&HWBGwg4{op45T|fA*IFTj0vl0BQZhn5hqo zoV*{uiEbWjN~p^t?kF$o^T_MV_D?T#d2#pm)R(bo3sCMc9qL2-8I18`-lY4a zeh5l_&`ZYhP!jtwZI*d$(h}3ug1yevN2jpXJMhLF*J$(C$`;qpN!(R z-<-B$Loeo%pO3jVd(KN(i=TA5uBEV(?{WLcFq77GdrUbVu|ezS)xETq)byX&a0#u7 zf9-yNE&ilx!jeme+fa)xuPwSFRi}hTkND99oBQ$MORX)IevK)ffHsGsKKl6SC{D{K zQuUkS4l^}RD?g#zSmH5fuITMoPh(tIi^q!Z53 z`6Lnbt0FxH50jHV3R0BJ?RbNc3=I-VDXMb{6OULE;@^spM3hF}>>I>ib?EYDiF-1( zpI?1Tm92JISFC<;BBnjAZFj(?sj?^Hpg27xl3)JNWfNnFz`j5a8fqdOfkokTZKBTn zKzeO{=liIayt#pIMCRCDQ2KUzydB#>l&+s9MuGE6o97oY3b>Ew=$&LAs(oWt7kL?Y zzI~|A*^qtUOxTjUq(PNduIjPMZK(K=Y`SEBe?3xuaD&f!@P%De{L3rvtSj$;#NiIt z8o|SAhSHd?N6zjo`_n_q;p!1a&uXRwJenV!s_5z~7}@6IV9$X&ohY3?aU{M6F)Duv zG2*FE%DdPf5-2Hsx2T)?0ATUym8F3TSBZ+Z9Cr(>xmSb=X;}DEMe!f~q?v;-q=M(c zDa*)wREo}|d|&jA_lx(_Q9S*s%?^X^CzNGl3X!*S<&B1^K*>MGCFD%L?7DwQ5Vet3 z<0I(pp7_}qVwo<9s|M!>)Cq=`2cYuxsRLAn4tBPN{cXYDy8`iEJWIXhrDzd3^v(G( ziVbDGva@2f_aX6ANmEw_S5bUxyyrQH8L2E9K&3rT)1qy1e!;H! 
zg3zbTkI5LI(GZZ22ZBuJpk%Lf&$`_DE-_nEyvm~&vTw6oPnHOWY7i}9=2nHX_X?n1 z&*bj?cy6DYUU!0i$Srkv1c;S2d;bO<^H#qr+rAiAvJ}*XJ}g}W;MHnDtpnw^^_nsa z6{1VNJ(74F4Leh`3SX*33lscIrSKmDcSNRYs(9pAL3K2PDAdQZzb1>8<;U&YY`*BPz&Ylx zS7ckZ8!cIM)l_M1YOYTxd$Uk|tJLw2zbbg?5IH3I23o(TA zMfAP(x|TiPLvC`zsnc;A4ZDVQOiuo`&RG<+Xu;}hYZcX$!u!$gs)lu%}cY6#`HHEswib2>idJ5TQ1vgjG>zGU_~-Ot8uD#~1; zZT|dVmv7Vr2wm(dgW01B?f|Ta_Qg-F<(-AA=R+UoU}Nj~n0Q6~ zJNN*TyiBEs&l=Ijbn*)b$@6YcX<}V{zj{cVA@JdklEZ&4Q&0Oxg(5&juh02!;J;XL zD;>b=V_grVc7FlsJY7xIf6U%e_M<8QgqN`l_EQQlp`93ag<#~Z_%Y4zfnZo3Wc8OE zz`x`I{@K4+n+DfYum95F#V!zDX4AoswY-IZ7T^EX@>PcPKXmdh2VwS$&z#MxNP4dg z$XoE@Y=ZKbPJV;jeuHurYRipz-q{x2G@=0Edz08{decvw%=Ud5J~iKGO;m^}ou|?Y zqllG}2v2tv@K;0TVv{yJM;#c&0`Au<4+WjxmEM$)UWxgFQh8BHr%=g#)toKA=H>SW zx7{~lFH2^~bJ8FyCqinr{jtT2W765=q3wBPm&Ftz?L-tu=9;z5Xmx9ClSZ>+8gFyg zco@LU0hC9Wk(yUn)qfYMo)C7iUJS4T$&T!tXjg#Sy)KrJ7MuQMay$_|A9a+Aml32Z zeM>?Qwq06|3Dz+Uu6ZC}NVn~taQG<^aJ8oRQTdgFU*M_rx7CC6mV~TjLo$HtL==*n z3=J#(LMCwxH+X`F=K;c}p{#`E_qfrLVy!lV#1<}y`ppiwzbLfDP@%l zf*@ASre4&l^^VcA0D{VyWmIlV;w?Map9I_gbg?N`OhY3ZYGrRPyxep!qz_D@ocZx6qDeDZ+}0iJwL`Z58$<~kW>N)ef&LHzpT%$hQ6-&J72 znHJM#)W1P9Lc(-qWjyc4+Ag`2tyzDC?k{YzI~WaP^{o`NEt4oniaLfkE#HqGqc!p0 z%T)SSZ|{3ZVtrFL|$0+hVLD4alg5zuWyPATq$?odm#3PfD!nv9S)- zU%N*@^D-{{yd>%`N3OcujCZv8s%7FdAZWjq&of)-5lb*u4HmOUdfFCtE+XEBEI^v+-KiAaBlfCPfl7qh<@s;C=2-q67WA6(`Z`&j$F)GSH>yQ<~JyJTmRHn>bhm< zA6h_nu4$Ty^{uyDyT_C=E`GHwn;>OGPO4Zlno)erM;J6sI+->_nHKoPO_7=B7`lF)3v8*$kWbu0vn>TlhHo z?o$80oaTtm1|zTG9^J{T)j=(b-`eIO>71~^yajaNwbBHa1jYSgYsXf?xsnBwuYF-K z0kZj=T2Qxbrzr?I(=4_6bkur33tm6Q^~Db>I6HQp)`(F`O#y#dgZ(SKz6Por=4t|e zd#8g1?+Lyom?Ndp=0|+B((%x3L)2>sVjvS8=(&pFWDCoz^Nc#`?XlUWgpcl*5gm2G7!;T}%OUhq1Z;3o+Q4Q~png zuFYM~mio)O9s>(q4|)~36sXo+aVm)(8p8zd9pp~{z0aK()9;zR*m3<*PUfQ!wj3fM zc`rUWtF`+5*TD^*!@JT70pUo$e?V>6wCv@&2Y~f&HPX= zaXt~>S>6N39UODp0i5tYP?20z*AYk<2TIt>ZVFuPbG{fyx9*cjO&<0S4pl#?vKrDe zW>7oDG-H94{O;J`YPn9m{MtC88b0{7I(J6zIl)owSKr3lCv&2ryE+R5LCRnuq`T`g zUDth`nb}^~@g^enL#zi_piJxUcBw1P_Yc|!F|&J9iP3tpZAV|i#gJouyzrt6wi1KGRD! 
zbH+A;sf5yL<2tCRj$O0Lt$IG+`P^U^vkA;{UA5MGfq-j{WG(IKRTYz&yBKP)iaQLOa^{6&m1a5OOB)Eas3sYKJ+^| zxEI3Z&TQX^3*&Ey(jGu3Oh*M~Q{o)#@@G_R)w)e$DL*_=AY2Py#X%mbIOJ`gC&tX^ zYv~MGfraF_IC|@7XkI;j{2(&y;`pY8+dvzlYp9dko`+7w^pjE0r4WhoxC}FAzXJU_ zm#!^)Ig?KEH40FZ5!xVFUD4z41sDOP_F|ZlMJWbkGNV>#n3paHPl)D$+nXrZT2a%W zHK+z5tTX(gNXawJ9q@fF%(dwh!foW%d4c7!wcF2fGAFeVQD6^xF8fN&&(u~#PlOwu87a2 zQTX`3LAMpp48vNtOo8a{n}{?j^n@GeYo^7|N8NUeo%IKm0I(ZR&GkJ4+q?d$w+WBT`j|>ECz?HYImDXRO>#01=4HZ?^*fL`n{q_ zG|TK_t%VgcK2};5y0c%C7fxk3CpUabS?x)Yo7t5&So}ef`;^uP9d=8a1C)fh4Iyt` zKn4z7Z00NA@2D~%f>BYJAkkKYAsI@OhYdp?x}0ek(ojv#X=zNCYV4o-_CQXD`a%`{ z**K|o@m7p*!d&pgE=6xqU977X`2vtH-4;Zc^TvF^sg5HM4f4 zUlLb}osIiNO&OJ^z11)`K$#R(lcpd~YpklwAC+BD8iY8I=j>eT5v zXV*D9Jp0*F!}J7=jo9pfM{W*n^6UZl&K1*!wy+zkwc|U_YXWRWviPa432&$kGjBOj z&CG21Yy=-}PrpSa%p=o?{id!E5#o8S;+|+;`K(rSg4Ca29}_)w5GhfQ&8sl(;>t8t zTWriE_$0BH3h10~)l30lSUv}Y0g5~c|5Em%uBtW~jpAAg6s%#3dQcqmj3=>XUp!3I zct=}SKy3^?-@RW(JUibT*>zvCy}JW^cke!p+!Ey!@{3n1AbX5X*jq6t4{P?$SOk!m zt22P)l=$2ZYhB2nGuaYUdM^5I&&i{V%R%%n3QB*k1DU5Qw3waUM0@J&2H(#pNg}^i zZ5raHLYa!?4EJSqy>0!5@k3^Q#yFb?vk=HE-Fq@c4}R*1yLdOc@j>E$zB_kxNYVWC zb{&>^N}&0?cVVzP)nH866-TT08i@V+$3?yQH_Lb2RrpKyN>h8ZtGPMr_?fORxQ~WB zm5@sorHvFBlzxg?+iyh#FJ;#yDH~34id|D?JS39@V&K0vltTF|b~i8z2uU z{>enko~R$7)od^6UzwI)g3HoT;?Z)eFpTHFfpUyKXXASk2K|A#s_s(Zbxiz$t-coX zJpGqC^(jT3d)mj1p5_L>ENqcJ;6 z{K@?tn8MR^U~^!D`xX$)DQrFr@INdTsLCw?CU=n;Kn8v%z6mK712iYP@Fqo2YB(JNcm;1;0G&>o7Ah@( z5QvP)E&mtfpYC|Sp`S}6%Od1F5Wpy96Jq-ct+>mMm?(P%m}vYJ8|U{foRAb(g88A(SiVA%6^-!KTSf> z1XS|e6Mohx+kjwcw4WKYaC3r`b7Un|mz>&h}}0tg~XoM#=++}^xTtys7W zX2fIQN_#jmeSwbLj7SUm0V>eSswe+Glj%i0YGcYcb|GrX(VFJKCPaCl+T)fW=iwn! zd4_8FTSP|^^A3f1C62*vZ`Z@q)I;0jnz(Tz0mF~nY>(lP$4U5W#c7ujE z(naw}_7nRYV+C_3c5U1J;!oqxK)QZwJ#sP)>jLx%BYkFBT?Iq;S7?LOQI zb8c1i%XS0d#2Br)$pyPk&rMIypD2!5)FcwyA!q6snYF5zj5mTmGFj6f=Qx&M+GF(y z0EBemX-3ULj7-y@(O8fCggX(UNNkQ!3{+BaZ_zN=HU3Iu9YJ_qq1U*^#F0Ax5McpP zu>s?Z;WP-2!|UK`1`jL0y5h!bji6eE7sU0rw=URRzF!o9P4!-_%Sw$vF>f5^Fgt0n8`Ix*$HJ4Yn z62#R;&+fu~y08CvK{-ZiY`2tS0&X+?VUykau=)fJ4&QX_v~Fh zg7lrg&QCzUVgGQdY$AveNsCyZG$t2(5}3Bjv_f_zenxTwUp31om!;a_Z1OnYGIv3x zv^L5~`hk@uiNkQs%K@sp(qW>neK9jS@%;+XL^p=&72lZpod__+myS`(4lRC?O>N>j zRLfcMQ_|upv2-p_uxGqVKVN>64Xj9&>Tv)lG(SMHS5uM67eHVGuM?9M%}>zrL3DxA zXtuqG#KMF!+KE1zASqO(PlArsy+mrh8i@1KcE109zjZ8Ntk4U+wES$P-)UD6t;ECx z?nQ})yuDDD>o}2-^|ZCM^vhKvY4q!DIC#-$Gac9PATZ!?*L&DO@9RdF|4mGH)AJ%5 zu~dhIX*K-D{FC&4wwvHN9Hl8)o6Keu7C~sU;!+#5@-%R&1Ep^ymW!Wp=nT*RP;#HT zs8k2&d#nwS5#waOZ_@I%v-bg7MRqVzCOjoH^PO?UGP3lV%Pzgiq z^ikPc4C)O{(Pa@!SV2KfJra$Y#A}MtLDCQ3PM;L_xKuF~P|yzN%Dek-G;z3p;^Y56 zCw4jokzYKP0m2i{`(TH4*0q*Vr>^;|~bk@(O(x<(GhuhEK)mO_m zjFsG9Io_%lqzQZ19TC+bA*s*?51;93#0Ly7B~&T9+#D@&R=;F6;VY8ZLEsp-&6x81 z`i>z)o`mEZkYW_RDqyMxxiW<6rcRO;pb9Nr@)j%7Tn(Cg6F|h4)6L^E+%et(<){gX z5HPQ{_WYKkOg8t`VCk5KDPhP}uUtmJQY_58zzCvlRjZTVslz!16I79oUX*7~PY<`< zhG`5Ez_PZUHZ)9f?uUfz)L!NBwNe&BT+Xq8DBYGA1t zd~d=Ak-yeMbn{)1dIJG2{b|g-44%=xfW1#j=flX4r>(7saJ~^PgJsR^*Fm_TBfuAi zZ8Wf*IXh_BX!Or{Z|bGco@$!1CRW1SNfxUp$crat>A|c^(UdSHyJZ1T)7x9FNCS~Z zR@2$}fr8r_^CqjH*V3(wGGrkMk?Tzp=Q2($Y*EH^v&A?)`FO@jB<7(N(y!4%&pfE-@wYF7&&}?cDL693LewZTFabcGgOLUhRL{=Lw4n$VE0z#A?H!O?yw%7<>NDJ4mpT_*bT`nDKfk5cHeE3ppqmq$^tkDX zo7i@%QKqeev-kK-Y&{(TDoF=#i$*v4KH2@3qyie-PT#;sK;$ZA@I>Dju`eDW%52qF z`!Olb=&cKGwMC@_9!3ZxOD+LVHcPn2G%5K5l&k_f#MNTgi__TX!Mh_inE4``v~K)f z=49_SLw8~M)>I>B0{4vJ{Z{q4wn*uwiZv=05GXRs2(*9!T8HyI0_+ev*dY$&s=a_h zB}70GQTd>YZ%O%Goi-In_*5_Y-xrGheJ!AYoH5XV=vQnF@~7BZkJkslfp@bENCE)V zb%1TA5|ecij<&w^=>h^6;UM%s!*@A&;R+yRzP~~KVd0P;ikC4>letcScxLa|8QE94J1o=NZV+^BQpzNFEdXne-rq z6rZn0f(cmIR-BKSXL~(B8%&Pp4M!`!d~tpBEm^saBbdxHvk$wkUtAtYATb*f{wcjipx@ 
zfvrxo3(g=pZ2Jsoi(mWxgrHOZY(C9DH9^|M&{Pze!h7ou^L;#8`Hzmi`i8((z_-+b z=jFB~?PS?VTL-q`+FnJuo(q3)f7a8%SR)-EXpzSiiSEa;Ft2$ zRcHezY9+Y$MShymskF~phvN|JbGM-GGfYi>T?BtkZ~jgGGBiEZ<09yfz#%fPUQNQD z7G~PvBZBg!Gp6~hl?*GVN;{+jkaM5`PRUowQ?Be`j3rX3bHLv}SX!37^koKJgyZuEgZ8MYP#v-fF>i*S^Gj<_>y^)2qOB8cwe9L|CzK z>CT?xkubDu(Au)7==^E%gR+KWXs`LFKu|BU_o~&q+VSB-h6S^2S8gJA-vwHm#uOAY z^WoKu3FB^sXFUx*FbdX<+Pe}(h`sA4@d8&j&$@<+ll2>$pOj->msaO7zC^uOJoSl#gvfv=ch6ye{^^2 zD=t2c$|A z~dZg{ZZ>9L`Ms<*nUqFIbq!#N3L-?bqDoH|<%=JK0F66|?J%3*0`RE( z2Y%3`uEXq#H-jz(U4%UTu5KD)ylDWmvpkNDdo) zTcF);{M1#t?Sp@e6!CHR8vP0SzA~~W>Jy-Mdb4g3F=G=)fiRyx$9~bsGZepDnzZ^o z=?0_WXQ`gCu^|4^D{#q`q8xjt?Ew~`a?2i9IS1hKDKQ&qmR?ceKtff&KWU8~8`HUd z>d6iAh$iZ{h<@9g(Yt2}>nGpVs(Rhyv+5Y8~#VZXjC%!bV+Rxww z-WQNOiJAIJ`VF><9-q(H-qh_CqgjA^I8-dY&eX(-WtK?)yGhPH!!0Exqd`I?2o&eb`M`@N6O;ghuTa*iA{z2GgNoh`> zsy?^mzE;WD2p76vT-^jg4z+(t*_pE!Fqwk75!>7D=BA*Qw|E{>fRw!%?GoZ8`qmqw z4D4$4jR=Bx-M10G2OT`%p0+oH9PO7Sl6(s*10G1|hqmVN#M zE>K&2M~DQsf{PVwN3%V3L>7`aYQpix4o%)^ug>DrATso z-I+f7qsv_qV3X*S>nNj-k%AWe0M)(#U>es}0F^4+vi$O#diE#X2{pK5kX|rKY63PR zXe8E3F}@QjXw*T^jJnJ7(^q)yxK;}t6Z zy%9i~9?VCmy?IGN#xn4dd)wj`aUx%kF z&|xXG$*$u6Pc);y)Typ~a-~1h&oGXmvC%P|nO4f{gNn{h5n5C3G0xVKN$i=1rABr! zG9^;sbo@46xxYP^C(A>#IVx`on42a6`R}6@^V?vH&i=eWYxU9P=zS)+--ippkNx{# zPdWd*Kq-#)`=g&`z;6R)^V?`cdVXH;t5=+kDC-{fm9M|13iUnEt?$r`1U+q1B~p2H zR!7?Vh1W2uwmB5Y^W=VnqlgcBwCHrocmeEWW;B3Q-Q*c58I~aiB+4lq`3uYcki&=h z-_Sa-hax!rf8I?^otD7Yw|ofRie%i{Zu4jNjgYnoRt%?8rMyZ;^p44&8hH1Jx)v^p zSOC905-4}N1={`qSHj+iK%P;`===crvjX{C6R>t?0ATn)DIB)f3BK?NVD!aIb$tJ} zb=9fPFY_{0rHp33t0Atjmu+Zn)>>kFIA3cYo|RWnXzA*jcM?0>MUYIBlfxUV-muOo z$UME*+atqL1B4d;+%u4!+yn$e*}CMuFOy?s@6)#uVj|6^8CH$pamU88FE;aNZiqpi zqp3Y(U7J^&HzZ|UjnTK?IEe66x{~gexNXyibF?7iEyA7%Q5!6}ffIqB|8{u~`!)s$ z-7Ek_WRJj@bRe=n61SXnzE1$~IcF2^pUA01wBPQyRt>XCKSghSe8hPX_@kwi< z4lkeQH3uGdJ+$Sh=}>CHy*j<U3# zuY-8{LT${m{`OS-0Rf9|+IGoJ6M-lG%TZ7=4pE~Oh2h&WbSCNC@U)bDXGO<3zH2&% zx_9q!5m*2>T+IpBMz0k-fsiO$IG59kTgw!_dD|64My@sTRO93c05#6%m&MrFs9 zX39B;Ocf$j-lWBN{bx#5id>z>bl-e>v`v>ez6#xqtQK#d}HPl=^V^>U-U)(pcNT7~+AzCQ-vY<+_ ziT5TW2~{YULECq9HdgeD+h1Y7|MFb4P@aCwNuXr&&E=UEgErpOCwaPaBKGw&pK@_9 zl4wQYFOPglwOhyEw>)5e1oED1ZONNcWPhVm9Y@%E%MwrslF#)ufQ^6K>HGq@Vni!P zD#8;sZzz&R02JKzwP3$|!Yy#}bRG=on*3>i8CMBtbs$~ry$3k^MrhRmT#W(nJ7WI#ng4g5|5vvEpLw6h#L5e~lb{bKaApUP zEBb$4vvQX_Lk@r#(Z4TgpjA=nqy4wJ9cW<@)WKcaDPUDBiI(~Hx{*c}Aow_jNa5F3 zg;PU{!>MqlujS;Q_)n=vX>-L)vVuQr2L2EJsQ37^w=U&h%bprN%cUOE{w0p=r#xS5mFJ1p-$ z{;QAJv~b-kX{KWx&1UWg^K>!pLsW*U@Zi&1wXuSPq%x0WYg;U3;b|h3vqtx_*Cn1c zye$p9U)p8h8hE8nvWMwL+d8kR%8oY;$bU6F$K$C7Um1b8QP)$7p&J0JM#?Id$1T)I5Kib~DeW4xRAO(%69I zMZjY1$P?icJ*&9o&>E(}lQ;g*mc(eLx8Lq+UA zH2399t{Xrz?@I7SF4k(`63TD6q5~v=@%`<0{nyu^c+1niU{l|75c~&Uj<+gq)Qr5u z@REYZ1ZBgz++U2&mY6uPZ{ZB&?ey(1i)&Au>v3aHMGN*swq=`*+V`z8zYn-5JbT1I zW^(pT*L(q;rssjT3-_M-?}yAY{-jVnqzmI^`F6oEm0Z$<>sycZ_4EzrYn>!+SF0`* zR!*A5`KHhpqUHVe(5R_c7UD>*_}yd^eKu}sLnxU+sM@3@1L$hPE*Mse|BhR`vl zQ)%pG?4hZmVEP&E8-1%u^SXU}cp|&8`UE+z_U=*&;V1Jc(yWgo1nIE9mfcb)%RAtM zv-+2!w9TlL4}AU^ktvaK>L7lKRBJjd7bVhMBR%R+y?%aY(f{e@xl%qqK1d%?qh|1c ztDC!vO7mwf)4%j*vJiT-^JGd3G3rJvZC_@zE|w;)O%+V|E&%c=@QKPt+%tU@6VK3< z2@x2z;Ym`zTEiskIgcS^2E8N9y-N~Z+6$ z<1Rx>qfS-lF)-wjtap=1C?PZV;A+I)r3a8W-ta{#IqE_*&lcqqgsp~>mM!nrC2%y4 zMy>+>24S83jUYEzl^@ciI$|d-?AhseCxy}aA*$osZ-(<-M#PZsw;1%0#a=$!nyCQ&s@o`?$w{6v=tq^Na+R<`yAu>GBs1>y+zA`uK*VL9b>v~)^PV`wx zoFi9?3J$R?@-Va4UyOz$E@YQh>_nlasHq*3j+WMwqm?>Y4eFGFlNI$hZ5~F)$RZ=7 z9+!aoETs_&3r}(JqvInS$RHI6=-mO}*O0G_6Evng1o1@?YB8B6m^Y5o9W*W2~ zRNa;|6;9t^K2S1CJ|)oD9*92gG`%%Z(=;dEo+RZEQ7F{o>0YFE>t<}g%=KIG%6OC- zOv{C17fx1a;o0qVX3nkowXeIm@#?(^+S2Qex&x= 
z8?VtNFN*QqQ#W@j6PN5jVN}D8$GCRoIHE;gbBrW4ICt4*k5OHPB1I|bsrjtG_KYU@ zaVlF`tC`=_#r33QY$Holp%=0P4UcgyF|XaY19p4;pKi&FDWwY>eXYp`&xmypUswUe-lGEg|RSAzT&w=^5@`}QUDl+={S-| z;c9l$o-dMRYEHCX{s$f3wkzyV!}%R?97A8ZDW*4Ky_wuseWoaoodip@S%vbs_F|q= zZWS|gr-EI_Az|_)$6aCcLwBE$5tU{Lcla!SF^Zb8O+|F``H-qW-RUXdj!ViD*HxF) ztkZqqQschJ?TRKCWN7T5N6Zt|D7p~hRJO+lKe8#cewnvJLiGGiAi2g+qo15^<)ofR zWxXMaD3yp_7$J4JHmne1w|Jofg{?rq8rQ$17Q{6f#P})~Cluqc4m?PRyzx5Te=pzt z9=UNpl)`OhN7A97!rbW`fW&ca-txV>8lAn$;3o&~&2YN(4qAw67JUvc2y-t*_Y_HU zN4R{xnU>A|l4vDRNo$KI1J!I7AtOO;ge;<*~ZYY$2w)rPU}TbBP-i7PcS^4VK;;{Mz=4xH`{6NtX&+{kSj zG4)kn5~fjRhlS*I=6gT$%$B*cmMhAFi* zUO+e%Mf97@cLp{(KS0oJeOd-#IY3lvi(nA4`8(#B zRH^hd%6Xc-UtH)|J~z z_6#*O>D7_l@i*dyBlkyqGkIbsE~LH1687o%)4!S6-KObzHYahNmF}nm_XMRB`bxU^ z(K3CxbxfD5vDvh7=hYBi1^ZixHf#DjL$27-qSlDDQ}U8itaW*AX(&9Bp3c(ni9E~u z`eBljbqGL=*Bu3eQX`V4ZsbQ4Nz_N*=DLK1GUvr8!b3WIV8ep24o0gkHD_l%YRRkM#>%_=Qa+q-O2OIlPbARKF96ddo}K`#fmO_AAF2I z0Jy&$Tl3Hlz6$fI(DqVo|u$fIGQ?pzIh2VX1*7+0+rTJhO|Ib2M_JR zTs>}d+{IOn(9T2FV~H-=3%{H>FJoO4!v4ew)h)&w7VEjPrcH=vv1rvms`L=m{3!P% zbD59wySi^vzuE_AL=V$AyBFUuH{3bX^-bg?J;Hl9=t8Ynv@X1P5)r{yu69Evg7Gv6 zSSdl|?B6$4rxxx{LW>pvoxUX7OK)_HP{92>fr&ysu!z0W@{B16BzSRWxMbUCI9Oy( zzkPob$}wTj8}Xsk`xAJQX+oNSb)|Zye2`%_1p0^&ePYzaElfNY>aFYw~{Y70wtTE#W>$ z9}cc=Qo$>H7U!{gMTm@0R_a{W)hk!42JIyCR6?emIF~Q@= zbTX+iFUMm>Fp*sWl+Bm zh>Po=AVU7xhG(c5yc~6-!-(-qDk>j8y3o>Ds2^{`A;C)V*_RfRyR1`{Ek>Ue=YcWV7`=yjI|7T+DR3uPfjqW-#WX%<#MhFVJ;;-p6xnI z8N&J?$0i=zh>HTSn696inda$KpnH3kLSBn$F*Of4U1Ej1W^$E@+^K9OW@4@Y9R-A>_PmVgb;mflBnvAt~fH%g%N7heosDDuX= zk9Sell*|T7t8JQLA|)_`1GHrG+-W**x9<1feAi1~ciKM!S^!G2c5@>zgPzOV{SkxK z52{c(6Q6I2-&)!El4;#r2|8D$mN?ao6t+&EY6^WpVxagstWR1|CA;H>GP_`o^JsWv zOm|Q&wi(&Wf}AQp&1744Ug|4*_4Ho3APcuz--|jk(a!n%Z2i}Av4+&E>Cn~n7J+WG zAF2qmZBSLE=$qnTpcpYZB#iJ)H{XXx<%Fd-DF%hMaVR=#94 zNO1LWP`8jDzAtwNWv_)ntmIakqQ;70k|`Kbg>qMaOcM{0U-727e=X+`T5KO+Lu(ML zX^vstXD~A}vlW{M^z*V623bU?=$^zw>o&?nuC3N*5<@P|<1LPxSeBZ7+E9cqv&T}6 zGHu7qY;YXq;%SHdU5OVUNyerFREwT+?mHZIn?<)-;U|PSb@=q%!qF3EX4}IT?P!>( z*{Zkf+f)jSN3bZ2qBZIR+AoJ!0R1xZLp&8)?E-$6idy#m3SDne5r5jUI(fU#M&wQz zgyu}mw%kfFSn}Bns}k$VGneZJ(Hf44UrF)!bBwq?E3s1JU2|28PGhD{9*GQB8}JVa zdZp1&302UvH_t=Q+9o+O-f_%NfwMD;^BBE-Mn!JIqNs7Du105RdzfYc%shKbfjR?` zs-MNf+NNz`gYiy)6%KwZe`<5fh4^9Q{f`{-%ZW?j>P;((Gvh9A!f9S#`YXlSO`EaU zkJ;T^#f;QfF@!6{ddGZ+-Y%7I#hl%Ui!+T_o;e(UMO$pDUM0wXTx z>5*iLo~$)jXyPjLoL?zNCF9b4?0Kwtq(}H=m|z6IrKLQgtdw8G8sUe(W-;2lzy=$* z_u}ivRx&c#DxSiT;DUK}=iHX}X0Ud_LT{*7vj7ac4MT8_xYg zyhoDPvRCMF>++=mkO1s79KCS1iB(G9GS@YuvoSaD6>r7%PQ2=_Rs>=gYngZH62_Ug z8Sjtsi6~&s^(rU}a*9}kzj-G9F%&#oq1U&1rR4PtE;G;6C!V`6vSaTOQF+@2G19J` zH@lr_8^KJW*jyig!d>kmMq=OmqzM46*CpbRlpm8)UwtE{k%&1A z?s3e=FyGO2j3tjV1_CkD{NtCY+Q-|j2~i>Jjx*^iBUUXR0zL-f5PT4s4CaXJuFO`L zd=Lq4Pej0`)L?A@nx;Hsm@h=xjO3(HLxY?>%)Q9QQ%Jp44~BU1`kM6Wx6kIoK*igT z3YvZXC~um&_k4$KQ2@QrEMwmfkVjY0N2_OqjxGrd z=*t>#;Z5EKB1~}Px;g-^A6dHxfVq2vyaDBF2L@E8S}R|?$Gkrjv~;L)ug$#_W;5-a z7$d0486#Wqm5yBLUgFk3@pjF-yN7HdRC?U1f%-XY`5d`eBs@qwKZXK{ZO`o2Z*Bq5 z0!#P8kDns+61&odNLha1+3Y?!RrgeCK?*>^W#e(? 
zvPtvJh}3E5dU6|?W^VBwSimIRZ4crU6j^bSt0F$N?EyGbqABYh3Wb4EJjDkO+&)jG z9^MYVL&eh_NpSxY8F$cb7}U396P;omz1%ZEs6*%G-ofVFy9oY3t&hSY%TE)ae3!OP z-(;#*y@C%^8u!%;5^P2??%R#bGolV61ut?Q`kUT;foJZyZ&h0Zrt@+&*>@0YjIJ)2 zv6a&64@?&44qjh+C-hup?fP}sxzNLM)oVFo_&Cmy=8?LJW7FwrUSZ$mRcyqIx=uDa z8>uTtma_~h!!>po?K8?>W}h0$$(c3njTP*R?9Y3Jd_4H98XqP~5XT-FOiKHCbyl9a z;*t9v6z?rjx+u99uqF`e}-op8lj)da3EiIzwB5@(EIR@a{;HZ+wwx0!s(f) zfm&T}CUibz6FTg$R#XR%p#-U$x!9rz9%+4~76lwunx=*`TQu=Tkj|<$){gEuIv?3C0H7d0 z!2K+s0AOKYU}B(SVPaxpV`Jgqk>caw;^I+}JR~Hgqh_F|qo$>O#KOb=h>44tmX<^0 zG1pT*K|w(Vb}=bYen}nyLH@rCf`W~WjfaaziH}do&q&M2|G&N6w*kah4|LGVP*IqG z2gE3-#3=V&00WXvbdPad43rR1*OYP*71HprN9pqoE6!@wib~2Vs%o!Z>*(s~8yK2fSXx=z*xI?exqEnedHV!?3=Rnm`xG7*|0N+Y>Fc-T z%&hF3+`RmP!ivhO>YCcR`i73quI`@RzW#yniODJ0^sgEC^2+Mk@AZw%t?i@Zlhd>F zi_5F)zwm_up#F_5XJb4)5+P;nh*iJrWJOe6e}?lg`!r>)D% zH3c)6RsWkZ`M-awBMRjD@M)Xe<;oi0PvL%~tN%5GI6$s3dhI%34j8*cQ$r7$&H*U>W!eKF9=Y?r?3;Y(9 zI{SgDxSJxs8CwSNQK9W&Qmw~(=uzu=NEC90&~t^V#%&wbyJHV029e#CTjE)`#x*(c zIJw40C-|x#O*Nk(*NLOx;GKZqcQd1AcnMpubA4H{TI^?!GR)Kj4TIL5)+>^fse7O# zZQb2Xk2&jd#l+08vW%W?5}MUi3(9w}_LBdqB<+_m=whJ6m-f{;Jgu9e;YV_Ae1@&w zy8#o~ffJKmf-yx(^cvKj6$4pz(VZX_YfVL`qxtOxvLlU_G{3^pqOBxYL zBg)8ht=9#oOVQQV>PM%BQ6q^gW9||X+^-@e20zNiNzc!!;2F4}v_C?P3>U$VZV6w7 zA{b{h^V^9h_9ry!>uv<8KiCX8vZ*jU0&iyrqLQKJ<>zNy#)4Qo6341JPVa#|)ZFHq zC*JD9#{uRZE7_~-sI;s3g4}YO9+P^hiiL}st2;&bnq4A>8eN`h_V##>&wU%6qoW5C zJ+3NH9}qg=+iW5@V7!tb=$0on_rQ#yINDdqXV36_>rrZjSNqyTM@2KF9@16Y?BTqodj{hA!{G|6a!hrJrfC5Q^j8aJ&S z|3+1~puO!~e`fn`@vfSuo_HbWm&Ehc z_Cz_yYB~keRZR< zd9{OAM(*_uRlzI66?}}IVoo{H69L)D{S8Ntr*74kXPbrhe{(qs7F|6z3V*-z`$N6! z#g~2hUcv7bt!LA>A=MMth-xXE@Fl@Cs^*yBQ(RYT?;<p4zi_o-!J06t8Y*N zpK~e`JtFtp1;j*qTvSB2WMiksOkI~tsjBmW_DUQwTaeo%&-emd`GGN4iBpdfihMq5 zeD8yXdw}3FWVTE9i6)t68COtbQ~$s1D2>#CB5!&HvQ;mSjiI-$(v zXj(OOQoDwk;T*t!lZJ|EYBDX%87Ckf85Af!<;TAuVdAP4E;akiIqh2n$qU#1BI!Cl zRtBv6Lv#j}l(PM!A56OqfJl{AhaXIMp zCpsJnqP^~k;DK_5o37jyFYNpM8k7)4|Lp!K5ZE&*$$1MdvaJ>OTl2T99 zQSMlHqCpPEks*N(=k`C8Wo@Y7bt!+^P7@T&Lh!NRqUhz=m*nx)H^~c*pWw7L-);mh z{dFTeT+90D1yX%{VBp%W*}f)|x7mxrADqcf5wbDQo4s>u*+qFgc14-dP*YvcHfh?! 
z;ZfN=XCH}dj%_WTEcbz1Ma7nuK|G6X$!$ThRSmE`!@*|_(KgH>Lp>I+(cW)3U)66v z6%)gNs)^_!s^7Xh?vz`tcGGh-woG$ZY4E=IY^#S(tzMsk7V*0%SS$Vav8FLfukqpf z(eOgf+l=n?tugf?J6O-AvioCwXqGW1PaBE9c@-eGV`E*MBny65j_7=f-vq|2n#@_Lq#_DHZG;>U@ z4pkymmd^07+A()tz$DrIN^L!qwOgzF3`f@H6rNTN(LL4b8B%>Ie)8j#snpB$S>t$i zUGiB~E5BA@LEbN1H!Xqw-(TZXH)!z)svdJn1lhr9*K38L6x~(S+|^anF&WDmbCI!+ zPCl`Q2WsX5+aS(vfA%ASqQga9CiwUOqUU5cIly3%60eSrNMg;&*Q&8IgYT&Bh!9Tk zW&+W>wznbu)^BEXIlRC5P{MRGm33nHQfX>GYRCoaWGyWTh&&k>qsl?r-AFS!_JwL| z<1wG)TlZC!?8ETbtem0bFg^XeuS&;zpYvsS)WB3y7arYUGq3or^2;~|Lj5hc{q$Zg z>s;KRKsH~WNkTnPZkF^R@?_*mPwDconf|z2mT;a;@<@tDt*W?@NZcBRF)8m4u;YRI zX|cp0*v>56nTA^lhA%ILAtTsa@5L(Z8ZzL3hv7}>AhDht=i`Hr*Dyqw6-;^=nYK(p zs<^5_DqwdF@wcQqs7aqX@dq|lMzEv^CH3Tz+Yc9lg(H({DQa86laA{rCDz2dFJ*BG zi)Lgg$?B6sx!u@SE0t{PvxG#*daWWST$OCJy-|b=9fY3{*2Q+#icx;y?|wJ+z24vs zq+q9li(8XTZZGg3t_S-kikDQ{TDxr1nSVQv!)}wwfab47+r;Uon2MkzOs+kZ!jkId zG$FW7b@X0xBx(&7G{Hg5P^qa=1+xyV=p@7C+7mIg<9kFU{0~?HouSgfD^LN0hPt1; zZuQ(}gdFqI=`1HqZiXh zuawe9IC>WQBGY&aIr@`q@vf z%UNz)J!41B>?&8J2(_ZO7Y%E*pCX57bVXBdF<`OOXe27YzaYe%bOM#cUBhT*eZ|-n z>+AGv%-NH0qq_X~WyH&0m?90-l6zpio5AxMX}K|XAV?O6DL#Rr3CmMWg9^B#`13<~ z8nPnbw@m1YR$5Q<@=wXO{n^HZ=;T58>Pn4)i?1hUhU9Tc3lW#tiyY}xVZ%n*Exb> zQU*owhCDt*0>DA?;GzGg6)8=X!^-U>AIEc{k3{oE5pK?srN5VT+OJ;otoE^s(&8O$ zSp@L==vqaUZozL|)$>%j%AeifMWOZ-`@Jqh39akPZY}M-?YRd^J8z@e6)wF!vq0oW zB?-&Q6VP>jWq;VSMY544LdyABkNufmyJnfd>c{UNxdo?A?_nC;_^<5M4&G>fkK7yC zYjW%Hy(rlamw=eT+Ro}bL8){{dSNBe-yzcBf_!e3IbF8w@F|@jbR2&zuh~W^v0l-l z3LEN4f6l2AC=Zl*{LF^T%Ej2YovBCrxfhMfri+Rc=)K*wH;mvjFB4*n*n54}bF@-) zKIS=0XroIL%l4wuVE!_shvutsc5KtvC!P6%hTDtNZ_irK=6*K`9j?BdsFpBr$8w{5 z8#oS9Wt~`Yc6OtaJz?>nt)*g4Z3cl9f8H9s&7;FP-cdQe7ijq9c{G^)S2UGrZsS zE{(eo_Y(E#j7xsex?(S5`3Y)tz!_V~<#)MB$5JjKKazc1KO@ZrW}^f)s9DGsc%kg4EKv?}N=ka>Cv@!%!B@@;g=Suy zCNRMiT|cFb7t|w}=jqw#UBt?0-vjWJ*4epSWY<2-3T?jDN=fOOnu>O~2RdCBFQXA> zxl!B>Rq=0o)Qia3MSJ*ycJyJXWJjb8dck|uVs7O;pCGg!XC=SJp@pU23|{n0iPE1f zjH%k*dCxhT?F{tN6>ds1R~`v1)GmLI68f$cBI=Mn=^6Zrey=1LrN5I%4seDQ8hGJs zXf%!);?`M)_Y9mrou08L!5Q!H&iH@SK7l ziN_04g)H*UDS0-`(S_oA+1jc$`w~Aa5g%5J2#HO%Q>p4Egtu)e8)LLc{P)H@Fk{|` zT{$2BaN8Mv713@u9r|6p?r>`FheF5VJIWDLQ66_pp@q$IXu0{M7qTV?X7)73iLV3Z|9 zIaIawIKwmmrH#vLTlRF%s7k8}vtYd;=7XeyWUZk)6c&G%;QW4i%(SlpcUFvY?d$nr z7yZYwK2c^``qFkd_d#fr41K(>P}4nN<)Lr&@hN}ChHKDRKlcw3PXBQ7@U}A?j?936 zvz?e=b{u;pspR3!Li;jkC&IV&i6D$u zPEjs#crD=Ahrm}?r|^bDtlF0h>qLU0b=g?q9#F83xCahhSs4yX81I3M%IHoU_@VPR3m75#0knXIAckTiSjTsQ3DJ?)lnoGZOFXmOP>0gfE_|Rwt;`Npt$> zz2GKS#j^(&x13>+P5UDVg1{mRLDnU6D(uyI4`A}Ygx_k6^R!m7-e5q`kS? 
zNBn~3{F|pP9?_l-?e=(kz0yB^R+sUY{v#fA?q2o4d>qPMvvX4l7R}o6T(8KTiYki| zsf^3EYJV)b@Wt8COO6qSaG(9Wpx5XMp@iOMT6=$Z)b;S@3e7x^YD6p@=g(sQvxE(u zE_;S-Jbh~N-sW!+!OIBgZc!}kR~>%l)LQ%i2wdUu3P%aK)%i6kY$*j>NegB_KHELT zg6p3I4k$c*M8YqSW(N%41HsOOclfEi_dxs57f454C?aH(;vQI9S4-+j4*p#mbIV|j zK2$zyBD9!BQbVp$USJ%3$(FL>C%-@4(?U=kxURC)P+w=Ts@wJ%OFuyQJ-v-#R1wji zMw0o{G#DRBolV`PR!n=6JRbvzeC{!+tR|NyQ!XRx02zjqx-Nrte+$jOfP)-KRc)di zr6+z1whOyS0$y|l+~5tzp2c;11Cdz-;T0X^AIctIS-p7*!3cQ==};{(FobUr58O}BHbW-PLU+3h^SI+6rkEFk7#KNaZXmh|~EKjY^twuCL zIhW6>0|GbV0J=-E0sE0Oj`}vktk3F^wH+A8keDmpgh4; z+d1YOy5(C|hgxB#-UFr$er!^P>u%%Ylw%?}oe6VGj&JL%sdi%2{6Zo~E4&sBDyvC$ z%I4>lE%{igz-v4Y2qNOVdrbv?zHCW8Z(WmamGHODUh^4?-7%HRxCc5|;H9Bv9ora1 zG--QQicTH8kDC^&;(PXP{4c~tB*wk$dSE9eH{`{U*Z8|4kC&&u{C;NNr6TUs?X7t( zpgDMrXkCi;ym zqDV$c(_{PdIF-&%Jd|JXWcK1v?*Z_W(XdmMTZJQxgBPBB5}QvBhgy9e>n?q9-ls%W z)|oY%^kX=iqDzUcbc7Uc`@C0uGMuMM>nadH7Lo2;o9gv;nidxJ=u~J0){uZG$nqN4 z_VumHp@3=I@wHK^5R(X|SuOZ*5=ZwGm!7Mu7ONlrez?2%E53NWU0=z1u0@}&X~=PF zgfDf{4bf<9ox5On4_HhK4vX|Jo-aQ>5m~kB#>^EP@l&*2vnE>;r?_R}UGYP|r4D$` z>$1;t|bl#s@}8DuHPk5vY|H6PD!>!ROH;Tdke){_nq-%TpY)C0?9VAx)#+_8&|lkZ!i^;@4!(*h5fWznAqzoyJULKo7l{$Q`Rq1_g7y8f^HNm8{CHy zcbJe>nZe;bc>r9A0;b-33$3uUZ3KG>7@j!0?DRkZh$V?HV|H>f$xOLt)o~jR2_EY-O>weu zGQ2~W1-P|Ai*tIOt459A8L;P!wt`#aH zgn_Fc+jBeO>We9T8q-;)@#JJ54lEWYQC`-8N;>ce?P$=ib|%k9b&spEfSqYs<-?nY-Hksp(RDyHoY^ke*OhRtBrawPn z=~hyqd^g}cLiu>@(l+u$zA5VH-6JixEw#H7-6(~Xmys?gq-_fJf)*VN*=3S+JPZls$ zx{kc&4Q(<5Wtq*juDgb^7%QHfb^%92Y>FJyyh12^}UIbUzm0sH0KD>>ICYweqnBQCLj9| zjNSZ-n<^)-7I~5@ zRX{)n`gHgYN zOUVUwsG7|S-K#Z>T(nuFf65f)R5bd0Lu^DWHw#)K+H-z_XjS;d4&&yoE|Ov$yer%n zV~s>#wKI2SN1#1HfCk;~0YaPPq%Jc8%|fFhnn9{mUGHy_y>j#mA6@vp$RrIt;#W$^ zygo>3b4oV2w?&VSt0#VX`!StK5r|9_W3AuXQyUoExca55TMuWeG3p-Q^j9YSm9v=A zTC!m=8wsiWvC#}#Z#uqtRzBUrqH61PqG^5?ip`h3o9Vcy8a0$nPItZZgMO-L32Qhv zp}=;KtHS7)o$IX8Rf$2O5VPIdwKZu9^v)C8z^{bOGY=va}e5 z4U-6Q*q-2beDN(F!A6^PkqQ372NQq?x z*(agjV#kzlQL8Mj=|vArBz8>6%BR>Hlk~@zn-j<%8ypak94=kJ&sG&cNXYenOWEq#TjEI-%?VsOD;^1-Qh^llwIffglX5giq6LOiyN>R>cr;tl8J_taf?FdZ(5fs~V#JcFl7B+Z(UTs0 zwO-ls22K8jRfE9Cq!%ws@D*7%g7c2LYg$FZs^k+r*x+-1^|%LCS6|~G4FK&u@KCpU zzjbN;*(^=7t4h-D1hh-6MXpA1geK%dkSFQj3exG%I5k_l9xd^Fl4t3tBskmd<<)g+ z6c~cr>YX)99Ikmua_HAyQ0yr5W$cuJto2h#NzP>HqiC=U@m-olX;n`|t&;_2$?8Cj zvZPySp%=M^hZMNlk91qRZ5e80SS9gnG0JDWbO}!j&NRbVYyt+ONE?aC(cL z?8Ecs0B`BskLRstrQ3uc-W>$xhEP@kNSbQOz5iC#nI!7@EdEoh+k_}~9HK~v1&{st z)X*ID((RU~lkYX4xiR#4zqeetoVcM=*;+y6W^RLz7nb5@QGt^ms}Adf%kJw?;R0vp zbg8v2fj&nq%et#F8k9PQr&Jef6K7%4R6EzeB^#i)cOHYTV= z;@|VFCnd#?)CFri9~#{r8}T1Rh4sT#m`lq`g~yaKESJ+THWRPUGai6stCAedc^_mO zHRVSS@vKffO2zj?`esR9MuW#lPls{*fjK2b`G=UB?FvWsOYeLet<5B##81L0SlXoA z@6*^8J__#!9sKxtVNdaG*lZhqp7OTT26laVGHhO;-|3h?5H|i~sK1^8z+h%CTbj<^ zO4P`)EpimjaQ89q2SwDpVm#>A!emfe=T2LmO8lIxQ2T2EoI$mP!+YP}-104i) z$9}`ytR~=um0zXj=_Q-Y9T!H`*4GagJJcgS@vzp7Qyb_v)}EKs#FUa~RZWyhX*iN@ z!JNEmJ1)kIlxjVx(@BOX)}3mO1F%o&6K3^N?cin|^B%X0c6tqp-TgXdPq!Prv_G_c zCPJ2#(M4q4_q?7hP?)I?zP-UHp*_W*WG zh|K1@amb;t4B|!zrJw%q0!OJwT>> z%O*{Ubjl-C?}4^oNXNVofd3Ck|0yX|K$|quP1TrcV5c+LQX~@D?;rr^HIb5BU;2Y^ zj=#00i_U&uS7e~X$cXK$aH-G2o7mTQrYg>XuCr$)>o-~z6WpfTh<#imH!54U&rC60 z(;|oKI(3Il(|l{4&IP8!56PH)gt{V&1%3tMTN)-+2L*YwI$nxAA^GhoPFCM!!{k^- z=JA;`ORZi9ajqnHtuw$;6lpj=r!mJlnlT}?ei^jl+b^ifJaJG=4#$mmCdTWdVc-CF zdhD-8x|#+l3YSnwp6_&0SEb#CUIDso-2{XDq+a(df3nCQpCGlSbjJt<{NU zOGP-$AkIy}zHe8ZzZ^Y;=F-Khgu?L9?9*8k8_4JmSlm7|^&Ybu-+rR=c_=$9i~IO} z5E>CCW_ zotZJ*CWkpUpQn_Vm?>Mb44`-MJJ3F5;e4}Zebns}tfgTJ*YGf*#rB^fYnbEXOZiR} z9w{R4LP!cRi@FEojIOWr*KU<_c2b28Bg-_ir4u~SC-sPo`h9Y8f4RtceVR#?eY8gJ zmFuP6IlSamb~ zf<)0Bo2N6FB~V6O#H1u62Bm>GRkSei+;H)eq(P1yye8VsSA#`uOo4UCRaLO0QQ)RR 
zRxA5r#8(fjh%4*!gRa=0?JzZ?$mrLl8;G2uw!*ZXiz2pbjOwWYt%~xIMPU6BOt^H} zjCl5v${u<;HR*0NZ+7!y54=xI!en`Z6U1m72~=-a^;g7!+IxUirbFgzEB_wg;4CdsFuDig(v}&n zPbVQ?$fkdyAVDw`#n;VKf_Gw`W@bi)rNzgEgp*T*|4J*OR4cwi1|9yS8ngaREdpR| z{no!%jeq~Z%aT_?=@(T*qEJ;?l9G5`1>-*JSH3Y15Cs6KU>jZDf2B2L{;b5cFs8_m zLC-%a@|gb@$)$PyKQ#SkruF{_IlWyDx(5XQ*;dfW1ABvfDdS-4RI5_d?07PKvJc0Q zk-;Ej3}HCt?mh2W5mgS%?Y!lad>qw_*Fk~!Zy;~B8BR__?#@9WHT2nqVoESXQ?$>Q zoW~|KGg(4GcR~4((V@kQnXDU3Eo7qQx7LG7gFD^Yq=kE+FU?$r*=_~PVQ;Or?h|W> z4#!bnW5Sm+H2TcK#Z9lVL8}OJ~YIAevL=Uq(?rCqPCGWdtCLRxw4o z$2W^tm=)ikj^@_ZRy7{5G6lP$Dh1lNT7=IDo^De0V4MsuFKMLpeyz^yf_ zZ@f*?Y>)MZW+Tl#1R`}k|3KC*odUw&3YqFU%%E5L`=a%j&{9C&ghoA0Lqpg^d|aIr z;I5R<6++FWpv*Ja5_d?kfO+VW*TA4DKRZb>Y0vsl$-e(v0#yDd5@()IfCV&tDeVmh z=HLpo0|8yaY>SP-n|vQ))cTKL37#choUbRiknq$BcX`oMtrtr^X*quop^BT7s<+X% zO2k#h+nqc==?jFy>)d1?3NAr9cFj7GM7YTpia5drO;Vha-WGklWVXZgP-kzUIk?ov zB!LW7D_=LBZ6z#rDrT~0-O$<7Z(n_$Vtlb{_+hMW7Zbh8lrd49GG{=WK%t;^4=9hH zsbofIO+a~8KduX&9=CU7jeJY*>+X{ED^^FZixDzysM5Os zV=C-ymP)*DEKT{9fmq#v`z5hQEiZRY#r*f2+Aw3vE+wrXk2Y1@)Vh9l`qMHaBluOC zb)aa|3|@u3mPq|}FP-XHepYLB(!Q}0WJ$hgZY#oN{aHh*u!FyW=xvdR_U!y*s=2zL zReisk(OG}^)ej$m&vpgXEs-nM4RUu&-Jx4A_#HixkWk@3r{S5nl*&EORk4Gp7deKS zNFXG$uJvdfEzd)Lxj$Gs)Xrv>3HE@MUuiw@!8v_eHD?rWyD%$pA#T%MRN=#s*@!>T zkeC6^-m0azh{M|3TeW3zossP?!NBL#IvSZ*?}~B;<&@8ZvU(XKNp~k=1BbjMdSHz1BW&E%ycOXjyv9L`$C8$EC+2bNe|8!um?LqzNi-D<}(>rj5_e+dlegxt= zO+V2!ccTOlA7E)0%YR9b|CjI3^~itRS#cm~nW^uUIop{;!PAg35E9P#kVtMs`pr{u z>cwH;HKK1qW|Phqnew^44uWh{!_n_>??pzgrmOezr2T^JiC6;E{D6hayW{MMuUpBq;IvLG6`ca} z14{ajS{T95A(6y#S*~m}ooc zNRH))BH=>ftId3A^GlpA1nkH&%xWgv7L=*aaOF2e;WoVg8MXHoD<;owe4QZ9(!WQc=R!0cQC+{wnWEIGrEUw z2#Rvl9#QN`pPeGSA-?OnMR%7)2RHx%fULK(@N!k}B`g^~Itu%y!Y=-s1@rV^L`+** z(E`?FHJxVXC{K~3>w==UtNn2=eYYczwXFR4vLfK-hO9Xt+c=r``mTSp7*^$r@3jT< z5I!I8>IacVmTY&$eAC3F-C+NK^c6oC=q97*q=gAdS&6ZUh(|gr^H<`BGo$g0MaDF_ z#@SFyE=y{!^d55OMX^j}>O@V% zT8f1Bp>$e4D9$eP;$^k($JdR4uvH5^IVFlA%Ko<1K+#N7d8GS*bTRtYF(V`u3b*`G zlSkHR)T3;(jY{W2o@0GoLydc?B#T*!89rV)c{Weet(T0ERTQPe@EmmG8yYD3`%^+q z!kd7{rc0}^%CkjGIk5AR3C5(ERPc`zb~sr%i$XuR*hCgb!5otglr?e7Xq^t<)>Czb zxaHu=uiLak)7!%4&MWI(%H>6BmB5 z|LdI|Pia)lH?GH@QC8_2uVcchNn!4F=|6hW=;@6%nK`nvvjb%?mD_=mGq#mIZKP*}5=#pe5|dVJkW22MJK^%_Dld|4`slVGTtpE-d{ z$?}1tix$^u<~-8%YFlrFe9%lQ?=UTw&vXRWd#Pmz$=ptaE7@&LxrsiF$`sDvhq|XI zK`w;GzVB+@)E!ZLa+k7N)tnCbW}Q3VoGsjMOWOi^M3XGvDax{nqZfT82_9X>mtO5` zunIddjsS}-c^UH`m=mLW($yjJx=;^GM7qjsgiRSIt+iL`9Ia3yq!%gnI^h@1KYzlme~y-yDH7xppa;?X;tkLjqVqiDC*he_%Qb)Q$}dM9@U+(nyX)knO0LmQMZvadwQp6qg&(hAke zIxO9(cbM(2g--{$8MIbel}(icBs9<9t&?kJ?;X>$2?mD5E2 z{1`V%U|9ZwyexY{@i*oSQ^^|Dd}Ij$unDT4l*G=S|7Dw4>#Q9JrV-%r=v)ox5xlDJFn z;^;PS5Fk#oKZ7P55~MEB#wayRRvVS0WrB?A_c7tzU-5Vox3ehe!lYifzJzo<1ha?k z3q}l^>9we?N9)x!)aiY(cX!wF*hA&qnwyd`xkWx`>guHLzITa>L{$}W-r3vf8Wy{* zlUJ*>jI!9#ta0|g#HM_wi5hPLwKmy=T{QH3FX%h|gnTiJ4CpX}yd$diSEVp1nW}96 zmaYJT9KX6{MrI-;#~cSXT(1eWUOh$-Ba^x`l&|DW`~b4<%Lvm+cYOOx^Ug{x3Y9<& z`ODm8TN=NgSsAfq&Z%FAJ@{+-W}4BRWC&Fnf3m~A5=!VaC2!5Ph$P6J;-upMwYirx zvJ)pPiRuuKxqeT`&6V->zWwUvQ%)nK4aIOmQ2AG*fy`CVD&WbVEQdcHkU9SM?G{3x z--8A>8k-s;Yb)-ySe*)G9?BwR{0}XU8u5h0JnZC-ecb#YFfZPRHtBDqL9ffiKTUM6 zF@SOFX0L+$`F`x}j0X_@!EpLd8&S~pvRjRnJDLvRGGmYkA=<0f#Nc<<{XM>@veYtv zG-v)~+ZaSECJMz8=mfm+i<0>Ftl_)`Geg_+J!fxMvL;TLs_~PUt|@$xZbOW6C)l+| zskQr47Rr?`BG4xc-}+`&ZYIaM zB#|>P#cys(>(Qr&Zwn~dpz?N@vNTM>chq2v(Thf9k6y&4#l4pCczq98mS56y=9i!v zhv|DAbc#30aE`Jxxcx#1TJZDuhX`CvzbB9t1ghR3tqw}ke}Dg#{!5h>s@mgibS2oB z%xi`O4MjjZUacm+_+7!^@~qwnDGod{K+L=8G1W~dnbxJt_S`ylip=~w&dMyZ9|flqV8IY&m`-h-6TR zjMoi$8__J8=1)Vq=}tr0!&u)?eF~(sCNVX96CAV^?|G8lLSG%9`9XgWg=T>tY1W&5 zloX%m|47|E9{YRI(6}@^2;`r(te0eBNtHUFCDTkyLO79}!yB9_<2EI7ICn(b&5XLM 
zlAFKs_V5Lwb>g-&uGk}CK$@{B{G!K{H1)wSt%$%k)tADkj*qBDU#O5xhT!pIF~jRL z-ks=-pF!C*C9I;a4Tzh@>Y~akkJ2vee#UK+5A>MJ?2@!jlbKFGWC*7?wRD{{>e|g` zM->|AEne*1M2yIEk~vGtePQy|Gdquu)h&0rvF)A0*D-mec;nvG>b<}@hNH~W)}rOm zwjp+btbsnb2R1om4rw7#TF4^6XR2fCudTmUgo>`B-g3H*Xma&8d!WS4tdb2<^?1G? zE!9IKAdSZ5O?tyC@!dpy+RpV2CK3KJA%S`&E!GYUEO47px zXybDfW*5d+pPk!TT~F7GMWU8LU;;0$9OhRoHxGLSyd51@J@z^=S;&~FZb2`U=kE?~ z2YlRbHPZetuJXCxJ#dGPi9n4x%G4yXIowvy9W2t%xpAt2x&*#DeZFy;-F$=*lS@Hy zT^*buU%2A?CC2uR3MPA?EQ{Y`qhaKrod-9+TK@svuS~WRg~_dskI&%?lW)GQCuoap zojiZMgAEQkK_(Rk<0 zt1f@yvc<|hUM{0%5kYey-=)?t_9_ts)|6cIjYjOyllf^GegtNk)8t|Vc>j21n70?M)o!56*-eMtgW>nhC2y1C!D8F!Sqmrf9lVSzHisobkKB7I z-E+OHJ3SkAYoIl@kNUKfF==!@ZA2dFLmsSyieyNKF?4e3^q!xy@R2UgAbf4O&ITWc z#b_A?TkDCwOE=FeAMvk3nYrX#(aDQ&+!w6hWuFiNxzC+f{#YZZ6U7YlA$k%M?#~%T z(FIPoxuzSb%vVB5V0`!B=m#DriR}4GJy>X3>n^8+M{ruNHjw>Bfn6cYBR1_~0HH(C zX$#$L5xA`3R=Du2=$DBbr?v2I45K=uWd5avVRp+BdzrV|8hVg8vNG~)1`l?D8yeO2 zK4wy{$g6V&zcu&_SAz}K0e+?5NEJSU8WioM+TP>j>mjru{`(Gz746YwRY#6JXX5MT zRPH+g-bY?AKk`p0#CM^SgV=m}M*YzcJk3N9=(PtF}ZEc7(xP{1A_nB3Pv?t|FVw zFyN9ZI^`}wSq0KyJL1|G!dH_}N?9h^)zxDOW_jR}nTf(kH9az|yIl}+YvNAt z*zBq+EmYfZl$y0%4~AnyYra}>C#0?Ez7bst=#2akZL{;{gz7&W$i}vXi8`dVc&i76 zXu={&a{THElQ5fms_{z_amx=j5=lFnDa8PO`mHm43S%Agb)(; z-sio2sEh>NkwpF-$?iYtUW|d4>p*pA0%XK07Bm*z7xJm0VwAmxfe<2F;o5$4=RyMo zZ#H40ey1X4>s-GOVRVM=8$c!fScTCwqIsiiM)6i&WS8?-1Rb&u7feVB?}0y9{t{gjjZELw=l z+5M@5ASxXAWHH*X&!B~E0b=n7S~ zyCKlKdGU(N#!nY6$-XqSXb9|#Mp0-tfHd)JYpLf3oF;m28ed-z%^|fn=*g{I5`^}0 zV1aTrH#WOZi+Ho!l`Bo|gCTpnqc2Wp@eVv;?Frwx4XvTk>V$><>b8dcmOuSimHxSH zgC(@KNSShOQFj~iP?l1f=1B3ok^0*uUQD&~G8OYURbhiJT9lO8inU{zkI7SrzM~c> zn>zy$+*Y`^m|CahiVx|&TZ~FC+m&|4TwWh(-i12I#Oe9U(*A%DNQz$Ix)vCX78{qU zx~syMVlrz7qyyeHggpv@lZIf$aeRLJQV6F|Dxd%fz}M+ZQmv$jt5=P`2hrvHn0{rP zjh~&JgBK?dTi2q=1!~9)5G}n_mPBU2DBQ|iG9wcyBC-&~qjbm$1Z2`10&5kSHL+9u zKUe*KDv3RT4#PEn_HtNtD=AODeMU0JI}I`L(K>ieRafjLsMj4@sLfhES)>)w=AR;ihOJSd@!b(Ebvdd zkNaPpHuXGeM5rcg+Pxn zKKEVwPV*fuc0Kg^zDie<=bTn0o%VYG($r7%5<+VgAf?bFL$ddtudqAI09k-tQ~G3| z9Qg+_9fZ$6WA~o|M+V6Z-JQ-WxL}GdQ~IW#`z2xe&({*(y}f1Z=en8&-b23IHT>@ zpofO)eh|+zHn?!R=qk%e^b2w7qt@_L(X_5>o)q(Z8<7U347~MMWNi`#IIE976Pnp= zHTiMrrqMSExaS{PFh+pNJAztX+=;%!&PDw6iTYSWt@y>0sCj!}{Oe@kz}FtlboP_p zYQRS?OwuIpQlKO8IsvgUz~U^Q=A*~?)87P?5WljJkwasYMtmuK@qB#$D^^L<7$a~G zczXmAi1iw-w}@nC@?sh@Bu=4nO0b>jN*nz4JtU+%1Z7S)JzRm1+9*xDI;#HI;G2-- zRHj4gLa;Q60`p31PnL<+(@c69XA1^*s!}VkO~pG=vP3(_Xc``thMCe;9Hw;*^GtN& za=qkN;P0hLwD{83hO(IcD#qtrhS)1}bUE#V{kvpsPj9*gBX};~27f+HOlEX$JtUN# z{^msyd(^FgpTl+Y*rFp|9LMqNXl%M@QPi@Fbi%hLvr7AQ>xs#fL|9^&8Km{;=$Y}z zXaafo^RZ;W@G;WILb1t4M(+Q?X#!BUHG<8b{2nMPLoTd`dTwb>de|$iS2$iqhz{h! ze5PRn_iH~*$DxKb=3IJo?!lXPbv?Sf-l@JoG5#>}n##WQ=-@ExPuDh5bNo;L|D*DOHsf}|c|T<&o6u;7 zoQvvng)sTQ^rjjH=}{~@UU4i)A}#L--a9r!AgH@gW?A$wl;QZy`5wp~*FBM=ao5&2 zRYoeE1JGG zpY!t7p4c1cWF4-G<>csrSw3QbrDWHTY##yn8v_F)eaYVMchVG6UXEB?}wI z(#2`xK*daTHjVPk*-AQLnND`k8OncJDu}a-M22-)!?EGQ$&_n0-``tKl0Ya-vDuHG4D{KAg!yp3JoKalv>_-m)rA<0}a|0ZNSj*yq;{R2!|iQc;nKGH-n) zJgTsSy$2F04jMGRjy;{IDm*JA{R2eMu0mEjPDtp-8;KK?b zrfn+Alps{;ss!!YB1AgS*Ge5RLlkpP4VI>qHXvdzLq#u@4m}HP1 zKgS#gYRX|upj#Tc(OhQ^O<*k#y@KS27*qfcWky1g&WOh~VxJgF; zwv$}@^}EL5j;M|Ru>%}0W`#p=RjjS>E=$s#{c+6}!>`oNUvG5H!V%4S^vY0=#$cLYz&Vit_dQ>(U9xG0rK6yLZQIpbvrjZ>-!2E~{gA~~ z5t7&$m;FrQKL~1xA{AXxcVjVJBTt#P39H?{|Mln8m>t7}ak+2{TCKIiPSzd!aLI;=Ib>bHLDUUyvAb(bp?-)A2?RZ2A) zLrDcJ_z39rDn70t?S0zo7{DZFD;W!aG-~L19JPuCta(B%uuIDO>IL1t_$*+U0Pf_h5+VfEx#*so7uX)b}BWx zts`T6EV-B=-p%*XTiBsdf4JkJ;~U9}fre(4cNOl!*Y~@(S6WIfIF5P(oHI!SqX|r! 
za~VHw>y2B*gN`2DncGWRB^N~1+dG!1sf#H|wVLM$WmUg3XQhH-q z*4p)4Z)vl)p~`nm$>(?%87?iez13$nzL7q|2!v~eLn+uEhG>)BM+ecoYj+nqI(n+3 z!y}vf#-K`ZT!U2Z-Cd0Uuz&LEPUPu8mS=AZFv{w-kTVzE9MoR=s6X$DD+Cw33GD@6 zP&Xo#j1OOfaejjSWBDBPONEVi$QbeL#42QK=Di7hP3#5+@4(yiQq&+k>KJ$TUO2l( zP4!rL??{hxjoo$VgG{w+tqcr|v`)3PFCO8w{nkP%zGF4~q^ zn2}Bf$a(dz2YLYc%LVFM01VcD_qDd5%JX6QN$V-Wdp#tTTziak>8`QCjOm#{0is#| zZYUI_k|1UubQ;h_^Qdd{cJY3;exitM)A1&AYA2gifMN z(*1&8ZA`HdqU)oCO69gyYQ){>SOiJDbdD|Z{MWN)KwL#t6eUGtL&KZM9wSR`-V$!+ z=&)+BId+cbr$GIwA=IA~?%7T8{^H#|XU{hxN=I8On!7yce%G_MzqUP&Y9-3^nivxI z(_z6Q_^R2nXe=o$i76G@G=RMCB_k00!n~Pd;vU7$R~p+sf1^zl@7=pTT!7uRvNxfc zXqCx>SlM~x`aZ+SPQ0Hm79@(^SnQ7pcetP5&|G^l1le3Zdi~ai3gz0ojUBi?Q8f~T z#JGg@1%C-;zEI>KK|?$@1{GECF8fDdPTgCly*mB`$b|xmFB|MrfTGVGc0Iz02}+yO z?_E(8Tc-g$Y)0e2S^s*c0LfYs@centw-xlH&}SL1cIIsmlzX`;0HimRkLgL zf`s<^q1`dj5nchvIg3E*@@>a~MJ-azxNyC(TwcL$qn)7~wk|kG4kIoeN+;-^=}WRF z&I)&DHD9h(Ah`Na#kj-5T$Q0f-X}D^2N%93B`ZujaMF+?uDZj|M(+t=ju?{y?#ket z7jz#4?cXycUr0m;7g)j$NT_lxF`!ZZiTwr`CB!;BnX3Zv2QURtAa<>T@Ou9TFDLk#DXwQoI|cHe1J zGwv6uL_^tMHq49nezYLr@52J(MHK5z2>HGVIRt;+CRKUfptjE|KT|HnDrSzB^+n*1 z3K)&5*SR4MduE#4>lJ2L-AHIsk)4>;O0fOi<&FhmyGz{YA?dDs8!Xsl**EV6KO0hjQmsDf>0~!n*4v{&o=@9+tu;vxI_1U~K%yyXx2b>Yx9JzwFM=T&wkR6C3OkV(AMC*QNA-qtsb{ zepi03Kj`rt-r2}ww$Q5Fy@iX+3$cuRPkiu8s*-cfDS~~`Y+TLW)gR*U$H?|_@LB59 z)pAn(ljHDK@z1*pk!sH6HU>tCN|iB}@ug4pqJM&hwJB?EkHlwn^Pyt?P38^F4iiSw zOOBdDnQO7xHx!@_#if!5fc3$U^vxX>IM|&V9MM5`HvR-|P$MGDS+zys}D+}61LoVS618!L%JJ4Z@;IyS4>OQrGWmo(p~ z|F0Y7?Vmvs{01b`#i<%D?)D?j1aR(bf}uF>5*odikdO+mmis8(NDQy)+ut<*|Uo>1zI%y$YGW}BDKNpk3LbA3N5!=5%!~yzflWw_lWw2ufSnW z$YefeocDFfbS#&(t^8+% zu1)famwNG2ybc+3erTOx>jw<(fGlC^M(|6ONX|!xhsWHi#pMpqS9tUz&jE;tq|XVD zYb40@w*%+_+!x)@R&kP~;yp1fj|SUP{o-WmA1EJXs4_R24~cs4p|6ej43`W>9C$;0 zeM3+-U!&}x1%JVyOC5kgk0#v4e}Xi8xo@dzQRd`(<0;(b?%ht@vOW7p=8w#T#>FOB3tDq}?47^K8dwaIJ zseoWpK}G(eoU zaQ!|ENwEYE(GfY1@qjs^e&wsYIwIj)83^&ZD5IR7LC}BNiEWm35gUchOl3h6<$6*=66eSv6ymz2)A*y|73+*?p8Nz0S{~ z=V+0K?lG!ItNlod$@NBd2lu<1qxk*bG&F4`;e(}oaAAeaZvE=aM@$VfEd0ytU|4(} zo5C}P=aRiT69|j~Tc5JR;s%PC<=amK5zf9IlcKsH2c{P6*XW?Z^So+gedWp}vOxs% zynZ9O_5#o zRhI2!PY|{c=?PbaF z^rA99KCT`?kOGzKvR6odxP(SF+^d9FA?Eol=57hH1aC$avWEV7_~cU2Pt~xo{M_EA zFl{t-Xi+^;+?y)BiZ?jc_j>ve<8fo$_*h z&+V!CH7B;4l2ZivSaHZGJ@24mCnRESop_a%83HL zM_&fkHp$B#=f(ZXRpVuQF{iN93E|jH@520PPQS zWV@iX)6}>xt2k%#Z@6=nOZrQC_8z#57AmL@D~oLsjU8-(!jU451B z`tr_lZ3iQ++^(%7yj9LAXK4>ZOG%JvHZ*~xK!7e;{wlaS`p(>Dy%OybS9$hlBUbXK(LFj-- zdJO6Q(Xjnu;JP8C)il?(+J3gVAf=KiaOTkB)GR^sb;A9Ju(=?-ay#zO>nAg*?;}|J ztTmeS&TVseRmTtF52LSQ>QGh}gD~pcC|H7OM~KvsW26+ZIX*vqkFvy@tk-_5s8R5kEO`D!JSCEl-H1?p9qO~HGG#VZ z&}u*YlK-y$18D`Uz0=p;pJ~Gnh>!$AqMb0}5)tx)^Ks_Njg1G5m6IA|BGor@&#guqcoFDSP6ucJ`Of9)@$67W~U4^qWIx8^w z*@hzY0}}ydi|1QKU2)Gxwe(ZK^}(>-t`PA3qJTVe9K*5ejM(sIDsS;50!{aoI-~V9eq>tUC@~i`2C~w2hQ$c7$aAEE%CLSE^T>B8P2 z*GKb)S(!0R3Jo$gJ#_Insz)rVWTk(4PJgPaIlI7%dW2~n&774nJg#mDerlt!VmL|E zl*r41YDTt0ZG&1{u4Z&1A^g5ycbqFtaYn3moXeu|B1I}%5-xTA(Gy0bIl;|YCg_B2 zr=jBVwKB>+-1_u>iA4aVB#Okfc5yq)ZXOa`d+VxtexsW9sQ>naBH)pMSEfDt)=JhCeR%7tnr|N24}a8{ z4`N6Pk>x#&z5VnCAr0_7!GI9|nk5jh*cHDKo93(|hEeynBv#vfy)k~;Nb`&#St3@3 zHn412x7Wub=CiGj6;w>Qg#R;MU?p)swrig!i1kjaKOJN=D-?3V2SEzT|8>D&*5JG@ zv#x0GIY;3JT+y5jeZ2CHi|et0lmp0>93BWXG4Th0L7js@Ff#r7vK!Jty?K_CQ_zrF zNXMzpuc@}$DAdvS(F1PJpv^idTU=;`8dahJ}@(q|dgh%#t)kX^!A*BVUjKymk1Ddquq zE6bkt{0Q=)yhCsRp&YB!dbd=8kiSJ*TAQ5ZBpc^gc%>kVJc zj&(D*C(^$ha&iZVZ^MC-FKrlPXMYDs=Wv1Hk;a_24lke(Xw#EDu{Z?VGLZ^NqF?nr z&S&wP+v?Rp^Vhhx!=G0NH*9~AjSRLn=2O`*vm=FK1$QmK_ZhdR5e1lD&hsacPe^Wrdmqvn$KE5OLQTj#H+(woPRXg#JASO~B;9ibFn(gX{SNP@b&PECdKhJZ!KU;OyJk@E9%H zXVRzIe0w@7Zz`g4Mq^mZ{ 
zxQ!{SQnHx7P6Bw(h4x8nK2RxJqLLNpg*ry5EX1wjslOd7WSV;-e*^?%A%B7CUU*fQ zw2kLZnopJ;?zBvCCw}8~W_b9>$|6yV2qBfRPxafwRD*c!TSiTG#i?d z`pSVR%DA8D-M7vA`ya%UA#GV*&7WsU-`uow1L>GKLepECsILc^!LxgG_0qNX3wCWXpXN%x^!P%Z^s97f6^;H7sdpa zflPC&O?xTOtnteBI&YR);M1P4hLAjR2!U+?WOf1)19H-3KluqlN(*;fkWPWmK0QrF z(!+)L!(dYH@Y;YM=)zqEJf+AoH%RcX7c$Ng8h3|g`S2C0&?rDqo7_kcdlt;2$u)Pc z9$M6wxbHd3yRe`@$K=WTJ)CM?ix>QKmp!KbB@CnI{oOEc0^Z%(=h02HM}T zBD-P?wzYU&jT3;|Bw2N=N4g?1;SRnB4?6Ak>8%@C$+F=1l1m@`iXbwbAb8+f7sV{k zj(bfFUp~bkc}W8v<5*bNBF)8hIQ8V~{Qwk}a*=PC&|Z(`ig1BZLO4s0`tHT0Bg|H_ zXw-s3ix3{`e`FfJNDg##122kJ*{bOjk|ezG5t~x645uNUnz#2 zWu}P(c`7E*+}R#=BgT1i#Q&pwhHW(g&7Zg%{*XNm7ivCi*pRYus@b4WHyv$_Ls1Nn z*qyGnbt}m*DhzhG+*zvNh8uX8?P(=KHAKs-A)JhCyFY3oP&XZyM2<5>sM!jpbL1Rc zrv+(=cPV5TG^WcO6ho|Up_^Og(hReVv_s{#2JIQ;dQ2DZy>XUH6Va-K8 zKD((!>IgR{7j*ZNhMAd4v3q=2tV-y6TFk+J>4M?!Gb^*ky5d`bZ%?2> zO6~j^+G=39Wo8tzX0_vT2&;ETe2(!iPA~ms`e##zr;j1)B=G3#>GOikJ<#6m`tQVos*wsYa@!^d_i0O1reN+8p(In}!x|D5_h(b{h>-(KN8m+j}yLQG` z+O0o9hW;U?uHskz?{8HOF$|lh5U5t=s#RDe2X}|r9^Gp!JOb+OH?#Z>8>56KOI}R5 zX)3YQ9*ZN5I`tk_Y=MimD?FGYx{rb}~Ja^eGPCV8QRLUctMVZ4t`H z90A_GYDPzUS+DYkGHGW0srG~)t`|<0rLu>`E-1)Z$7uLlJr{kUU?P@&zoBGx|FXER%y4_%WJGjm_Nymu8g-RmpXKS>-J1QUF<0Qb8PB;+CQ>J z?xXqb%8={IZ?bhyp&G>oZuo(_rRZR8V(&bm0@tO(cOIJwwA&r13svWsDE2ez`~(FH zK(E|mTzjr#(o^{CM#)0InVu)~QXD>CdrfxUZ>KTu<1dD5*r8ed54lg&-&A_f2YzF_ zf&LKz`kqW8l~w>e>OUet_CLyg-aFl!%xsd zuC||`7gqqS0@X-hp{zfMT;($kn^$Nbk!7R^>AJR1mdcmpjx`*UA_@GZFN~xFjFkfg zG@=jqsxz~Lr!;B$Cgd7)>q0_m&;!W6{0Jwz>jyNv&~QKiu2~q+c&6J+SxE)2U%1)s z#T5l-wg>X&BNIpeQWXoV)D-KOQ+NF$iO8sn@pf&_$Xd$6qx_2lXUgHx%nH5hL(G*I z!M?16pv-*iOvw?;-nCII8JG7wa3_UWHwP%2UJtopxeQkN%_Q{J6AyYKdh4nc)u&5J zLtWV%$;l@A#3$oaY`yb)c2uU)?#Q?|RVKS!0W;k1?^0Pik@W67W_L#6^ z;yhLqb3WD;~glN&OKU33`dmPa+=`6?jqde`@!(1V@*f*7tnD=>5MgeZS!+P8O zVRZ-DS-HvVHgl!M$Z3+tnH@aj+ZnFwhlH2vaq7+E2P|#TLGSq%85Hx~x(N~@J!b$+ zm5kxzps=n5(@3d@UNft@u}X&tWkOv{mc}d6SFsa(3a`!vOZ^|D^?K6c)m9uBmWdgL zXX;rd>6}tkGgOz~$9D}}X_@cZbd-?6uATK{J8)xuUHRU@_f(t1?_SwWP0u@+l?FkC zokWOIQni{1H)vLbP%727GOOD=?K>81Ju6yGa^`BFi&2b+ZKfI%wTI?QhWj2Q4VA%- z3aBnCdh-({Mzd6A z2DB1MT**gmh5(?Oo|Cva6I@}dh7o=?dT+4k3(zfb@9D|>(teB;)sG|kEyhQuWIi5C zYCdi=6bUC4VFHJ;hdTTlR*}nP+|h>*C>eYF9Z{H6zmKB5@X`HjXCURoE!MXdU}@VGSQF7CE^S`k8kZEaFo4UTPHX( z&P+o2ZoNk;BxrM-^YcTURhn#OmwTdDPa9O#4E8ooCElIZd$-8nW_e1&GN-N|fqutz z9z{4)MC z8V3#t@;l*%vr)u3sqYK}w!%rxg?378P+$Y4ZLA zrF^oWWTg2DR#&j=sR`4ba@*YCFIs&3$@}Z-LJgFY{B2lYH@R_+^p(d>&SiyC+Ox@> z8P0QC&Z*VKJcS2~;cR77FQ{nyiaSCiz|0l5#n-Q1LLnbix91FRdig7}T!02*<8#IX z0xijLJxCr~OYI(yBbOzV??P%5O)4xZm2Fe)G)8KmM4-Zc#No{wVkzlMsN!)Q7kZ{* zs2+>F?h-B6@Hk%Tetq@=6C1pO9(J-FeR1uE{`J+Qo@5T8h)wv}3m1iFL5d>eg_#Y= z=E!A9RRy)$C>6N3R1;c_LWxmRL*nUI@!YpdhxFcE0j;pr)EQ$2)su<{w-!IVH@v3Z zV$0IZ*80R29M$+rTQvO0X*Z7rzmW2ew`s)9esS2?-0z>0CCV(PZzXBf#$oTK5;Sicz64l?Tqc>Co(g`O&!PYG-EGjeXnx>U63;ln+`Y%u#!mn zLHm~i$mldshBlD;*Iz9F>&p5q5bWxW19=B-By$}6u>=cHuI>YfV*vyZm;j3Me;noi z_XCzV9Gp#md;?Gi2iGvrQmcMH+ole3FHf0?S8gqB;!h?WxHNUE?-=@O#dPv>$xEXL zh`Up2p(tzgEyfe6{j)IJ>vgcHd6M%JRJ;VfMoXT#$~^CfBQ}7#az8Rp0|GGU6aWSt zI&B1#UaR)HZ(53WjZ&ek-r7BVum?neHpXuMI_15CYhkY+p^CHMfZ>xL-PP@Q-}om{ zMa?Fq&feG0q{ieetSt6Aqu%a*SMNlk*SN)D+xSo&Iv35oO^<1Rl_*h;kzemrsgxLy)Qpt}j-%h=Ey4E2w8-YA9;aDteJX2DbQdZE-vBf{ z;m0Xl*Ia;_$x`WV#R&C$o8~r{h#+X$Pu%UI6jim7bXK=;*}%6kg?v7Oe%v)cE8SDF zWqe|FX3+md1AuP^%2e;fXp*zNy1P-Ida8-29us^_S9}72@+fWXB#I%41I^G&ybHTG z z5@2tE0l0(xPmmqtWgpk&)ko|_pc3vo{B30UGf4*r#@!_Q?L$6QBiQI?ml%ETR6yfp&9h*0@xcL=YaIr>`WmA!4{G*YBzoJ1!^060US2 z99BtQjo!-S1C)(H;}x%L>(s~V%(X^#EB93tW0E9*f0dG!X0oYvO2F6(L%TWf&=M>g@-C}WGZ*IHVAD9$O8QB*yL9i{`-t;30N&WI0&>?_ zSnO)CXow&c?aIDW073}>(M%5+zIl4`?vnOpnr>|0POI?#%vY(L&hkR?+{TcXL0Ty+ 
zKum^>dGRoS_LmN*_A9qV6+vcY5j0ubOm7|~ZU(9vLf_OnBd(l{IHJ*wHfz~2fn_0` z$@L&7u^+l}^izo)tfJ-$)EF4dfpbU26?|&p3y5awtVr|-E7Gv93EeIgulu%mU&jvI zIb9_a&{_Lh$ z3trX6cxn{1NmLE2TvSGfB1^lCi?x>VSeAi{5)LcSJ^Kx6v%>>U3PXo`+=*L-jPZ_1 zm?N`J!|A@MG-*Mz7DhHAurzqsA=u~3tJXN~LPqQ57Vx13+2KsR{m3E0eZ%HZfZAGp zy!LEdP$7uuyXy7^^SjsI6&D*Mt5taobZHo2QaPn|?CT@kDru^d%sgsL--q+x6S59} zmeC4~{piRqzW{W4%W3bJJz%n>AI-HFaggihzf-!Ap-KM`!d8lWdd9Q##!rx>AMx7J< zA{xE_VUiD+zMu-~jx)Vn_YU2+CA@4M4VAH+K!y>-=d~3tRoECVp;WPpB$?nm=J?E2 zw=9=}g~?gyOkm0(bVVL24`N)S>2&|p&Y#x9u-=~k=CMOj*^sfp2Y?K3<*92bB_;J5 z^A(aR(n-OL(_!03lDOX%Ne^SBVXv8}yX963TGfbDs7?H~o^LY`Vq#p%W5Foij6AUU z)ui@+I3wVBGK*B&{*R1+@}C)j(QcmN1Y&;Ofi!mTQRZ|;NzWNH5>3Xf?5 zuvPEl2*?)aI^^bl9Dr{1A^gQl!{y&#Svh%s8|{%b2(2q;|Lf8py4jfAP7bwD@?$4v zJ%{YajETZ>>U=8ZyP2GEM$KNE<8^6VWH@TmbLDH^B}{ylJAq;qS5ZNc*`V`OY?K zr0$YmNDXYcps&WM$k^ukTgF8Z@50i`B3h&e)!}opc*Q}=fL&kM^Dmo*%5Cmviz@uu zO_8D_tqm|kR0pcOPbn;(7^w7$I~7LGBqjti<$`h8`@&M>_P&Qwdv_0S`*IryzXk$E z`~xB_)_FFe-(ae7?7U|4ZsU0EqcgwRT;-OtDf~s_CwL>xASJGDR3;gkMyRse+Y2$u zpCBszh5hQi8K0M>pGHv8njNI(#0-goP;2n{gVVVbkHlUYd-e|Jj8xhftmNi`g_JlP zg7YnwYPdxX`1MJg;Q$Dwyu`^i8)C1wx+=M)%WK?E5cRe1dxBB$7b@aW>c&qr3?~;g zpWwlop_j7A`ANskxPwrY$HNV^R+0nI5~Hphbx*BM-Sn-0wV%99o5q{2JT zL34}xz3uja(TSg&127m9LFJlCFwB`&rwz|kPMx6zY8qb7{LSGMlHnsd%km8#84POC z+|llK#xVdbiD9RcYHng;tAk_hU39e@7lxG>tNJyly*0AIA|D=m)6>o{U5CTO0Z42p zDcjqGbSXZ@`3Yjj_rH|%KbBNwOUl6I=jQ06d`NZ)vW8V6eFMha#W+DSaC87g#(M0Jp@)7H8 zIulMPb-jh-1-3~tUT=D&v|^}U;NnS?y6}knBQ$?0I-Fp56(HSzx;)P#u7u(M0ig7hr2<3=`E*^S(3jA?zIb$bQsx_9qx`KVS zj@nZTCLE-xH5QtvtG)bPa^l$!0wO>^fsSq`X_O(*xY~=o+jax*@cRU3Uuo;$MY*RV z1KRmn4#YH|qvGpO+J7dBdii*c%c{QlhO^k2Kvu@w6D2rEn*5aql}_1Z#$+n(;*KDDK|L{BYTxM~aJ#pP7>vXY0IS#*QniWuG4Y@O>j@u;r+f$40I7}Bj712CSmCr-S<`?T;z66PPC>d+S5Fq=4RjCtn*n}nLI6_$xo1(S0oO`@RY#pN2`g#mN>VBfgB|zlt+y(2ogmGL_R^bxZzAr zB=DG{RtDQ*N)=_rALqVuDdIgp^6zbz`$O@y;(S)e5!TkF_Aa(KW+i$in9(rKGaAz@ zG7$jS9-Z*z%TB>dpbhPD1VY?~X)JYUL-P5;%lgD^7BfP{&tIQrw4tmK+CSEqaI%Jn z<=Q2DX&zdb*nU*7l4mcqoJT(R4NpcEC?gI63>{PNNdPkt3aFPEBA*J!fuS1!`9~?G z<>>|j@cNi2`?Jnr!0R!8Seh7N0r<#8Zz0fqpc1Q;Ulu(h9Nn?y2CzW*$~^#^9^5^7bHg?YgGNogGTQ2maccNc=KGWT>(4Ptct( zAPO`27`P{oQSb|iM>XNNfZ@U=IB5oIZLX+-F~0H~?_@rEZN>DCn4NW0Bm{;RxB1lf|F{t1Ex zBFzKX?2&?D-8B4dVj+vS?`)j~NPswV=9#7QkGSWc!2_|Gn6~qsu(Q%K&mK}&BV(g? z^H#4G^TMl89!B_(Fb3$IO9hpVl%c7f9EoN@UIp^6U&TQK>-4_hfb3bOyURoS9_D{|7 zuKS_9I`3_d>+7ik+ zg_(v5Tpi|Surwr;Z`Ef!d7e;9N*N%c?rG6}B&Nyeg#X1_C;^hKuz}^$)9=pIBV(*g z*lrG{tDp+-v2@jTcq960_juhe!LR96Zsy9)aI)?YVe%K_9xGimiwe|8Wvg4aM@J*? 
z;DPiB+_aBd;$*zvDwWhm1Si#s#vP-vz}rOg+hK=WVuhtv7VC?p>xDcwPfEldzGbCk z@YJ0BPLmvUo3`<&Rh0Iv)ue@?*5bXX-mZ5xTESg@hjSncvQeMes!8tb4wz*eI^pfq zA~p0h5Z}~9bTeK3V)A;)`zkgxu{oq7_K!`-0G4e7G7b- z#;)BSe3Z5hj%G8T*Bk58D0AbVHIY*h4A5Po>0q~GWgSS-Hn)wLPg~Yb>I`l7?r2*w z#1HixiZBL`KJL|Tp7@ruo+cn>T=;Rv-ou5ntcGN+-Oj?-GH9i76FQRxfmU2$Rqf+v zEpty~rA89n6kKb4%;$BLxBaE4mIi}_>GtTPz&;*vt}Hk^Kd3(N&-m6|IHRC{LKWk@uN;e<2?nPg8ya4Ecf?DD2jK~TabE#I_MPH!NO6TE z7y}a0`UuEedD0?00@&j~LsAqKKzgf|=quq>md%e?@a$FNVpeyVe#=SbCkYim3CN`C z?q?r>IpD%ncghrJeu&PzbO$ycjC~rZ#-ip_55$s6UYPdfhcXH$R@>X}d5kZTnN>wLBNNJy z49&^*qxY&4f^d?*pg~p_Xi5tdxFQ?srKjI7$?lj(hp0l~la>hm?Qjbk^PXJ?w^FhlfWJ^w_+` zTCw7kA=;?fAf*5{)8DRJ)NU@%O$z#@+Q>`%$ymsKscp4EQ`+!`8e@>xD0%M-yXTMB zM|4enxhZXZ{0gQ*eOtsoOz$+jtN1$praR@XD!HC@gTd0@Z}g1cXxJMIUq9;eDH`<5 z9}=$`-z$I2H=>H>O=3V#nr>LVzD2Q#Q47`Kw_U}p_2MFti|?BtpzAhUPPbmgX8&-q zowY$(X5HtIYcF)6#9YJZ!r5DqQF+8;{h=)3h=q&)7o*`Y2|vL+)F(Z$#nm6j6BhT?rJ{3ioRxm z4wj_30DlNMh$EV}qbN0&YW+IO6HXuJCL@9xjs04Z=bohPo2&@h)7y151S*i)-K_Nn z6M}bcy0YLH&OrLbR?<;0=fMiz^s$=?CzpGa<&4|Mw<)^y7X_L&Dg~CZ{jbf^*J9^- z4W{0RmqFVpnkU`;bVkaVQ^^Sq%M3)uEfP%0xO$Qr%Kb!&TmRjGY)~!K#|>_J47iF} zFq=KIJ`4fy%0QosK6Z29w0^t4f1lT&1J-izZ$tO{gGCCwF#igprnJb0=kD}2JJwXJ z>>g9UCF-T}z<(MN-NL7O;h2U|SYAmGx^*%&@f(Ri4rd>Zl7sS7Kp=ScW3hSO5;43V+DY7dMP<~N13IC(8 z9{o2(7yD3nn(N(kDUzn9R1vzU9-#`x>Pl@nlpjhGD2Apd=|JD$&@v(bF%RV2wNN_b zUO>8Bf&M!bwQ2}5xqtK|8+{AewHpVYm;w2GVrj}V9Sj7E6kv*TN}m9#Zf{M0H5~ij zx}$U5=aXgU4r2C{b*^Z`p>yBKJkfyHcYr-Rk|+xkNMKIqLe{#tk!*2r)i(@)BdQYt zKsYcvfR<-*iYKbc`F6otr%uNeGyGJZ6fuiS5>I{Ja;Yxe=ql*_er3N6k2-p2a-M`}Xp|9sP1VgS;5?{A~4@cYPuoW}pZ+R^N9nlgab{C1}c zd?sj<1?K_EE|H(P(N^{MO~hd%%OO*wIul0v2dVc^^UUXEx^eO7;<^He`XE0(CaI`NMv-^-+!* zG$a1rzA=?O$oMh7B}#*PpDKGvTrUlK10~Ce;)3|XzD6ZjT1)+|%;qpUxeB|n`@wL* zR~f(n{;EnH4I=}A4wlIKg#ZV4oQ8iopc~GwyS(};KH=&ZXceWRiF`X^jl2m4L`1T& zyJ1A#Fks7WQ$sxc&&(QtYR61WTvV%MiHNew%fT&42L8LQKS3xW-~z!`!wqY)hLdMo zKS7gzvaJ1ksUvMwFyZiooVGCuek;^yexsL1GDje7jMHq~T6}qhhwlL(u=uIjNwtA8 z|FfA{Vf=fPjY=5`V=r*o%K#9m{X2M+li+ILxgzI=x0M&BmK?`dzku>m!3X0U1KMHe z0U}_yo(==j%wPwJl?k9-b--s>v)~hx^I2+&x%9&aS#$NPOAk=`hc}nSyi6VmWH={G zmEs*-iDCq6pRJParphJi&)N8VCD}_PaJv%A%ugDW+HQ$EFmyV)53X|PXkhyeFbUg5rorcpYY&Ach$PqX1e99?I(K!kKiK8 z5&7?&4h;0ll_KP8xU8tDz*JcPUy~rz0HMnWpdSca1+cC;PCWqVob^3|T|osNxb9$n zEl!E!xM2TL3Xn-KoU_EvvfFJP^#N^y*M;JcAsv84UCT8E^s|V7skdtg10(M<0n=`O zRkJQmJ@U(rhC7GDLxtsJMKmNiZ3gE|o`YqT;MXo*jwnJRgz6Fsd<=sHuno14Gfp^w zSiegM+zr~3GzFkSx(e8?6Af;-uCc2xPR8d3VM5IH2<4W{$h<_mYUqM}GL|oag~LT< z_9-wQA=J{@xc0xD#oy1dJniYsXTcV<_DRRY?dd_Rht_Dp!83W)Fx>aWy`G!}XU`R0 z9#VJjU{P{25(2EAjNb+*2}#t#nDU@`LU57wd$Ku^@m{qY*-H^}a~@H_(jxdVkb%s* zz@m1Xu+LT0n?}N@-(^ysjF`>}v=~Da8Yj6?y*0UnA9XXJCcp5L0U;ltFuN+JZq=44 z=uft4wNp+P)U`aeoaI^WA*T*%E~HiiA(rFp_D`j8{}Dkr0TDziaI^l1Aa;p&eu4mw z!mL*ovim*lAp00AkZhNK@ku`%2ISrzd}ccCHtZ$O`!QZD|nztv_$Ly)}gu zUsN7NY{$7Q6Kme+0z1rtI^A4%Y${xo19t#CvwC?B>5H?+-4)GSYWO$1$y?}d#(XGBqUh^#%siSI<-CYfIvv0+1u^lz?E0)C3rs z1}Ht6BzOY?I~8XYLh#p+jaAz}9lTQ3{nJ09Zj?WR@Bd04sHi=rKS)nHZbKB1;VEqd zQb1Sv46i8UV;aEesw@U7$Aq)FMN70#L6Jv&lj^~PZFNE097pp4X0uX_f{&2|PI!

UAVx?tkPfVW)!mE$c<9fB1h zVod=5`cDX09iUR0%KwL0=|4Y)VBCnP=CE5|;0|V|PK{W?laO-DkCAPF#UE1v34f`! z=tR=ep5rtC^Tu8szN6bZAwgbZx_5ddtlo$1;($|ogqnlqSu8=zBXxhQQl>A<5tugR z_S$d!hIxaUpTyNHi(RN(3s!RUP%Rc+E(~>F%)J|(86DjxtAHcIuM6j2RxJaSA zSl~FomFZ{^r9EGH3UyU9-;SR6B^UcY^6`=PcGB&L63T_U2H)F1 z7Ud4Tk2iYEK4i_C{w_M%xD1-Z?d$WqKeDUNC>sdi8E4^P!c&frp ze@^Gx9Slulle&BZX#8SX_wa+1Nsbe>dnlXEON5v^Ih$=7EhlAV)91^bQ-ZAYt=ro( z_quu?^(|!z-;pN&Lf%<0=BMf->eQ<72`>L~YNsl7M?T?*rGBAWn0LpPE@bdxFIA8- zL&sRim_$XRuxavq*oO1?yPWkxCtnIR`FjsB*I0WSI9sp9BpO9%o)dGLwQ8+7=bE#RG5YA^XSb5CXQ62usU=ikEZwjo zZ^LmtWANd;A}Fo@dT%U`Gm2CB=)tpP9he@oon&=zam>D=oj%y&QYU=^?>d)pMb!_v zZM!2BME#4~(@aiQrLWC(iy5 zZ_$0^Ma7h4?Hs+ofa+`2GF?kv{2Urr=(*n421z^ZzXaj&hLaF@^jEb(zOj-PY+P)%NW z@DT-Z#t?K*d2||#a5(5q2xJ)$4#-?~>2Ra0qG=8#*mJyw1Mw@fKjii-(wT>NBIO%p z*RJ7T428d)Z&zJa>u)KiTaXZZR8w&d@b2`%^2$W9{^OxSR&5u*Xa<1pkB#nAg>$sC zzkos^dw##cTm#?^-o^*%c+_t2d@1q#9JXV?n_LD+W4%~I)Mad_>nhEV!Kz zRH-B`x_)0#C|&CUcv|JN0HUiBzuMO!sSdh6-U!cbqDOz!fS=@m;y=Fi7@10=8m$_`tL?c0_xk=GT~ zv91o_E^NNfpBqAqTD-@#$nDdOChqwhCfStCG zqvvw!53w|4$ie@9p_7b z??%cK2+B^#&f>xD|21;iD}&XJL6*u8maH0h3y!@|X2e*UgKec$IQ z>k4URk{IHdD5Lk1Jy%8i8K)reOA*MyuOhAN{&>Jomll+u54iHFZYaxH2ytGYDPuM| zzex0SSHs83i67QcNm#{3v-flY=PvEkcs@htaV7vL9_nkX$g0n;HST0XmnC1eMV7WR z$4ItgOGS{jS4}xl+BS+8?r_>JQs1#;%+x$1H1uTR@XNjz0V&{MiDR@yIQ%Fn@o4#5 zCuZ+^w45DaKimkP4K8<8P0wU({HIgEfbUQi^cLg3;Mb95pi6EUFdd?w#>pAtm&4Sh3sf#6C zQ#~-0VxaolD0pOP!MG1QwTJ5PI}J6Z~L8_swuXorEhu_MNva`s!PC2Rw zbo;2_8h&?UhHZYjZP;PDv{940pRAs3X!BVcO)H$=uIEi|7>ABd&U2V@X={^+KK0IP z!W?MnXmT~Jcrw33c&Uq#aW?vx4zKLtBu;c(!%wXa0Cry##|=>V!S&K5nu(enNzb_e z_I$Gi9RLtJd7m}d8U${Z@2-vx2S``ZQD!vY|Gn2r**kA19&!>BeM~A}=1$G@cg>E; z+wW|UV+DjrUkUsw$yzi1u=TT@+pv_ft4z2*P<7UjiZv9?n$F4R!_WC&PfKn%yyN2ZaJV zvX_ZG``hZ4Nm`rqo03Imw1V{#^(ehK(81p8z=OFHEMq_$z$ffKRtcuiN_$7Za-+j# ze!3hCin0f2db2-|Z9Z)Vo|_o&L_Vq>H)w?g_t6GOMs`0j4#b=jL;p^_2kZ_#i-eDHtTOX~^teW(avc9;Whv+pmFI`1u?7_70$S7(nqyf%o4<-v8@=C`fCjH{^bRKgCp+>Q?zB*t${#u&QaJ zgIoSeoXEHL>8uXQF|zZtr+h4UgBw$R)F?S)O_ z9J~B!2M@u5oD=+=LNd;k1*>tzjfY%|FTJz^3mM$PzUM=5kmKg}x=*S|G7>bQ&v-M6 z?YVZG@Vc{ib?;i~X1;T%mTnn%PtcL_Uxr;8-}u+v5x+URT_awo5k*`nfYvg84w(x# z$NKu`7n9$3jKxwy<^@ehW2=@3r4eNvcr_0m@W_@Jf+?_2BeYn7ZsdXtn+1ccLwYIo zVWD(^^LE0P_r_*iXQWZEJ=5M-V;GC-6wYI8^B4Z_&$V%A4vGU6Jxu28`H72G}FZmpj+yEvs$`+Ii`b$_$hj^ z7{4;q^st1_B*+6QPTVpe#3=EM10Cq~0v9E>T8Tszi6 zNxHIJQ@yVOs2-n6=H1EkYm2HWsS_`6SX0{TJHJ;$AkO5kd4C?h2}XQ<JXf1^Z+891@FPZl>S3ZFf!rnek zJab%&)Z963AV*LXAldU+|0r#g7Ol>lEM7UVcGp3_{ABT|%fFQiDY^}2H(LcY=&%@d zM+Zu-3liE!XcFJeCHrhu`=}wO+YVSu7yY7-nEg0fHu95U-|_QSD!CcvocG>UAyfs< zR><<1KC*E7iJFwji&swO@Hr@G5}2CwC-`fccU1SZ^}kg4`|jkT*qKSX_HUBsmhzlu z^CsYRZgtsFqC}c4SPfrha^4m1dkURNkltkTrm+>$`3UVIDK&dCk?nsf+`>rzwopU6 zr!?yB%`3?mFFy{%)!B*f-o7{*%A2$UjRQZ%ERL({S@5#h%TvEb=dH+1_9vOzL%EpL z5BLw7CMnAGII>xGQ$Guw_(lb0#4@;pGTH+8Dkaz7Ryw)svz$+tPARt3*#PAyaWM*G z(NQw;uTVrkNCBme%9y({&Pl6)aFpj?KsK@oYpS=H29V_jfT&xV z%sb^Tr=Nkjht_O8R8^(!tvAST(?2MV-IhDy?lH!O_v)Ng<(}*CjAbn{!F&66my+lI z;~BC6i~Io@CVp%Tu$ZST#4^oC@$SL=qYY2REhBOI>-Hth>L_lq<;SrK$s_6GA?7~< z2v2YoeIrZ8A9Z`106m{Mk&i%cxf2p8BZUmx0shCQOAl#olLqHWNjr9nWRt83pDF!L z-q0E${d)1**T*y8&S!3CDs(|awAiFT&2LX{jd92+xpinog1qJb`pT>dOjF379mXp( zHP}!Sv&Le4iA|T{Aa~o7&)-G9?XpiAWj`S0k_g5yz2TIq{T3c~z`D60OROU7SE>R* zb2w~znQ%A95XSo@K?5bjiCl&|gz7nviABbivVCC!!v zgCR=Onb{%QTtjA3e35oCZXDRw08nEzb)!{R#*|7+;AVbcIjZylT#DY+TdEc#VIC3v z0igHT8^*iFS03XN&Qcu4vz0+ypH7S#SIW_1RG>Zyjw}*NAbH?4teNHEpKZ=4XspPo zR%GfbkU;5I%NM=wCN8xwI0?Q*3EpUI80P`IlQ{Mhb_qhFdPOq#wLgp?$ymtK9e*4S;%b&V0EN)z)o zd1zVb1gJ@6d;RemKa^G46y@n_gTVxlV0= zIV|ymk&63iBoPBe4}FR4BKIio0Ki;Sfj7dzFe}_l!(a=6Y2u0iZzQ@8$W^>ec2g3g 
[... base85-encoded GIT binary patch data omitted ...]

diff --git a/neutronproxy/l2-proxy/README.md b/neutronproxy/l2-proxy/README.md
new file mode 100644
index 00000000..abe10f7a
--- /dev/null
+++ b/neutronproxy/l2-proxy/README.md
@@ -0,0 +1,163 @@
+Neutron L2 Proxy
+===============================
+
+    L2-Proxy plays the same role as the Openvswitch-agent in the cascading OpenStack.
+    It treats the cascaded Neutron-Server as its openvswitch, converting internal request messages from the message bus into RESTful API calls to the cascaded Neutron-Server.
+
+
+Key modules
+-----------
+
+* l2_proxy, the new L2 proxy module, which treats the cascaded Neutron-Server as its openvswitch and converts internal request messages from the message bus into RESTful API calls to the cascaded Neutron:
+
+    neutron/plugins/l2_proxy/agent/l2_proxy.py
+
+* Clients for the various component services (Nova, Neutron, Cinder, Glance); through these clients you can call the cascaded component services' RESTful APIs:
+
+    neutron/plugins/l2_proxy/agent/clients.py
+
+* The helpers through which the clients obtain and validate Keystone tokens:
+    neutron/plugins/l2_proxy/agent/neutron_proxy_context.py
+    neutron/plugins/l2_proxy/agent/neutron_keystoneclient.py
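To make the division of labour concrete, here is a minimal, hypothetical sketch of the pattern l2_proxy.py implements (the real module is considerably more involved). It assumes only the python-neutronclient v2.0 API that clients.py itself imports, plus the option names introduced later in this README; the function names are illustrative.

```python
# Illustrative sketch only -- not the actual l2_proxy.py implementation.
from neutronclient.v2_0 import client as neutronclient


def make_cascaded_client(conf):
    # Authenticate through the cascading Keystone and talk to the
    # cascaded Neutron endpoint in the configured region.
    return neutronclient.Client(
        username=conf.neutron_user_name,
        password=conf.neutron_password,
        tenant_name=conf.neutron_tenant_name,
        auth_url=conf.keystone_auth_url,
        region_name=conf.os_region_name)


def handle_port_update(client, port_id, port):
    # Where openvswitch-agent would reprogram local OVS flows, the proxy
    # replays the same change as a REST call to the cascaded Neutron.
    client.update_port(port_id,
                       {'port': {'admin_state_up': port['admin_state_up']}})
```

In other words, instead of driving a local Open vSwitch, the proxy forwards the effect of each message-bus request to the cascaded Neutron-Server over its REST API.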
+Requirements
+------------
+* openstack-neutron-openvswitch-agent-2014.1-1.1 has been installed
+
+Installation
+------------
+
+We provide two ways to install the l2 proxy code. In this section, we will guide you through installing the neutron l2 proxy with the minimum configuration.
+
+* **Note:**
+
+    - Make sure you have an existing installation of **OpenStack Icehouse**.
+    - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified:
+      $NEUTRON_CONFIG_PARENT_DIR/neutron.conf
+      (replace the $... with actual directory names.)
+
+* **Manual Installation**
+
+    - Navigate to the local repository and copy the contents of the 'neutron' sub-directory to the corresponding places in the existing neutron, e.g.
+      ```cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR```
+      (replace the $... with the actual directory name.)
+
+    - Navigate to the local repository and copy the contents of the 'etc' sub-directory to the corresponding places in the existing neutron, e.g.
+      ```cp -r $LOCAL_REPOSITORY_DIR/etc $NEUTRON_CONFIG_DIR```
+      (replace the $... with the actual directory name.)
+
+    - Update the neutron configuration file (e.g. /etc/neutron/plugins/l2_proxy_agent/l2_cascading_proxy.ini) with the minimum options below. If an option already exists, modify its value, otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide and a detailed explanation of each configuration item.
+      ```
+      [DEFAULT]
+      ...
+      ### configuration for neutron cascading ###
+      keystone_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+      neutron_user_name = $USER_NAME
+      neutron_password = $USER_PWD
+      neutron_tenant_name = $TENANT_NAME
+      os_region_name = $CASCADED_REGION_NAME
+
+      cascading_os_region_name = $CASCADEDING_REGION_NAME
+      cascading_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+      cascading_user_name = $USER_NAME
+      cascading_password = $USER_PWD
+      cascading_tenant_name = $TENANT_NAME
+      ```
+
+    - Start the neutron l2 proxy.
+      ```nohup /usr/bin/python /usr/lib64/python2.6/site-packages/neutron/plugins/l2_proxy/agent/l2_proxy.py --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/l2_proxy_agent/l2_cascading_proxy.ini >/dev/null 2>&1 &```
+
+    - Done. The neutron l2 proxy should be working with a demo configuration.
+
+* **Automatic Installation**
+
+    - Navigate to the installation directory and run the installation script.
+      ```
+      cd $LOCAL_REPOSITORY_DIR/installation
+      sudo bash ./install.sh
+      ```
+      (replace the $... with the actual directory name.)
+
+    - Done. The installation script should set up the l2 proxy with the minimum configuration below. Check the "Configurations" section for a full configuration guide and a detailed explanation of each configuration item.
+      ```
+      [DEFAULT]
+      ...
+      ### cascade info ###
+      keystone_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+      neutron_user_name = $USER_NAME
+      neutron_password = $USER_PWD
+      neutron_tenant_name = $TENANT_NAME
+      os_region_name = $CASCADED_REGION_NAME
+
+      cascading_os_region_name = $CASCADEDING_REGION_NAME
+      cascading_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+      cascading_user_name = $USER_NAME
+      cascading_password = $USER_PWD
+      cascading_tenant_name = $TENANT_NAME
+      ```
+
+* **Troubleshooting**
+
+    In case the automatic installation process does not complete, please check the following:
+
+    - Make sure your OpenStack version is Icehouse.
+
+    - Check the variables at the beginning of the install.sh script. Your installation directories may be different from the default values we provide.
+
+    - The installation script automatically copies the related code to $NEUTRON_PARENT_DIR/neutron and modifies the related configuration.
+
+    - In case the automatic installation does not work, try to install manually.
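The Configurations section below lists every option in detail. As a quick, hedged illustration of how an agent-side module could register and load these options with oslo.config (the same pattern clients.py uses for its cloud_backend option), consider the sketch below; the option names come from the samples above, everything else is assumed for illustration.

```python
# Hedged sketch of option handling in the style of clients.py.
from oslo.config import cfg

proxy_opts = [
    cfg.StrOpt('keystone_auth_url',
               help='auth URL of the cascading Keystone service'),
    cfg.StrOpt('neutron_user_name',
               help='admin user for the cascaded Neutron'),
    cfg.StrOpt('neutron_password', secret=True,
               help='password of the cascaded Neutron admin user'),
    cfg.StrOpt('neutron_tenant_name',
               help='admin tenant for the cascaded Neutron'),
    cfg.StrOpt('os_region_name',
               help='region name of the cascaded OpenStack'),
]
cfg.CONF.register_opts(proxy_opts)


def load_config(argv=()):
    # Mirror how the proxy is started: neutron.conf first, then the
    # l2_cascading_proxy.ini that carries the cascading options.
    cfg.CONF(list(argv),
             default_config_files=[
                 '/etc/neutron/neutron.conf',
                 '/etc/neutron/plugins/l2_proxy_agent/l2_cascading_proxy.ini'])
    return cfg.CONF
```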
+Configurations
+--------------
+
+* This is a (default) configuration sample for the l2 proxy. Please add/modify these options in /etc/neutron/plugins/l2_proxy_agent/l2_cascading_proxy.ini.
+* Note:
+    - Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name.
+    - Please refer to the inline comments below for a detailed explanation of each configuration item.
+
+```
+[DEFAULT]
+
+...
+
+# The global keystone component service url, by which the l2 proxy
+# can access the global keystone service.
+# In future, a separate KeyStone service may be used.
+keystone_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+
+# The region name, which will be set as a parameter when
+# the cascaded level component services register their endpoints to keystone
+os_region_name = $CASCADED_REGION_NAME
+
+# username for connecting to cascaded neutron in admin context (string
+# value)
+neutron_user_name = $USER_NAME
+
+# password for connecting to cascaded neutron in admin context (string
+# value)
+neutron_password = $USER_PWD
+
+# tenant name for connecting to cascaded neutron in admin context
+# (string value)
+neutron_tenant_name = $TENANT_NAME
+
+# The global keystone component service url, by which the l2 proxy
+# can access the global keystone service.
+# In future, a separate KeyStone service may be used.
+cascading_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+
+# The region name, which will be set as a parameter when
+# the cascading level component services register their endpoints to keystone
+cascading_os_region_name = $CASCADEDING_REGION_NAME
+
+# username for connecting to cascading neutron in admin context (string
+# value)
+cascading_user_name = $USER_NAME
+
+# password for connecting to cascading neutron in admin context (string
+# value)
+cascading_password = $USER_PWD
+
+# tenant name for connecting to cascading neutron in admin context
+# (string value)
+cascading_tenant_name = $TENANT_NAME
+```
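A rough, hypothetical illustration of the token handling performed by neutron_proxy_context.py and neutron_keystoneclient.py: with the cascading_* options above and the python-keystoneclient v2.0 API, obtaining an admin token from the cascading Keystone could look like the sketch below (this is not the modules' actual code).

```python
# Hypothetical sketch -- not the actual neutron_keystoneclient.py code.
from keystoneclient.v2_0 import client as ksclient


def get_admin_token(conf):
    # Authenticate once against the cascading Keystone and reuse the
    # token for subsequent REST calls issued by the proxy.
    ks = ksclient.Client(username=conf.cascading_user_name,
                         password=conf.cascading_password,
                         tenant_name=conf.cascading_tenant_name,
                         auth_url=conf.cascading_auth_url)
    return ks.auth_token
```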
http://CASCADING_CONTROL_IP:35357/v2.0 +cascading_user_name = USER_NAME +cascading_password = USER_PWD +cascading_tenant_name = TENANT_NAME \ No newline at end of file diff --git a/neutronproxy/l2-proxy/installation/install.sh b/neutronproxy/l2-proxy/installation/install.sh new file mode 100644 index 00000000..bdc3a299 --- /dev/null +++ b/neutronproxy/l2-proxy/installation/install.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + + +_NEUTRON_CONF_DIR="/etc/neutron" +_NEUTRON_DIR="/usr/lib64/python2.6/site-packages/neutron" +_NEUTRON_L2_PROXY_FILE="plugins/l2_proxy_agent/l2_cascading_proxy.ini" + +CASCADING_CONTROL_IP=127.0.0.1 +CASCADEDING_REGION_NAME=Cascading_Openstack +CASCADED_REGION_NAME=AZ1 +USER_NAME=neutron +USER_PWD=neutron +TENANT_NAME=admin + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../neutron/" +_CONFIG_DIR="../etc/neutron/" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/neutron/installation/${_SCRIPT_NAME}.log" + +if [[ ${EUID} -ne 0 ]]; then + echo "Please run as root." + exit 1 +fi + +##Redirecting output to logfile as well as stdout +#exec > >(tee -a ${_SCRIPT_LOGFILE}) +#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) + +cd `dirname $0` + +echo "checking installation directories..." +if [ ! -d "${_NEUTRON_DIR}" ] ; then + echo "Could not find the neutron installation. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi + +echo "copying in new code files..." +cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` +if [ $? -ne 0 ] ; then + echo "Error in copying new code files, aborted." + exit 1 +fi + +echo "copying in new config files..." +cp -r "${_CONFIG_DIR}" `dirname ${_NEUTRON_CONF_DIR}` +if [ $? -ne 0 ] ; then + echo "Error in copying config files, aborted." + exit 1 +fi + +echo "updating config file..." +sed -i "s/CASCADING_CONTROL_IP/$CASCADING_CONTROL_IP/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" +sed -i "s/CASCADEDING_REGION_NAME/$CASCADEDING_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" +sed -i "s/CASCADED_REGION_NAME/$CASCADED_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" +sed -i "s/USER_NAME/$USER_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" +sed -i "s/USER_PWD/$USER_PWD/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" +sed -i "s/TENANT_NAME/$TENANT_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" + +if [ $? -ne 0 ] ; then + echo "Error in updating config file, aborted." + exit 1 +fi + +echo "starting neutron l2-proxy..." +nohup /usr/bin/python /usr/lib64/python2.6/site-packages/neutron/plugins/l2_proxy/agent/l2_proxy.py --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/l2_proxy_agent/l2_cascading_proxy.ini >/dev/null 2>&1 & +if [ $? -ne 0 ] ; then + echo "There was an error in starting the l2-proxy, please start neutron l2-proxy manually." + exit 1 +fi + +echo "Completed." 
+echo "See README to get started." + +exit 0 diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/README b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/README new file mode 100644 index 00000000..b8991ad0 --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/README @@ -0,0 +1,6 @@ +The Open vSwitch (OVS) Neutron plugin is a simple plugin to manage OVS +features using a local agent running on each hypervisor. + +For details on how to configure and use the plugin, see: + +http://openvswitch.org/openstack/documentation/ diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/__init__.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/__init__.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/clients.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/clients.py new file mode 100644 index 00000000..dbd6a624 --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/clients.py @@ -0,0 +1,237 @@ +# Copyright 2014, Huawei, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Haojie Jia, Huawei + + +from oslo.config import cfg + +#from heat.openstack.common import importutils +#from heat.openstack.common import log as logging +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging + +logger = logging.getLogger(__name__) + + +from neutron.plugins.l2_proxy.agent import neutron_keystoneclient as hkc +from novaclient import client as novaclient +from novaclient import shell as novashell +try: + from swiftclient import client as swiftclient +except ImportError: + swiftclient = None + logger.info('swiftclient not available') +try: + from neutronclient.v2_0 import client as neutronclient +except ImportError: + neutronclient = None + logger.info('neutronclient not available') +try: + from cinderclient import client as cinderclient +except ImportError: + cinderclient = None + logger.info('cinderclient not available') + +try: + from ceilometerclient.v2 import client as ceilometerclient +except ImportError: + ceilometerclient = None + logger.info('ceilometerclient not available') + + +cloud_opts = [ + cfg.StrOpt('cloud_backend', + default=None, + help="Cloud module to use as a backend. Defaults to OpenStack.") +] +cfg.CONF.register_opts(cloud_opts) + + +class OpenStackClients(object): + + ''' + Convenience class to create and cache client instances. 
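+
+    Each accessor (keystone(), nova(), neutron(), cinder(), swift(),
+    ceilometer()) builds its client lazily from the request context on first
+    use and caches it on the instance, so later calls reuse the same
+    authenticated connection.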
+ ''' + + def __init__(self, context): + self.context = context + self._nova = {} + self._keystone = None + self._swift = None + self._neutron = None + self._cinder = None + self._ceilometer = None + + @property + def auth_token(self): + # if there is no auth token in the context + # attempt to get one using the context username and password + return self.context.auth_token or self.keystone().auth_token + + def keystone(self): + if self._keystone: + return self._keystone + + self._keystone = hkc.KeystoneClient(self.context) + return self._keystone + + def url_for(self, **kwargs): + return self.keystone().url_for(**kwargs) + + def nova(self, service_type='compute'): + if service_type in self._nova: + return self._nova[service_type] + + con = self.context + if self.auth_token is None: + logger.error("Nova connection failed, no auth_token!") + return None + + computeshell = novashell.OpenStackComputeShell() + extensions = computeshell._discover_extensions("1.1") + + args = { + 'project_id': con.tenant_id, + 'auth_url': con.auth_url, + 'service_type': service_type, + 'username': None, + 'api_key': None, + 'extensions': extensions + } + + client = novaclient.Client(1.1, **args) + + management_url = self.url_for( + service_type=service_type, + attr='region', + filter_value='RegionTwo') + client.client.auth_token = self.auth_token + client.client.management_url = management_url +# management_url = self.url_for(service_type=service_type,attr='region',filter_value='RegionTwo') +# client.client.auth_token = self.auth_token +# client.client.management_url = 'http://172.31.127.32:8774/v2/49a3d7c4bbb34a6f843ccc87bab844aa' + + self._nova[service_type] = client + return client + + def swift(self): + if swiftclient is None: + return None + if self._swift: + return self._swift + + con = self.context + if self.auth_token is None: + logger.error("Swift connection failed, no auth_token!") + return None + + args = { + 'auth_version': '2.0', + 'tenant_name': con.tenant_id, + 'user': con.username, + 'key': None, + 'authurl': None, + 'preauthtoken': self.auth_token, + 'preauthurl': self.url_for(service_type='object-store') + } + self._swift = swiftclient.Connection(**args) + return self._swift + + def neutron(self): + if neutronclient is None: + return None + if self._neutron: + return self._neutron + + con = self.context + if self.auth_token is None: + logger.error("Neutron connection failed, no auth_token!") + return None + + if self.context.region_name is None: + management_url = self.url_for(service_type='network') + else: + management_url = self.url_for( + service_type='network', + attr='region', + filter_value=self.context.region_name) + args = { + 'auth_url': con.auth_url, + 'service_type': 'network', + 'token': self.auth_token, + 'endpoint_url': management_url + } + + self._neutron = neutronclient.Client(**args) + + return self._neutron + + def cinder(self): + if cinderclient is None: + return self.nova('volume') + if self._cinder: + return self._cinder + + con = self.context + if self.auth_token is None: + logger.error("Cinder connection failed, no auth_token!") + return None + + args = { + 'service_type': 'volume', + 'auth_url': con.auth_url, + 'project_id': con.tenant_id, + 'username': None, + 'api_key': None + } + + self._cinder = cinderclient.Client('1', **args) + management_url = self.url_for(service_type='volume') + self._cinder.client.auth_token = self.auth_token + self._cinder.client.management_url = management_url + + return self._cinder + + def ceilometer(self): + if ceilometerclient is None: + 
return None + if self._ceilometer: + return self._ceilometer + + if self.auth_token is None: + logger.error("Ceilometer connection failed, no auth_token!") + return None + con = self.context + args = { + 'auth_url': con.auth_url, + 'service_type': 'metering', + 'project_id': con.tenant_id, + 'token': lambda: self.auth_token, + 'endpoint': self.url_for(service_type='metering'), + } + + client = ceilometerclient.Client(**args) + + self._ceilometer = client + return self._ceilometer + + +if cfg.CONF.cloud_backend: + cloud_backend_module = importutils.import_module(cfg.CONF.cloud_backend) + Clients = cloud_backend_module.Clients +else: + Clients = OpenStackClients + +logger.debug('Using backend %s' % Clients) diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/l2_proxy.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/l2_proxy.py new file mode 100644 index 00000000..ffd2b94f --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/l2_proxy.py @@ -0,0 +1,1755 @@ +# Copyright 2014, Huawei, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Haojie Jia, Huawei + + +import hashlib +import signal +import sys +import time +import json +from neutron import context as n_context +from neutron.common import constants as const + +import eventlet +eventlet.monkey_patch() + +import netaddr +from neutron.plugins.openvswitch.agent import ovs_dvr_neutron_agent +from oslo.config import cfg +from six import moves + +from neutron.agent import l2population_rpc +from neutron.agent.linux import ip_lib +from neutron.agent.linux import ovs_lib +from neutron.agent.linux import polling +from neutron.agent.linux import utils +from neutron.agent import rpc as agent_rpc +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc import dvr_rpc +from neutron.common import config as common_config +from neutron.common import constants as q_const +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils as q_utils +from neutron import context +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.plugins.common import constants as p_const +from neutron.plugins.l2_proxy.common import config # noqa +from neutron.plugins.l2_proxy.common import constants +from neutron.plugins.l2_proxy.agent import neutron_proxy_context +from neutron.plugins.l2_proxy.agent import clients + + +LOG = logging.getLogger(__name__) + +# A placeholder for dead vlans. 
+DEAD_VLAN_TAG = str(q_const.MAX_VLAN_TAG + 1) + + +class QueryPortsInfoInterface: + + def __init__(self): + self.context = n_context.get_admin_context_without_session() + + def _list_ports(self): + keystone_auth_url = cfg.CONF.AGENT.keystone_auth_url + kwargs = {'auth_token': None, + 'username': cfg.CONF.AGENT.neutron_user_name, + 'password': cfg.CONF.AGENT.neutron_password, + 'aws_creds': None, + 'tenant': cfg.CONF.AGENT.neutron_tenant_name, + 'auth_url': keystone_auth_url, + 'roles': self.context.roles, + 'is_admin': self.context.is_admin, + 'region_name': cfg.CONF.AGENT.os_region_name} + reqCon = neutron_proxy_context.RequestContext(**kwargs) + openStackClients = clients.OpenStackClients(reqCon) + neutronClient = openStackClients.neutron() + #filters = {'status': 'ACTIVE'} + #bodyResponse = neutronClient.list_ports(filters = filters) + bodyResponse = neutronClient.list_ports(status='ACTIVE') + LOG.debug(_('list ports, Response:%s'), str(bodyResponse)) + return bodyResponse + + def get_update_net_port_info(self): + ports = self._list_ports() + return ports.get("ports", []) + + +class RemotePort: + + def __init__(self, port_id, port_name, mac, binding_profile, ips=None): + self.port_id = port_id + self.port_name = port_name + self.mac = mac + self.binding_profile = binding_profile + if(ips is None): + self.ip = set() + else: + self.ip = set(ips) + + +class LocalPort: + + def __init__(self, port_id, cascaded_port_id, mac, ips=None): + self.port_id = port_id + self.cascaded_port_id = cascaded_port_id + self.mac = mac + if(ips is None): + self.ip = set() + else: + self.ip = set(ips) + + +# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac' +# attributes set). +class LocalVLANMapping: + + def __init__(self, network_type, physical_network, segmentation_id, + cascaded_net_id, vif_ports=None): + if vif_ports is None: + self.vif_ports = {} + else: + self.vif_ports = vif_ports + + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + + self.remote_ports = {} + self.cascaded_net_id = cascaded_net_id + self.cascaded_subnet = {} + + def __str__(self): + return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % + (self.vlan, self.network_type, self.physical_network, + self.segmentation_id)) + + +class OVSPluginApi(agent_rpc.PluginApi, + dvr_rpc.DVRServerRpcApiMixin, + sg_rpc.SecurityGroupServerRpcApiMixin): + pass + + +class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): + + def __init__(self, context, plugin_rpc, root_helper): + self.context = context + self.plugin_rpc = plugin_rpc + self.root_helper = root_helper + self.init_firewall(defer_refresh_firewall=True) + + +class OVSNeutronAgent(n_rpc.RpcCallback, + sg_rpc.SecurityGroupAgentRpcCallbackMixin, + l2population_rpc.L2populationRpcCallBackMixin, + dvr_rpc.DVRAgentRpcCallbackMixin): + + '''Implements OVS-based tunneling, VLANs and flat networks. + + Two local bridges are created: an integration bridge (defaults to + 'br-int') and a tunneling bridge (defaults to 'br-tun'). An + additional bridge is created for each physical network interface + used for VLANs and/or flat networks. + + All VM VIFs are plugged into the integration bridge. VM VIFs on a + given virtual network share a common "local" VLAN (i.e. not + propagated externally). The VLAN id of this local VLAN is mapped + to the physical networking details realizing that virtual network. 
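+
+    Note (cascading proxy): in this l2 proxy variant, ports are discovered by
+    querying the cascaded Neutron (QueryPortsInfoInterface) rather than the
+    local integration bridge; 'remote_port' entries are created on the
+    cascaded Neutron for L2 population, and the cascaded host IP and
+    network/subnet mappings are written back into the cascading port's
+    binding:profile.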
+ + For virtual networks realized as GRE tunnels, a Logical Switch + (LS) identifier is used to differentiate tenant traffic on + inter-HV tunnels. A mesh of tunnels is created to other + Hypervisors in the cloud. These tunnels originate and terminate on + the tunneling bridge of each hypervisor. Port patching is done to + connect local VLANs on the integration bridge to inter-hypervisor + tunnels on the tunnel bridge. + + For each virtual network realized as a VLAN or flat network, a + veth or a pair of patch ports is used to connect the local VLAN on + the integration bridge with the physical network bridge, with flow + rules adding, modifying, or stripping VLAN tags as necessary. + ''' + + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + # 1.2 Support DVR (Distributed Virtual Router) RPC + RPC_API_VERSION = '1.2' + + def __init__(self, integ_br, tun_br, local_ip, + bridge_mappings, root_helper, + polling_interval, tunnel_types=None, + veth_mtu=None, l2_population=False, + enable_distributed_routing=False, + minimize_polling=False, + ovsdb_monitor_respawn_interval=( + constants.DEFAULT_OVSDBMON_RESPAWN), + arp_responder=False, + use_veth_interconnection=False): + '''Constructor. + + :param integ_br: name of the integration bridge. + :param tun_br: name of the tunnel bridge. + :param local_ip: local IP address of this hypervisor. + :param bridge_mappings: mappings from physical network name to bridge. + :param root_helper: utility to use when running shell cmds. + :param polling_interval: interval (secs) to poll DB. + :param tunnel_types: A list of tunnel types to enable support for in + the agent. If set, will automatically set enable_tunneling to + True. + :param veth_mtu: MTU size for veth interfaces. + :param l2_population: Optional, whether L2 population is turned on + :param minimize_polling: Optional, whether to minimize polling by + monitoring ovsdb for interface changes. + :param ovsdb_monitor_respawn_interval: Optional, when using polling + minimization, the number of seconds to wait before respawning + the ovsdb monitor. + :param arp_responder: Optional, enable local ARP responder if it is + supported. + :param use_veth_interconnection: use veths instead of patch ports to + interconnect the integration bridge to physical bridges. + ''' + super(OVSNeutronAgent, self).__init__() + self.use_veth_interconnection = use_veth_interconnection + self.veth_mtu = veth_mtu + self.root_helper = root_helper + self.available_local_vlans = set(moves.xrange(q_const.MIN_VLAN_TAG, + q_const.MAX_VLAN_TAG)) + self.tunnel_types = tunnel_types or [] + self.l2_pop = l2_population + # TODO(ethuleau): Initially, local ARP responder is be dependent to the + # ML2 l2 population mechanism driver. 
+ self.arp_responder_enabled = (arp_responder and + self._check_arp_responder_support() and + self.l2_pop) + self.enable_distributed_routing = enable_distributed_routing + self.agent_state = { + 'binary': 'neutron-openvswitch-agent', + 'host': cfg.CONF.host, + 'topic': q_const.L2_AGENT_TOPIC, + 'configurations': {'bridge_mappings': bridge_mappings, + 'tunnel_types': self.tunnel_types, + 'tunneling_ip': local_ip, + 'l2_population': self.l2_pop, + 'arp_responder_enabled': + self.arp_responder_enabled, + 'enable_distributed_routing': + self.enable_distributed_routing}, + 'agent_type': q_const.AGENT_TYPE_OVS, + 'start_flag': True} + + self.query_ports_info_inter = QueryPortsInfoInterface() + self.cascaded_port_info = {} + self.cascaded_host_map = {} + + # Keep track of int_br's device count for use by _report_state() + self.int_br_device_count = 0 + + self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper) + self.setup_integration_br() + # Stores port update notifications for processing in main rpc loop + self.updated_ports = set() + self.setup_rpc() + self.bridge_mappings = bridge_mappings + self.setup_physical_bridges(self.bridge_mappings) + self.local_vlan_map = {} + self.tun_br_ofports = {p_const.TYPE_GRE: {}, + p_const.TYPE_VXLAN: {}} + + self.polling_interval = polling_interval + self.minimize_polling = minimize_polling + self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval + + if tunnel_types: + self.enable_tunneling = True + else: + self.enable_tunneling = False + self.local_ip = local_ip + self.tunnel_count = 0 + self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port + self.dont_fragment = cfg.CONF.AGENT.dont_fragment + self.tun_br = None + self.patch_int_ofport = constants.OFPORT_INVALID + self.patch_tun_ofport = constants.OFPORT_INVALID + if self.enable_tunneling: + # The patch_int_ofport and patch_tun_ofport are updated + # here inside the call to setup_tunnel_br + self.setup_tunnel_br(tun_br) + + self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent( + self.context, + self.plugin_rpc, + self.int_br, + self.tun_br, + self.patch_int_ofport, + self.patch_tun_ofport, + cfg.CONF.host, + self.enable_tunneling, + self.enable_distributed_routing) + + # self.dvr_agent.setup_dvr_flows_on_integ_tun_br() + + # Collect additional bridges to monitor + self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br) + + # Security group agent support + self.sg_agent = OVSSecurityGroupAgent(self.context, + self.plugin_rpc, + root_helper) + # Initialize iteration counter + self.iter_num = 0 + self.run_daemon_loop = True + + def _check_arp_responder_support(self): + '''Check if OVS supports to modify ARP headers. + + This functionality is only available since the development branch 2.1. 
+ ''' + args = ['arp,action=load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[]'] + supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'add-flow', + args) + if not supported: + LOG.warning(_('OVS version can not support ARP responder.')) + return supported + + def _report_state(self): + # How many devices are likely used by a VM + self.agent_state.get('configurations')['devices'] = ( + self.int_br_device_count) + try: + self.state_rpc.report_state(self.context, + self.agent_state) + self.agent_state.pop('start_flag', None) + except Exception: + LOG.exception(_("Failed reporting state!")) + + def setup_rpc(self): + self.agent_id = 'ovs-agent-%s' % cfg.CONF.host + self.topic = topics.AGENT + self.plugin_rpc = OVSPluginApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + + # RPC network init + self.context = context.get_admin_context_without_session() + # Handle updates from service + self.endpoints = [self] + # Define the listening consumers for the agent + consumers = [[topics.PORT, topics.UPDATE], + [topics.NETWORK, topics.DELETE], + [constants.TUNNEL, topics.UPDATE], + [topics.SECURITY_GROUP, topics.UPDATE], + [topics.DVR, topics.UPDATE]] + if self.l2_pop: + consumers.append([topics.L2POPULATION, + topics.UPDATE, cfg.CONF.host]) + self.connection = agent_rpc.create_consumers(self.endpoints, + self.topic, + consumers) + report_interval = cfg.CONF.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + + def get_net_uuid(self, vif_id): + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vif_id in vlan_mapping.vif_ports: + return network_id + + def network_delete(self, context, **kwargs): + LOG.debug(_("network_delete received")) + network_id = kwargs.get('network_id') + LOG.debug(_("Delete %s"), network_id) + # The network may not be defined on this agent + lvm = self.local_vlan_map.get(network_id) + if lvm: + self.reclaim_local_vlan(network_id) + else: + LOG.debug(_("Network %s not used on agent."), network_id) + + def port_update(self, context, **kwargs): + port = kwargs.get('port') + # Put the port identifier in the updated_ports set. 
+ # Even if full port details might be provided to this call, + # they are not used since there is no guarantee the notifications + # are processed in the same order as the relevant API requests + self.updated_ports.add(port['id']) + LOG.debug(_("port_update message processed for port %s"), port['id']) + + def tunnel_update(self, context, **kwargs): + LOG.debug(_("tunnel_update received")) + if not self.enable_tunneling: + return + tunnel_ip = kwargs.get('tunnel_ip') + tunnel_id = kwargs.get('tunnel_id', self.get_ip_in_hex(tunnel_ip)) + if not tunnel_id: + return + tunnel_type = kwargs.get('tunnel_type') + if not tunnel_type: + LOG.error(_("No tunnel_type specified, cannot create tunnels")) + return + if tunnel_type not in self.tunnel_types: + LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type) + return + if tunnel_ip == self.local_ip: + return + tun_name = '%s-%s' % (tunnel_type, tunnel_id) + if not self.l2_pop: + self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type) + + def _create_port(self, context, network_id, binding_profile, port_name, + mac_address, ips): + if(not network_id): + LOG.error(_("No network id is specified, cannot create port")) + return + keystone_auth_url = cfg.CONF.AGENT.keystone_auth_url + kwargs = {'auth_token': None, + 'username': cfg.CONF.AGENT.neutron_user_name, + 'password': cfg.CONF.AGENT.neutron_password, + 'aws_creds': None, + 'tenant': cfg.CONF.AGENT.neutron_tenant_name, + # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', #context.values['tenant'], + 'auth_url': keystone_auth_url, + 'roles': context.roles, + 'is_admin': context.is_admin, + 'region_name': cfg.CONF.AGENT.os_region_name} + reqCon = neutron_proxy_context.RequestContext(**kwargs) + openStackClients = clients.OpenStackClients(reqCon) + neutronClient = openStackClients.neutron() + req_props = {'network_id': network_id, + 'name': port_name, + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': ip} for ip in ips], + 'mac_address': mac_address, + 'binding:profile': binding_profile, + 'device_owner': 'compute:' + } + bodyResponse = neutronClient.create_port({'port': req_props}) + LOG.debug(_('create port, Response:%s'), str(bodyResponse)) + return bodyResponse + + def _destroy_port(self, context, port_id): + if(not port_id): + LOG.error(_("No port id is specified, cannot destroy port")) + return + keystone_auth_url = cfg.CONF.AGENT.keystone_auth_url + kwargs = {'auth_token': None, + 'username': cfg.CONF.AGENT.neutron_user_name, + 'password': cfg.CONF.AGENT.neutron_password, + 'aws_creds': None, + 'tenant': cfg.CONF.AGENT.neutron_tenant_name, + # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', #context.values['tenant'], + 'auth_url': keystone_auth_url, + 'roles': context.roles, + 'is_admin': context.is_admin, + 'region_name': cfg.CONF.AGENT.os_region_name} + reqCon = neutron_proxy_context.RequestContext(**kwargs) + openStackClients = clients.OpenStackClients(reqCon) + neutronClient = openStackClients.neutron() + bodyResponse = neutronClient.delete_port(port_id) + LOG.debug(_('destroy port, Response:%s'), str(bodyResponse)) + return bodyResponse + + def fdb_add(self, context, fdb_entries): + LOG.debug(_("fdb_add received")) + for network_id, values in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + # Agent doesn't manage any port in this network + continue + cascaded_net_id = lvm.cascaded_net_id + if not cascaded_net_id: + continue + agent_ports = values.get('ports') + agent_ports.pop(self.local_ip, None) + if len(agent_ports): + for agent_ip, ports 
in agent_ports.items(): + binding_profile = {"port_key": "remote_port", + "host_ip": agent_ip} + port_name = 'remote_port' + mac_ip_map = {} + for port in ports: + if(port == q_const.FLOODING_ENTRY): + continue + if(const.DEVICE_OWNER_DVR_INTERFACE in port[1]): + return + ips = mac_ip_map.get(port[0]) + if(ips): + ips += port[2] + mac_ip_map[port[0]] = ips + else: + mac_ip_map[port[0]] = [port[2]] + for mac_address, ips in mac_ip_map.items(): + if(lvm.remote_ports.get(mac_address) or + lvm.vif_ports.get(mac_address)): + continue + port_ret = self._create_port(context, + cascaded_net_id, + binding_profile, + port_name, + mac_address, + ips) + if(not port_ret or + (port_ret and (not port_ret.get('port')))): + LOG.debug(_("remote port created failed, " + "binding_profile:%s, mac_address:%s"), + str(binding_profile), mac_address) + return + port_id = port_ret['port'].get('id', None) + if not port_id: + LOG.debug(_("remote port created failed, " + "port_name%s, mac_address:%s"), + port_name, mac_address) + return + remote_port = RemotePort(port_id, + port_name, + mac_address, + binding_profile, + ips) + lvm.remote_ports[mac_address] = remote_port + + def fdb_remove(self, context, fdb_entries): + LOG.debug(_("fdb_remove received")) + for network_id, values in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + # Agent doesn't manage any more ports in this network + continue + agent_ports = values.get('ports') + agent_ports.pop(self.local_ip, None) + if len(agent_ports): + for agent_ip, ports in agent_ports.items(): + for port in ports: + rp = lvm.remote_ports.pop(port[0], None) + if not rp: + continue + self._destroy_port(context, rp.port_id) + + def _fdb_chg_ip(self, context, fdb_entries): + '''fdb update when an IP of a port is updated. + + The ML2 l2-pop mechanism driver send an fdb update rpc message when an + IP of a port is updated. + + :param context: RPC context. + :param fdb_entries: fdb dicts that contain all mac/IP informations per + agent and network. + {'net1': + {'agent_ip': + {'before': [[mac, ip]], + 'after': [[mac, ip]] + } + } + 'net2': + ... + } + ''' + LOG.debug(_("update chg_ip received")) + + # TODO(ethuleau): Use OVS defer apply flows for all rules will be an + # interesting improvement here. But actually, OVS lib defer apply flows + # methods doesn't ensure the add flows will be applied before delete. + for network_id, agent_ports in fdb_entries.items(): + lvm = self.local_vlan_map.get(network_id) + if not lvm: + continue + + for agent_ip, state in agent_ports.items(): + if agent_ip == self.local_ip: + continue + + after = state.get('after') + for mac, ip in after: + self._set_arp_responder('add', lvm.vlan, mac, ip) + + before = state.get('before') + for mac, ip in before: + self._set_arp_responder('remove', lvm.vlan, mac, ip) + + def fdb_update(self, context, fdb_entries): + LOG.debug(_("fdb_update received")) + for action, values in fdb_entries.items(): + method = '_fdb_' + action + if not hasattr(self, method): + raise NotImplementedError() + + getattr(self, method)(context, values) + + def _set_arp_responder(self, action, lvid, mac_str, ip_str): + '''Set the ARP respond entry. + + When the l2 population mechanism driver and OVS supports to edit ARP + fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the + tunnel bridge. + + :param action: add or remove ARP entry. + :param lvid: local VLAN map of network's ARP entry. + :param mac_str: MAC string value. + :param ip_str: IP string value. 
+ ''' + if not self.arp_responder_enabled: + return + + mac = netaddr.EUI(mac_str, dialect=netaddr.mac_unix) + ip = netaddr.IPAddress(ip_str) + + if action == 'add': + actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' + 'mod_dl_src:%(mac)s,' + 'load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' + 'load:%(mac)#x->NXM_NX_ARP_SHA[],' + 'load:%(ip)#x->NXM_OF_ARP_SPA[],' + 'in_port' % {'mac': mac, 'ip': ip}) + self.tun_br.add_flow(table=constants.ARP_RESPONDER, + priority=1, + proto='arp', + dl_vlan=lvid, + nw_dst='%s' % ip, + actions=actions) + elif action == 'remove': + self.tun_br.delete_flows(table=constants.ARP_RESPONDER, + proto='arp', + dl_vlan=lvid, + nw_dst='%s' % ip) + else: + LOG.warning(_('Action %s not supported'), action) + + def provision_local_vlan(self, net_uuid, network_type, physical_network, + segmentation_id, cascaded_net_id): + '''Provisions a local VLAN. + + :param net_uuid: the uuid of the network associated with this vlan. + :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', + 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + ''' + + # On a restart or crash of OVS, the network associated with this VLAN + # will already be assigned, so check for that here before assigning a + # new one. + lvm = self.local_vlan_map.get(net_uuid) + if lvm: + lvid = lvm.vlan + else: + if not self.available_local_vlans: + LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) + return + lvid = self.available_local_vlans.pop() + self.local_vlan_map[net_uuid] = LocalVLANMapping( + network_type, + physical_network, + segmentation_id, + cascaded_net_id) + + LOG.info(_("Assigning %(vlan_id)s as local vlan for " + "net-id=%(net_uuid)s"), + {'vlan_id': lvid, 'net_uuid': net_uuid}) + + def reclaim_local_vlan(self, net_uuid): + '''Reclaim a local VLAN. + + :param net_uuid: the network uuid associated with this vlan. + :param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id, + vif_ids) mapping. + ''' + lvm = self.local_vlan_map.pop(net_uuid, None) + if lvm is None: + LOG.debug(_("Network %s not used on agent."), net_uuid) + return + + LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"), + {'vlan_id': lvm.vlan, + 'net_uuid': net_uuid}) + + if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: + if self.enable_tunneling: + self.tun_br.delete_flows( + table=constants.TUN_TABLE[lvm.network_type], + tun_id=lvm.segmentation_id) + self.tun_br.delete_flows(dl_vlan=lvm.vlan) + if self.l2_pop: + # Try to remove tunnel ports if not used by other networks + for ofport in lvm.tun_ofports: + self.cleanup_tunnel_port(ofport, lvm.network_type) + elif lvm.network_type == p_const.TYPE_FLAT: + if lvm.physical_network in self.phys_brs: + # outbound + br = self.phys_brs[lvm.physical_network] + br.delete_flows(in_port=self.phys_ofports[lvm. + physical_network], + dl_vlan=lvm.vlan) + # inbound + br = self.int_br + br.delete_flows(in_port=self.int_ofports[lvm.physical_network], + dl_vlan=0xffff) + elif lvm.network_type == p_const.TYPE_VLAN: + if lvm.physical_network in self.phys_brs: + # outbound + br = self.phys_brs[lvm.physical_network] + br.delete_flows(in_port=self.phys_ofports[lvm. 
+ physical_network], + dl_vlan=lvm.vlan) + # inbound + br = self.int_br + br.delete_flows(in_port=self.int_ofports[lvm.physical_network], + dl_vlan=lvm.segmentation_id) + elif lvm.network_type == p_const.TYPE_LOCAL: + # no flows needed for local networks + pass + else: + LOG.error(_("Cannot reclaim unknown network type " + "%(network_type)s for net-id=%(net_uuid)s"), + {'network_type': lvm.network_type, + 'net_uuid': net_uuid}) + + self.available_local_vlans.add(lvm.vlan) + + def port_bound(self, port, net_uuid, + network_type, physical_network, + segmentation_id, fixed_ips, device_owner, + cascaded_port_info, + ovs_restarted): + '''Bind port to net_uuid/lsw_id and install flow for inbound traffic + to vm. + + :param port: a ovslib.VifPort object. + :param net_uuid: the net_uuid this port is to be associated with. + :param network_type: the network type ('gre', 'vlan', 'flat', 'local') + :param physical_network: the physical network for 'vlan' or 'flat' + :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' + :param fixed_ips: the ip addresses assigned to this port + :param device_owner: the string indicative of owner of this port + :param ovs_restarted: indicates if this is called for an OVS restart. + ''' + if net_uuid not in self.local_vlan_map or ovs_restarted: + self.provision_local_vlan(net_uuid, network_type, + physical_network, segmentation_id, + cascaded_port_info['network_id']) + lvm = self.local_vlan_map[net_uuid] + lvm.vif_ports[cascaded_port_info['mac_address']] = \ + LocalPort(port, + cascaded_port_info['id'], + cascaded_port_info['mac_address']) + # may be flush subnet_map info --add by j00209498 +# for subnet_ip in cascaded_port_info['fixed_ips']: +# subnet_id = subnet_ip('subnet_id') +# if(not lvm.get(subnet_id)): +# subnet_info = self.get_cascaded_subnet_info(subnet_id) +# if len(subnet_info.get('subnets')) > 0: +# lvm.cascaded_subnet[subnet_id] = subnet_info['subnets']['cidr'] + + def port_unbound(self, vif_id, net_uuid=None): + '''Unbind port. + + Removes corresponding local vlan mapping object if this is its last + VIF. + + :param vif_id: the id of the vif + :param net_uuid: the net_uuid this port is associated with. + ''' + if net_uuid is None: + net_uuid = self.get_net_uuid(vif_id) + + if not self.local_vlan_map.get(net_uuid): + LOG.info(_('port_unbound(): net_uuid %s not' + ' in local_vlan_map'), net_uuid) + return + + lvm = self.local_vlan_map[net_uuid] + +# if vif_id in lvm.vif_ports: +# vif_port = lvm.vif_ports[vif_id] +# self.dvr_agent.unbind_port_from_dvr(vif_port, +# local_vlan_id=lvm.vlan) + lvm.vif_ports.pop(vif_id, None) + + if not lvm.vif_ports: + self.reclaim_local_vlan(net_uuid) + + def port_dead(self, port): + '''Once a port has no binding, put it on the "dead vlan". + + :param port: a ovs_lib.VifPort object. + ''' + # Don't kill a port if it's already dead + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + if cur_tag != DEAD_VLAN_TAG: + self.int_br.set_db_attribute("Port", port.port_name, "tag", + DEAD_VLAN_TAG) + self.int_br.add_flow(priority=2, in_port=port.ofport, + actions="drop") + + def setup_integration_br(self): + '''Setup the integration bridge. + + Create patch ports and remove all existing flows. + + :param bridge_name: the name of the integration bridge. + :returns: the integration bridge + ''' + # Ensure the integration bridge is created. + # ovs_lib.OVSBridge.create() will run + # ovs-vsctl -- --may-exist add-br BRIDGE_NAME + # which does nothing if bridge already exists. 
+ self.int_br.create() + self.int_br.set_secure_mode() + + self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) + self.int_br.remove_all_flows() + # switch all traffic using L2 learning + self.int_br.add_flow(priority=1, actions="normal") + # Add a canary flow to int_br to track OVS restarts + self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0, + actions="drop") + + def setup_ancillary_bridges(self, integ_br, tun_br): + '''Setup ancillary bridges - for example br-ex.''' + ovs_bridges = set(ovs_lib.get_bridges(self.root_helper)) + # Remove all known bridges + ovs_bridges.remove(integ_br) + if self.enable_tunneling: + ovs_bridges.remove(tun_br) + br_names = [self.phys_brs[physical_network].br_name for + physical_network in self.phys_brs] + ovs_bridges.difference_update(br_names) + # Filter list of bridges to those that have external + # bridge-id's configured + br_names = [] + for bridge in ovs_bridges: + id = ovs_lib.get_bridge_external_bridge_id(self.root_helper, + bridge) + if id != bridge: + br_names.append(bridge) + ovs_bridges.difference_update(br_names) + ancillary_bridges = [] + for bridge in ovs_bridges: + br = ovs_lib.OVSBridge(bridge, self.root_helper) + LOG.info(_('Adding %s to list of bridges.'), bridge) + ancillary_bridges.append(br) + return ancillary_bridges + + def setup_tunnel_br(self, tun_br=None): + '''Setup the tunnel bridge. + + Creates tunnel bridge, and links it to the integration bridge + using a patch port. + + :param tun_br: the name of the tunnel bridge. + ''' + if not self.tun_br: + self.tun_br = ovs_lib.OVSBridge(tun_br, self.root_helper) + + self.tun_br.reset_bridge() + self.patch_tun_ofport = self.int_br.add_patch_port( + cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) + self.patch_int_ofport = self.tun_br.add_patch_port( + cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) + if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0: + LOG.error(_("Failed to create OVS patch port. Cannot have " + "tunneling enabled on this agent, since this version " + "of OVS does not support tunnels or patch ports. 
" + "Agent terminated!")) + exit(1) + self.tun_br.remove_all_flows() + + # Table 0 (default) will sort incoming traffic depending on in_port + self.tun_br.add_flow(priority=1, + in_port=self.patch_int_ofport, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + self.tun_br.add_flow(priority=0, actions="drop") + if self.arp_responder_enabled: + # ARP broadcast-ed request go to the local ARP_RESPONDER table to + # be locally resolved + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=1, + proto='arp', + dl_dst="ff:ff:ff:ff:ff:ff", + actions=("resubmit(,%s)" % + constants.ARP_RESPONDER)) + # PATCH_LV_TO_TUN table will handle packets coming from patch_int + # unicasts go to table UCAST_TO_TUN where remote addresses are learnt + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.UCAST_TO_TUN) + # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding + self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) + # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id + # for each tunnel type, and resubmit to table LEARN_FROM_TUN where + # remote mac addresses will be learnt + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + self.tun_br.add_flow(table=constants.TUN_TABLE[tunnel_type], + priority=0, + actions="drop") + # LEARN_FROM_TUN table will have a single flow using a learn action to + # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac + # addresses (assumes that lvid has already been set by a previous flow) + learned_flow = ("table=%s," + "priority=1," + "hard_timeout=300," + "NXM_OF_VLAN_TCI[0..11]," + "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," + "load:0->NXM_OF_VLAN_TCI[]," + "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," + "output:NXM_OF_IN_PORT[]" % + constants.UCAST_TO_TUN) + # Once remote mac addresses are learnt, output packet to patch_int + self.tun_br.add_flow(table=constants.LEARN_FROM_TUN, + priority=1, + actions="learn(%s),output:%s" % + (learned_flow, self.patch_int_ofport)) + # Egress unicast will be handled in table UCAST_TO_TUN, where remote + # mac addresses will be learned. For now, just add a default flow that + # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them + # as broadcasts/multicasts + self.tun_br.add_flow(table=constants.UCAST_TO_TUN, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + if self.arp_responder_enabled: + # If none of the ARP entries correspond to the requested IP, the + # broadcast-ed packet is resubmitted to the flooding table + self.tun_br.add_flow(table=constants.ARP_RESPONDER, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, + # for now, add a default drop action + self.tun_br.add_flow(table=constants.FLOOD_TO_TUN, + priority=0, + actions="drop") + + def get_peer_name(self, prefix, name): + """Construct a peer name based on the prefix and name. + + The peer name can not exceed the maximum length allowed for a linux + device. Longer names are hashed to help ensure uniqueness. + """ + if len(prefix + name) <= q_const.DEVICE_NAME_MAX_LEN: + return prefix + name + # We can't just truncate because bridges may be distinguished + # by an ident at the end. A hash over the name should be unique. 
+ # Leave part of the bridge name on for easier identification + hashlen = 6 + namelen = q_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen + new_name = ('%(prefix)s%(truncated)s%(hash)s' % + {'prefix': prefix, 'truncated': name[0:namelen], + 'hash': hashlib.sha1(name).hexdigest()[0:hashlen]}) + LOG.warning(_("Creating an interface named %(name)s exceeds the " + "%(limit)d character limitation. It was shortened to " + "%(new_name)s to fit."), + {'name': name, 'limit': q_const.DEVICE_NAME_MAX_LEN, + 'new_name': new_name}) + return new_name + + def setup_physical_bridges(self, bridge_mappings): + '''Setup the physical network bridges. + + Creates physical network bridges and links them to the + integration bridge using veths. + + :param bridge_mappings: map physical network names to bridge names. + ''' + self.phys_brs = {} + self.int_ofports = {} + self.phys_ofports = {} + ip_wrapper = ip_lib.IPWrapper(self.root_helper) + ovs_bridges = ovs_lib.get_bridges(self.root_helper) + for physical_network, bridge in bridge_mappings.iteritems(): + LOG.info(_("Mapping physical network %(physical_network)s to " + "bridge %(bridge)s"), + {'physical_network': physical_network, + 'bridge': bridge}) + # setup physical bridge + if bridge not in ovs_bridges: + LOG.error(_("Bridge %(bridge)s for physical network " + "%(physical_network)s does not exist. Agent " + "terminated!"), + {'physical_network': physical_network, + 'bridge': bridge}) + sys.exit(1) + br = ovs_lib.OVSBridge(bridge, self.root_helper) + br.remove_all_flows() + br.add_flow(priority=1, actions="normal") + self.phys_brs[physical_network] = br + + # interconnect physical and integration bridges using veth/patchs + int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX, + bridge) + phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX, + bridge) + self.int_br.delete_port(int_if_name) + br.delete_port(phys_if_name) + if self.use_veth_interconnection: + if ip_lib.device_exists(int_if_name, self.root_helper): + ip_lib.IPDevice(int_if_name, + self.root_helper).link.delete() + # Give udev a chance to process its rules here, to avoid + # race conditions between commands launched by udev rules + # and the subsequent call to ip_wrapper.add_veth + utils.execute(['/sbin/udevadm', 'settle', '--timeout=10']) + int_veth, phys_veth = ip_wrapper.add_veth(int_if_name, + phys_if_name) + int_ofport = self.int_br.add_port(int_veth) + phys_ofport = br.add_port(phys_veth) + else: + # Create patch ports without associating them in order to block + # untranslated traffic before association + int_ofport = self.int_br.add_patch_port( + int_if_name, constants.NONEXISTENT_PEER) + phys_ofport = br.add_patch_port( + phys_if_name, constants.NONEXISTENT_PEER) + + self.int_ofports[physical_network] = int_ofport + self.phys_ofports[physical_network] = phys_ofport + + # block all untranslated traffic between bridges + self.int_br.add_flow(priority=2, in_port=int_ofport, + actions="drop") + br.add_flow(priority=2, in_port=phys_ofport, actions="drop") + + if self.use_veth_interconnection: + # enable veth to pass traffic + int_veth.link.set_up() + phys_veth.link.set_up() + if self.veth_mtu: + # set up mtu size for veth interfaces + int_veth.link.set_mtu(self.veth_mtu) + phys_veth.link.set_mtu(self.veth_mtu) + else: + # associate patch ports to pass traffic + self.int_br.set_db_attribute('Interface', int_if_name, + 'options:peer', phys_if_name) + br.set_db_attribute('Interface', phys_if_name, + 'options:peer', int_if_name) + + def get_port_id_from_profile(self, 
profile): + #profile_json = json.load(profile) + return profile.get('cascading_port_id') + + def analysis_ports_info(self, ports_info): + cur_ports = set() + for port in ports_info: + profile = port['binding:profile'] + cascading_port_id = self.get_port_id_from_profile(profile) + if(not cascading_port_id): + continue + self.cascaded_port_info[cascading_port_id] = port + cur_ports.add(cascading_port_id) + return cur_ports + + def scan_ports(self, registered_ports, updated_ports=None): + ports_info = self.query_ports_info_inter.get_update_net_port_info() + cur_ports = self.analysis_ports_info(ports_info) + self.int_br_device_count = len(cur_ports) + port_info = {'current': cur_ports} + if updated_ports is None: + updated_ports = set() + # updated_ports.update(self.check_changed_vlans(registered_ports)) + if updated_ports: + # Some updated ports might have been removed in the + # meanwhile, and therefore should not be processed. + # In this case the updated port won't be found among + # current ports. + updated_ports &= cur_ports + if updated_ports: + port_info['updated'] = updated_ports + + # FIXME(salv-orlando): It's not really necessary to return early + # if nothing has changed. + if cur_ports == registered_ports: + # No added or removed ports to set, just return here + return port_info + + port_info['added'] = cur_ports - registered_ports + # Remove all the known ports not found on the integration bridge + port_info['removed'] = registered_ports - cur_ports + return port_info + + def check_changed_vlans(self, registered_ports): + """Return ports which have lost their vlan tag. + + The returned value is a set of port ids of the ports concerned by a + vlan tag loss. + """ + port_tags = self.int_br.get_port_tag_dict() + changed_ports = set() + for lvm in self.local_vlan_map.values(): + for port in registered_ports: + if ( + port in lvm.vif_ports + and lvm.vif_ports[port].port_name in port_tags + and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan + ): + LOG.info( + _("Port '%(port_name)s' has lost " + "its vlan tag '%(vlan_tag)d'!"), + {'port_name': lvm.vif_ports[port].port_name, + 'vlan_tag': lvm.vlan} + ) + changed_ports.add(port) + return changed_ports + + def update_ancillary_ports(self, registered_ports): + ports = set() + for bridge in self.ancillary_brs: + ports |= bridge.get_vif_port_set() + + if ports == registered_ports: + return + added = ports - registered_ports + removed = registered_ports - ports + return {'current': ports, + 'added': added, + 'removed': removed} + + def treat_vif_port(self, vif_port, port_id, network_id, network_type, + physical_network, segmentation_id, admin_state_up, + fixed_ips, device_owner, cascaded_port_info, + ovs_restarted): + # When this function is called for a port, the port should have + # an OVS ofport configured, as only these ports were considered + # for being treated. 
If that does not happen, it is a potential + # error condition of which operators should be aware + + if admin_state_up: + self.port_bound(vif_port, network_id, network_type, + physical_network, segmentation_id, + fixed_ips, device_owner, cascaded_port_info, + ovs_restarted) + else: + self.port_dead(vif_port) + + def setup_tunnel_port(self, port_name, remote_ip, tunnel_type): + ofport = self.tun_br.add_tunnel_port(port_name, + remote_ip, + self.local_ip, + tunnel_type, + self.vxlan_udp_port, + self.dont_fragment) + ofport_int = -1 + try: + ofport_int = int(ofport) + except (TypeError, ValueError): + LOG.exception(_("ofport should have a value that can be " + "interpreted as an integer")) + if ofport_int < 0: + LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"), + {'type': tunnel_type, 'ip': remote_ip}) + return 0 + + self.tun_br_ofports[tunnel_type][remote_ip] = ofport + # Add flow in default table to resubmit to the right + # tunnelling table (lvid will be set in the latter) + self.tun_br.add_flow(priority=1, + in_port=ofport, + actions="resubmit(,%s)" % + constants.TUN_TABLE[tunnel_type]) + + ofports = ','.join(self.tun_br_ofports[tunnel_type].values()) + if ofports and not self.l2_pop: + # Update flooding flows to include the new tunnel + for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + if vlan_mapping.network_type == tunnel_type: + self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=vlan_mapping.vlan, + actions="strip_vlan," + "set_tunnel:%s,output:%s" % + (vlan_mapping.segmentation_id, + ofports)) + return ofport + + def cleanup_tunnel_port(self, tun_ofport, tunnel_type): + # Check if this tunnel port is still used + for lvm in self.local_vlan_map.values(): + if tun_ofport in lvm.tun_ofports: + break + # If not, remove it + else: + for remote_ip, ofport in self.tun_br_ofports[tunnel_type].items(): + if ofport == tun_ofport: + port_name = '%s-%s' % (tunnel_type, + self.get_ip_in_hex(remote_ip)) + self.tun_br.delete_port(port_name) + self.tun_br.delete_flows(in_port=ofport) + self.tun_br_ofports[tunnel_type].pop(remote_ip, None) + + def compare_port_info(self, details, cascaded_port_info): + if details is None or cascaded_port_info is None: + return False + details_ips_set = set([ip['ip_address'] + for ip in details['fixed_ips']]) + cascaded_ips_set = set([ip['ip_address'] + for ip in cascaded_port_info['fixed_ips']]) + return details_ips_set == cascaded_ips_set + + def get_cascading_neutron_client(self): + context = n_context.get_admin_context_without_session() + keystone_auth_url = cfg.CONF.AGENT.cascading_auth_url + kwargs = {'auth_token': None, + 'username': cfg.CONF.AGENT.cascading_user_name, + 'password': cfg.CONF.AGENT.cascading_password, + 'aws_creds': None, + 'tenant': cfg.CONF.AGENT.cascading_tenant_name, + # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', #context.values['tenant'], + 'auth_url': keystone_auth_url, + 'roles': context.roles, + 'is_admin': context.is_admin, + 'region_name': cfg.CONF.AGENT.cascading_os_region_name} + reqCon = neutron_proxy_context.RequestContext(**kwargs) + openStackClients = clients.OpenStackClients(reqCon) + neutronClient = openStackClients.neutron() + return neutronClient + + def update_cascading_port_profile(self, cascaded_host_ip, + cascaded_port_info, details): + if(not cascaded_host_ip): + return + profile = {'host_ip': cascaded_host_ip, + 'cascaded_net_id': { + details['network_id']: {}}, + 'cascaded_subnet_id': {}} + net_map = profile['cascaded_net_id'][details['network_id']] + 
net_map[cfg.CONF.host] = cascaded_port_info['network_id'] + subnet_map = profile['cascaded_subnet_id'] + for fi_ing in details['fixed_ips']: + for fi_ed in cascaded_port_info['fixed_ips']: + if (fi_ed['ip_address'] == fi_ing['ip_address']): + subnet_map[fi_ing['subnet_id']] = {} + subnet_map[fi_ing['subnet_id']][cfg.CONF.host] = \ + fi_ed['subnet_id'] + break + neutron_client = self.get_cascading_neutron_client() + req_props = {"binding:profile": profile} + port_ret = neutron_client.update_port(details['port_id'], + {'port': req_props}) + LOG.debug(_('update compute port, Response:%s'), str(port_ret)) + + def get_cascaded_neutron_client(self): + context = n_context.get_admin_context_without_session() + keystone_auth_url = cfg.CONF.AGENT.keystone_auth_url + kwargs = {'auth_token': None, + 'username': cfg.CONF.AGENT.neutron_user_name, + 'password': cfg.CONF.AGENT.neutron_password, + 'aws_creds': None, + 'tenant': cfg.CONF.AGENT.neutron_tenant_name, + # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', #context.values['tenant'], + 'auth_url': keystone_auth_url, + 'roles': context.roles, + 'is_admin': context.is_admin, + 'region_name': cfg.CONF.AGENT.os_region_name} + reqCon = neutron_proxy_context.RequestContext(**kwargs) + openStackClients = clients.OpenStackClients(reqCon) + neutronClient = openStackClients.neutron() + return neutronClient + + def get_cascaded_host_ip(self, ed_host_id): + host_ip = self.cascaded_host_map.get(ed_host_id) + if(host_ip): + return host_ip + neutron_client = self.get_cascaded_neutron_client() + agent_ret = neutron_client.list_agents(host=ed_host_id, + agent_type='Open vSwitch agent') + if(not agent_ret or + (agent_ret and (not agent_ret.get('agents')))): + LOG.debug(_("get agent failed, host_id:%s"), ed_host_id) + return + agent_config = agent_ret['agents'][0].get('configurations', None) + # json.loads(agent_config) + configuration = agent_config + host_ip = configuration.get('tunneling_ip') + if(host_ip): + self.cascaded_host_map[ed_host_id] = host_ip + return host_ip + + def treat_devices_added_or_updated(self, devices, ovs_restarted): + try: + devices_details_list = self.plugin_rpc.get_devices_details_list( + self.context, + devices, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug("Unable to get port details for %(devices)s: %(e)s", + {'devices': devices, 'e': e}) + # resync is needed + return True + for details in devices_details_list: + device = details['device'] + LOG.debug("Processing port: %s", device) +# port = self.int_br.get_vif_port_by_id(device) +# if not port: +# The port has disappeared and should not be processed +# There is no need to put the port DOWN in the plugin as +# it never went up in the first place +# LOG.info(_("Port %s was not found on the integration bridge " +# "and will therefore not be processed"), device) +# continue + + if 'port_id' in details: + cascaded_port_info = self.cascaded_port_info.get(device) + if(not self.compare_port_info(details, cascaded_port_info)): + LOG.info(_("Port %(device)s can not updated. " + "Because port info in cascading and cascaded layer" + "are different, Details: %(details)s"), + {'device': device, 'details': details}) + return + LOG.info(_("Port %(device)s updated. 
Details: %(details)s"), + {'device': device, 'details': details}) + self.treat_vif_port(device, details['port_id'], + details['network_id'], + details['network_type'], + details['physical_network'], + details['segmentation_id'], + details['admin_state_up'], + details['fixed_ips'], + details['device_owner'], + cascaded_port_info, + ovs_restarted) + # update cascading port, modify binding:profile to add host_ip + # and cascaded net_id/cascaded_subnet_id + if('compute' in details['device_owner']): + ed_host_id = cascaded_port_info['binding:host_id'] + cascaded_host_ip = self.get_cascaded_host_ip(ed_host_id) + self.update_cascading_port_profile(cascaded_host_ip, + cascaded_port_info, + details) + # update plugin about port status + if details.get('admin_state_up'): + LOG.debug(_("Setting status for %s to UP"), device) + self.plugin_rpc.update_device_up( + self.context, device, self.agent_id, cfg.CONF.host) + else: + LOG.debug(_("Setting status for %s to DOWN"), device) + self.plugin_rpc.update_device_down( + self.context, device, self.agent_id, cfg.CONF.host) + LOG.info(_("Configuration for device %s completed."), device) +# else: +# LOG.warn(_("Device %s not defined on plugin"), device) +# if (port and port.ofport != -1): +# self.port_dead(port) + return False + + def treat_ancillary_devices_added(self, devices): + try: + devices_details_list = self.plugin_rpc.get_devices_details_list( + self.context, + devices, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug("Unable to get port details for " + "%(devices)s: %(e)s", {'devices': devices, 'e': e}) + # resync is needed + return True + + for details in devices_details_list: + device = details['device'] + LOG.info(_("Ancillary Port %s added"), device) + + # update plugin about port status + self.plugin_rpc.update_device_up(self.context, + device, + self.agent_id, + cfg.CONF.host) + return False + + def treat_devices_removed(self, devices): + resync = False + self.sg_agent.remove_devices_filter(devices) + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + self.port_unbound(device) + return resync + + def treat_ancillary_devices_removed(self, devices): + resync = False + for device in devices: + LOG.info(_("Attachment %s removed"), device) + try: + details = self.plugin_rpc.update_device_down(self.context, + device, + self.agent_id, + cfg.CONF.host) + except Exception as e: + LOG.debug(_("port_removed failed for %(device)s: %(e)s"), + {'device': device, 'e': e}) + resync = True + continue + if details['exists']: + LOG.info(_("Port %s updated."), device) + # Nothing to do regarding local networking + else: + LOG.debug(_("Device %s not defined on plugin"), device) + return resync + + def process_network_ports(self, port_info, ovs_restarted): + resync_a = False + resync_b = False + # TODO(salv-orlando): consider a solution for ensuring notifications + # are processed exactly in the same order in which they were + # received. 
This is tricky because there are two notification + # sources: the neutron server, and the ovs db monitor process + # If there is an exception while processing security groups ports + # will not be wired anyway, and a resync will be triggered + # TODO(salv-orlando): Optimize avoiding applying filters unnecessarily + # (eg: when there are no IP address changes) + self.sg_agent.setup_port_filters(port_info.get('added', set()), + port_info.get('updated', set())) + # VIF wiring needs to be performed always for 'new' devices. + # For updated ports, re-wiring is not needed in most cases, but needs + # to be performed anyway when the admin state of a device is changed. + # A device might be both in the 'added' and 'updated' + # list at the same time; avoid processing it twice. + devices_added_updated = (port_info.get('added', set()) | + port_info.get('updated', set())) + if devices_added_updated: + start = time.time() + resync_a = self.treat_devices_added_or_updated( + devices_added_updated, ovs_restarted) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" + "treat_devices_added_or_updated completed " + "in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_b = self.treat_devices_removed(port_info['removed']) + LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" + "treat_devices_removed completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def process_ancillary_network_ports(self, port_info): + resync_a = False + resync_b = False + if 'added' in port_info: + start = time.time() + resync_a = self.treat_ancillary_devices_added(port_info['added']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_added " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + if 'removed' in port_info: + start = time.time() + resync_b = self.treat_ancillary_devices_removed( + port_info['removed']) + LOG.debug(_("process_ancillary_network_ports - iteration: " + "%(iter_num)d - treat_ancillary_devices_removed " + "completed in %(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + # If one of the above operations fails => resync with plugin + return (resync_a | resync_b) + + def get_ip_in_hex(self, ip_address): + try: + return '%08x' % netaddr.IPAddress(ip_address, version=4) + except Exception: + LOG.warn(_("Unable to create tunnel port. Invalid remote IP: %s"), + ip_address) + return + + def tunnel_sync(self): + resync = False + try: + for tunnel_type in self.tunnel_types: + details = self.plugin_rpc.tunnel_sync(self.context, + self.local_ip, + tunnel_type) + if not self.l2_pop: + tunnels = details['tunnels'] + for tunnel in tunnels: + if self.local_ip != tunnel['ip_address']: + tunnel_id = tunnel.get('id') + # Unlike the OVS plugin, ML2 doesn't return an id + # key. So use ip_address to form port name instead. + # Port name must be <=15 chars, so use shorter hex. 
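+ # For example, get_ip_in_hex() renders a remote IP of 192.168.1.10 as
+ # '%08x' % netaddr.IPAddress('192.168.1.10'), i.e. 'c0a8010a', so the
+ # port name becomes e.g. 'vxlan-c0a8010a' (14 characters, within the limit).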
+ remote_ip = tunnel['ip_address'] + remote_ip_hex = self.get_ip_in_hex(remote_ip) + if not tunnel_id and not remote_ip_hex: + continue + tun_name = '%s-%s' % (tunnel_type, + tunnel_id or remote_ip_hex) + self.setup_tunnel_port(tun_name, + tunnel['ip_address'], + tunnel_type) + except Exception as e: + LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"), + {'local_ip': self.local_ip, 'e': e}) + resync = True + return resync + + def _agent_has_updates(self, polling_manager): + return (polling_manager.is_polling_required or + self.updated_ports or + self.sg_agent.firewall_refresh_needed()) + + def _port_info_has_changes(self, port_info): + return (port_info.get('added') or + port_info.get('removed') or + port_info.get('updated')) + + def check_ovs_restart(self): + # Check for the canary flow + canary_flow = self.int_br.dump_flows_for_table(constants.CANARY_TABLE) + return not canary_flow + + def rpc_loop(self, polling_manager=None): + if not polling_manager: + polling_manager = polling.AlwaysPoll() + + sync = True + ports = set() + updated_ports_copy = set() + ancillary_ports = set() + tunnel_sync = True + ovs_restarted = False + while self.run_daemon_loop: + start = time.time() + port_stats = {'regular': {'added': 0, + 'updated': 0, + 'removed': 0}, + 'ancillary': {'added': 0, + 'removed': 0}} + LOG.debug(_("Agent rpc_loop - iteration:%d started"), + self.iter_num) + if sync: + LOG.info(_("Agent out of sync with plugin!")) + ports.clear() + ancillary_ports.clear() + sync = False + polling_manager.force_polling() + ovs_restarted = self.check_ovs_restart() + if ovs_restarted: + self.setup_integration_br() + self.setup_physical_bridges(self.bridge_mappings) + if self.enable_tunneling: + self.setup_tunnel_br() + tunnel_sync = True + self.dvr_agent.reset_ovs_parameters(self.int_br, + self.tun_br, + self.patch_int_ofport, + self.patch_tun_ofport) + self.dvr_agent.setup_dvr_flows_on_integ_tun_br() + # Notify the plugin of tunnel IP + if self.enable_tunneling and tunnel_sync: + LOG.info(_("Agent tunnel out of sync with plugin!")) + try: + tunnel_sync = self.tunnel_sync() + except Exception: + LOG.exception(_("Error while synchronizing tunnels")) + tunnel_sync = True +# if self._agent_has_updates(polling_manager) or ovs_restarted: + if True: + try: + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " + "starting polling. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Save updated ports dict to perform rollback in + # case resync would be needed, and then clear + # self.updated_ports. As the greenthread should not yield + # between these two statements, this will be thread-safe + updated_ports_copy = self.updated_ports + self.updated_ports = set() + reg_ports = (set() if ovs_restarted else ports) + port_info = self.scan_ports(reg_ports, updated_ports_copy) + ports = port_info['current'] + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " + "port information retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + # Secure and wire/unwire VIFs and update their status + # on Neutron server + if (self._port_info_has_changes(port_info) or + self.sg_agent.firewall_refresh_needed() or + ovs_restarted): + LOG.debug(_("Starting to process devices in:%s"), + port_info) + # If treat devices fails - must resync with plugin + sync = self.process_network_ports(port_info, + ovs_restarted) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" + "ports processed. 
Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + port_stats['regular']['added'] = ( + len(port_info.get('added', []))) + port_stats['regular']['updated'] = ( + len(port_info.get('updated', []))) + port_stats['regular']['removed'] = ( + len(port_info.get('removed', []))) + # Treat ancillary devices if they exist + if self.ancillary_brs: + port_info = self.update_ancillary_ports( + ancillary_ports) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" + "ancillary port info retrieved. " + "Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + + if port_info: + rc = self.process_ancillary_network_ports( + port_info) + LOG.debug(_("Agent rpc_loop - iteration:" + "%(iter_num)d - ancillary ports " + "processed. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'elapsed': time.time() - start}) + ancillary_ports = port_info['current'] + port_stats['ancillary']['added'] = ( + len(port_info.get('added', []))) + port_stats['ancillary']['removed'] = ( + len(port_info.get('removed', []))) + sync = sync | rc + + polling_manager.polling_completed() + except Exception: + LOG.exception(_("Error while processing VIF ports")) + # Put the ports back in self.updated_port + self.updated_ports |= updated_ports_copy + sync = True + + # sleep till end of polling interval + elapsed = (time.time() - start) + LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d " + "completed. Processed ports statistics: " + "%(port_stats)s. Elapsed:%(elapsed).3f"), + {'iter_num': self.iter_num, + 'port_stats': port_stats, + 'elapsed': elapsed}) + if (elapsed < self.polling_interval): + time.sleep(self.polling_interval - elapsed) + else: + LOG.debug(_("Loop iteration exceeded interval " + "(%(polling_interval)s vs. %(elapsed)s)!"), + {'polling_interval': self.polling_interval, + 'elapsed': elapsed}) + self.iter_num = self.iter_num + 1 + + def daemon_loop(self): + with polling.get_polling_manager( + self.minimize_polling, + self.root_helper, + self.ovsdb_monitor_respawn_interval) as pm: + + self.rpc_loop(polling_manager=pm) + + def _handle_sigterm(self, signum, frame): + LOG.debug("Agent caught SIGTERM, quitting daemon loop.") + self.run_daemon_loop = False + + +def create_agent_config_map(config): + """Create a map of agent config parameters. 
+ + :param config: an instance of cfg.CONF + :returns: a map of agent configuration parameters + """ + try: + bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings) + except ValueError as e: + raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) + + kwargs = dict( + integ_br=config.OVS.integration_bridge, + tun_br=config.OVS.tunnel_bridge, + local_ip=config.OVS.local_ip, + bridge_mappings=bridge_mappings, + root_helper=config.AGENT.root_helper, + polling_interval=config.AGENT.polling_interval, + minimize_polling=config.AGENT.minimize_polling, + tunnel_types=config.AGENT.tunnel_types, + veth_mtu=config.AGENT.veth_mtu, + enable_distributed_routing=config.AGENT.enable_distributed_routing, + l2_population=config.AGENT.l2_population, + arp_responder=config.AGENT.arp_responder, + use_veth_interconnection=config.OVS.use_veth_interconnection, + ) + + # If enable_tunneling is TRUE, set tunnel_type to default to GRE + if config.OVS.enable_tunneling and not kwargs['tunnel_types']: + kwargs['tunnel_types'] = [p_const.TYPE_GRE] + + # Verify the tunnel_types specified are valid + for tun in kwargs['tunnel_types']: + if tun not in constants.TUNNEL_NETWORK_TYPES: + msg = _('Invalid tunnel type specified: %s'), tun + raise ValueError(msg) + if not kwargs['local_ip']: + msg = _('Tunneling cannot be enabled without a valid local_ip.') + raise ValueError(msg) + + return kwargs + + +def main(): + cfg.CONF.register_opts(ip_lib.OPTS) + common_config.init(sys.argv[1:]) + common_config.setup_logging(cfg.CONF) + q_utils.log_opt_values(LOG) + + try: + agent_config = create_agent_config_map(cfg.CONF) + except ValueError as e: + LOG.error(_('%s Agent terminated!'), e) + sys.exit(1) + + is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper'] + if is_xen_compute_host: + # Force ip_lib to always use the root helper to ensure that ip + # commands target xen dom0 rather than domU. + cfg.CONF.set_default('ip_lib_force_root', True) + + agent = OVSNeutronAgent(**agent_config) + signal.signal(signal.SIGTERM, agent._handle_sigterm) + + # Start everything. + LOG.info(_("Agent initialized successfully, now running... ")) + agent.daemon_loop() + + +if __name__ == "__main__": + main() diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/neutron_keystoneclient.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/neutron_keystoneclient.py new file mode 100644 index 00000000..08bc41aa --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/neutron_keystoneclient.py @@ -0,0 +1,319 @@ +# Copyright 2014, Huawei, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Haojie Jia, Huawei + +from neutron.openstack.common import context +from neutron.common import exceptions + +import eventlet + +from keystoneclient.v2_0 import client as kc +from keystoneclient.v3 import client as kc_v3 +from oslo.config import cfg + +#from heat.openstack.common import importutils +from neutron.openstack.common import importutils +#from heat.openstack.common import log as logging +from neutron.openstack.common import log as logging + +logger = logging.getLogger( + 'neutron.plugins.cascading_proxy_agent.keystoneclient') + + +class KeystoneClient(object): + + """ + Wrap keystone client so we can encapsulate logic used in resources + Note this is intended to be initialized from a resource on a per-session + basis, so the session context is passed in on initialization + Also note that a copy of this is created every resource as self.keystone() + via the code in engine/client.py, so there should not be any need to + directly instantiate instances of this class inside resources themselves + """ + + def __init__(self, context): + # We have to maintain two clients authenticated with keystone: + # - ec2 interface is v2.0 only + # - trusts is v3 only + # If a trust_id is specified in the context, we immediately + # authenticate so we can populate the context with a trust token + # otherwise, we delay client authentication until needed to avoid + # unnecessary calls to keystone. + # + # Note that when you obtain a token using a trust, it cannot be + # used to reauthenticate and get another token, so we have to + # get a new trust-token even if context.auth_token is set. + # + # - context.auth_url is expected to contain the v2.0 keystone endpoint + self.context = context + self._client_v2 = None + self._client_v3 = None + + if self.context.trust_id: + # Create a connection to the v2 API, with the trust_id, this + # populates self.context.auth_token with a trust-scoped token + self._client_v2 = self._v2_client_init() + + @property + def client_v3(self): + if not self._client_v3: + # Create connection to v3 API + self._client_v3 = self._v3_client_init() + return self._client_v3 + + @property + def client_v2(self): + if not self._client_v2: + self._client_v2 = self._v2_client_init() + return self._client_v2 + + def _v2_client_init(self): + kwargs = { + 'auth_url': self.context.auth_url + } + auth_kwargs = {} + # Note try trust_id first, as we can't reuse auth_token in that case + if self.context.trust_id is not None: + # We got a trust_id, so we use the admin credentials + # to authenticate, then re-scope the token to the + # trust impersonating the trustor user. 
+ # Note that this currently requires the trustor tenant_id + # to be passed to the authenticate(), unlike the v3 call + kwargs.update(self._service_admin_creds(api_version=2)) + auth_kwargs['trust_id'] = self.context.trust_id + auth_kwargs['tenant_id'] = self.context.tenant_id + elif self.context.auth_token is not None: + kwargs['tenant_name'] = self.context.tenant + kwargs['token'] = self.context.auth_token + elif self.context.password is not None: + kwargs['username'] = self.context.username + kwargs['password'] = self.context.password + kwargs['tenant_name'] = self.context.tenant + kwargs['tenant_id'] = self.context.tenant_id + else: + logger.error("Keystone v2 API connection failed, no password or " + "auth_token!") + raise exception.AuthorizationFailure() + client_v2 = kc.Client(**kwargs) + + client_v2.authenticate(**auth_kwargs) + # If we are authenticating with a trust auth_kwargs are set, so set + # the context auth_token with the re-scoped trust token + if auth_kwargs: + # Sanity check + if not client_v2.auth_ref.trust_scoped: + logger.error("v2 trust token re-scoping failed!") + raise exception.AuthorizationFailure() + # All OK so update the context with the token + self.context.auth_token = client_v2.auth_ref.auth_token + self.context.auth_url = kwargs.get('auth_url') + + return client_v2 + + @staticmethod + def _service_admin_creds(api_version=2): + # Import auth_token to have keystone_authtoken settings setup. + importutils.import_module('keystoneclient.middleware.auth_token') + + creds = { + 'username': cfg.CONF.keystone_authtoken.admin_user, + 'password': cfg.CONF.keystone_authtoken.admin_password, + } + if api_version >= 3: + creds['auth_url'] =\ + cfg.CONF.keystone_authtoken.auth_uri.replace('v2.0', 'v3') + creds['project_name'] =\ + cfg.CONF.keystone_authtoken.admin_tenant_name + else: + creds['auth_url'] = cfg.CONF.keystone_authtoken.auth_uri + creds['tenant_name'] =\ + cfg.CONF.keystone_authtoken.admin_tenant_name + + return creds + + def _v3_client_init(self): + kwargs = {} + if self.context.auth_token is not None: + kwargs['project_name'] = self.context.tenant + kwargs['token'] = self.context.auth_token + kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3') + kwargs['endpoint'] = kwargs['auth_url'] + elif self.context.trust_id is not None: + # We got a trust_id, so we use the admin credentials and get a + # Token back impersonating the trustor user + kwargs.update(self._service_admin_creds(api_version=3)) + kwargs['trust_id'] = self.context.trust_id + elif self.context.password is not None: + kwargs['username'] = self.context.username + kwargs['password'] = self.context.password + kwargs['project_name'] = self.context.tenant + kwargs['project_id'] = self.context.tenant_id + kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3') + kwargs['endpoint'] = kwargs['auth_url'] + else: + logger.error("Keystone v3 API connection failed, no password or " + "auth_token!") + raise exception.AuthorizationFailure() + + client = kc_v3.Client(**kwargs) + # Have to explicitly authenticate() or client.auth_ref is None + client.authenticate() + + return client + + def create_trust_context(self): + """ + If cfg.CONF.deferred_auth_method is trusts, we create a + trust using the trustor identity in the current context, with the + trustee as the heat service user and return a context containing + the new trust_id + + If deferred_auth_method != trusts, or the current context already + contains a trust_id, we do nothing and return the current context + """ + if 
self.context.trust_id: + return self.context + + # We need the service admin user ID (not name), as the trustor user + # can't lookup the ID in keystoneclient unless they're admin + # workaround this by creating a temporary admin client connection + # then getting the user ID from the auth_ref + admin_creds = self._service_admin_creds() + admin_client = kc.Client(**admin_creds) + trustee_user_id = admin_client.auth_ref.user_id + trustor_user_id = self.client_v3.auth_ref.user_id + trustor_project_id = self.client_v3.auth_ref.project_id + roles = cfg.CONF.trusts_delegated_roles + trust = self.client_v3.trusts.create(trustor_user=trustor_user_id, + trustee_user=trustee_user_id, + project=trustor_project_id, + impersonation=True, + role_names=roles) + + trust_context = context.RequestContext.from_dict( + self.context.to_dict()) + trust_context.trust_id = trust.id + trust_context.trustor_user_id = trustor_user_id + return trust_context + + def delete_trust(self, trust_id): + """ + Delete the specified trust. + """ + self.client_v3.trusts.delete(trust_id) + + def create_stack_user(self, username, password=''): + """ + Create a user defined as part of a stack, either via template + or created internally by a resource. This user will be added to + the heat_stack_user_role as defined in the config + Returns the keystone ID of the resulting user + """ + if(len(username) > 64): + logger.warning("Truncating the username %s to the last 64 " + "characters." % username) + # get the last 64 characters of the username + username = username[-64:] + user = self.client_v2.users.create(username, + password, + '%s@heat-api.org' % + username, + tenant_id=self.context.tenant_id, + enabled=True) + + # We add the new user to a special keystone role + # This role is designed to allow easier differentiation of the + # heat-generated "stack users" which will generally have credentials + # deployed on an instance (hence are implicitly untrusted) + roles = self.client_v2.roles.list() + stack_user_role = [r.id for r in roles + if r.name == cfg.CONF.heat_stack_user_role] + if len(stack_user_role) == 1: + role_id = stack_user_role[0] + logger.debug("Adding user %s to role %s" % (user.id, role_id)) + self.client_v2.roles.add_user_role(user.id, role_id, + self.context.tenant_id) + else: + logger.error("Failed to add user %s to role %s, check role exists!" + % (username, cfg.CONF.heat_stack_user_role)) + + return user.id + + def delete_stack_user(self, user_id): + + user = self.client_v2.users.get(user_id) + + # FIXME (shardy) : need to test, do we still need this retry logic? + # Copied from user.py, but seems like something we really shouldn't + # need to do, no bug reference in the original comment (below)... + # tempory hack to work around an openstack bug. + # seems you can't delete a user first time - you have to try + # a couple of times - go figure! 
+ tmo = eventlet.Timeout(10) + status = 'WAITING' + reason = 'Timed out trying to delete user' + try: + while status == 'WAITING': + try: + user.delete() + status = 'DELETED' + except Exception as ce: + reason = str(ce) + logger.warning("Problem deleting user %s: %s" % + (user_id, reason)) + eventlet.sleep(1) + except eventlet.Timeout as t: + if t is not tmo: + # not my timeout + raise + else: + status = 'TIMEDOUT' + finally: + tmo.cancel() + + if status != 'DELETED': + raise exception.Error(reason) + + def delete_ec2_keypair(self, user_id, accesskey): + self.client_v2.ec2.delete(user_id, accesskey) + + def get_ec2_keypair(self, user_id): + # We make the assumption that each user will only have one + # ec2 keypair, it's not clear if AWS allow multiple AccessKey resources + # to be associated with a single User resource, but for simplicity + # we assume that here for now + cred = self.client_v2.ec2.list(user_id) + if len(cred) == 0: + return self.client_v2.ec2.create(user_id, self.context.tenant_id) + if len(cred) == 1: + return cred[0] + else: + logger.error("Unexpected number of ec2 credentials %s for %s" % + (len(cred), user_id)) + + def disable_stack_user(self, user_id): + # FIXME : This won't work with the v3 keystone API + self.client_v2.users.update_enabled(user_id, False) + + def enable_stack_user(self, user_id): + # FIXME : This won't work with the v3 keystone API + self.client_v2.users.update_enabled(user_id, True) + + def url_for(self, **kwargs): + return self.client_v2.service_catalog.url_for(**kwargs) + + @property + def auth_token(self): + return self.client_v2.auth_token diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/neutron_proxy_context.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/neutron_proxy_context.py new file mode 100644 index 00000000..53ad7bf8 --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/neutron_proxy_context.py @@ -0,0 +1,203 @@ +# Copyright 2014, Huawei, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# @author: Haojie Jia, Huawei + +from oslo.config import cfg + +#from heat.openstack.common import local +from neutron.openstack.common import local +#from heat.common import exception +from neutron.common import exceptions +#from heat.common import wsgi +from neutron import wsgi +from neutron.openstack.common import context +#from heat.openstack.common import importutils +from neutron.openstack.common import importutils +#from heat.openstack.common import uuidutils +from neutron.openstack.common import uuidutils + + +def generate_request_id(): + return 'req-' + uuidutils.generate_uuid() + + +class RequestContext(context.RequestContext): + + """ + Stores information about the security context under which the user + accesses the system, as well as additional request information. 
+ """ + + def __init__(self, auth_token=None, username=None, password=None, + aws_creds=None, tenant=None, + tenant_id=None, auth_url=None, roles=None, is_admin=False, + region_name=None, read_only=False, show_deleted=False, + owner_is_tenant=True, overwrite=True, + trust_id=None, trustor_user_id=None, + **kwargs): + """ + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + super(RequestContext, self).__init__(auth_token=auth_token, + user=username, tenant=tenant, + is_admin=is_admin, + read_only=read_only, + show_deleted=show_deleted, + request_id='unused') + + self.username = username + self.password = password + self.aws_creds = aws_creds + self.tenant_id = tenant_id + self.auth_url = auth_url + self.roles = roles or [] + self.region_name = region_name + self.owner_is_tenant = owner_is_tenant + if overwrite or not hasattr(local.store, 'context'): + self.update_store() + self._session = None + self.trust_id = trust_id + self.trustor_user_id = trustor_user_id + + def update_store(self): + local.store.context = self + + def to_dict(self): + return {'auth_token': self.auth_token, + 'username': self.username, + 'password': self.password, + 'aws_creds': self.aws_creds, + 'tenant': self.tenant, + 'tenant_id': self.tenant_id, + 'trust_id': self.trust_id, + 'trustor_user_id': self.trustor_user_id, + 'auth_url': self.auth_url, + 'roles': self.roles, + 'is_admin': self.is_admin, + 'region_name': self.region_name} + + @classmethod + def from_dict(cls, values): + return cls(**values) + + @property + def owner(self): + """Return the owner to correlate with an image.""" + return self.tenant if self.owner_is_tenant else self.user + + +def get_admin_context(read_deleted="no"): + return RequestContext(is_admin=True) + + +class ContextMiddleware(wsgi.Middleware): + + opts = [cfg.BoolOpt('owner_is_tenant', default=True), + cfg.StrOpt('admin_role', default='admin')] + + def __init__(self, app, conf, **local_conf): + cfg.CONF.register_opts(self.opts) + + # Determine the context class to use + self.ctxcls = RequestContext + if 'context_class' in local_conf: + self.ctxcls = importutils.import_class(local_conf['context_class']) + + super(ContextMiddleware, self).__init__(app) + + def make_context(self, *args, **kwargs): + """ + Create a context with the given arguments. + """ + kwargs.setdefault('owner_is_tenant', cfg.CONF.owner_is_tenant) + + return self.ctxcls(*args, **kwargs) + + def process_request(self, req): + """ + Extract any authentication information in the request and + construct an appropriate context from it. + + A few scenarios exist: + + 1. If X-Auth-Token is passed in, then consult TENANT and ROLE headers + to determine permissions. + + 2. An X-Auth-Token was passed in, but the Identity-Status is not + confirmed. For now, just raising a NotAuthenticated exception. + + 3. X-Auth-Token is omitted. If we were using Keystone, then the + tokenauth middleware would have rejected the request, so we must be + using NoAuth. In that case, assume that is_admin=True. + """ + headers = req.headers + + try: + """ + This sets the username/password to the admin user because you + need this information in order to perform token authentication. + The real 'username' is the 'tenant'. 
+ + We should also check here to see if X-Auth-Token is not set and + in that case we should assign the user/pass directly as the real + username/password and token as None. 'tenant' should still be + the username. + """ + + username = None + password = None + aws_creds = None + + if headers.get('X-Auth-User') is not None: + username = headers.get('X-Auth-User') + password = headers.get('X-Auth-Key') + elif headers.get('X-Auth-EC2-Creds') is not None: + aws_creds = headers.get('X-Auth-EC2-Creds') + + token = headers.get('X-Auth-Token') + tenant = headers.get('X-Tenant-Name') + tenant_id = headers.get('X-Tenant-Id') + auth_url = headers.get('X-Auth-Url') + roles = headers.get('X-Roles') + if roles is not None: + roles = roles.split(',') + + except Exception: + raise exception.NotAuthenticated() + + req.context = self.make_context(auth_token=token, + tenant=tenant, tenant_id=tenant_id, + aws_creds=aws_creds, + username=username, + password=password, + auth_url=auth_url, roles=roles, + is_admin=True) + + +def ContextMiddleware_filter_factory(global_conf, **local_conf): + """ + Factory method for paste.deploy + """ + conf = global_conf.copy() + conf.update(local_conf) + + def filter(app): + return ContextMiddleware(app, conf) + + return filter diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/ovs_dvr_neutron_agent.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/ovs_dvr_neutron_agent.py new file mode 100644 index 00000000..e2c4bab2 --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/ovs_dvr_neutron_agent.py @@ -0,0 +1,763 @@ +# Copyright 2014, Huawei, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# @author: Haojie Jia, Huawei + + +from neutron.api.rpc import dvr_rpc +from neutron.common import constants as q_const +from neutron.openstack.common import log as logging +from neutron.plugins.openvswitch.common import constants + + +LOG = logging.getLogger(__name__) + + +# A class to represent a DVR-hosted subnet including vif_ports resident on +# that subnet +class LocalDVRSubnetMapping: + + def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID): + # set of commpute ports on on this dvr subnet + self.compute_ports = {} + self.subnet = subnet + self.csnat_ofport = csnat_ofport + self.dvr_owned = False + + def __str__(self): + return ("subnet = %s compute_ports = %s csnat_port = %s" + " is_dvr_owned = %s" % + (self.subnet, self.get_compute_ofports(), + self.get_csnat_ofport(), self.is_dvr_owned())) + + def get_subnet_info(self): + return self.subnet + + def set_dvr_owned(self, owned): + self.dvr_owned = owned + + def is_dvr_owned(self): + return self.dvr_owned + + def add_compute_ofport(self, vif_id, ofport): + self.compute_ports[vif_id] = ofport + + def remove_compute_ofport(self, vif_id): + self.compute_ports.pop(vif_id, 0) + + def remove_all_compute_ofports(self): + self.compute_ports.clear() + + def get_compute_ofports(self): + return self.compute_ports + + def set_csnat_ofport(self, ofport): + self.csnat_ofport = ofport + + def get_csnat_ofport(self): + return self.csnat_ofport + + +class OVSPort: + + def __init__(self, id, ofport, mac, device_owner): + self.id = id + self.mac = mac + self.ofport = ofport + self.subnets = set() + self.device_owner = device_owner + + def __str__(self): + return ("OVSPort: id = %s, ofport = %s, mac = %s," + "device_owner = %s, subnets = %s" % + (self.id, self.ofport, self.mac, + self.device_owner, self.subnets)) + + def add_subnet(self, subnet_id): + self.subnets.add(subnet_id) + + def remove_subnet(self, subnet_id): + self.subnets.remove(subnet_id) + + def remove_all_subnets(self): + self.subnets.clear() + + def get_subnets(self): + return self.subnets + + def get_device_owner(self): + return self.device_owner + + def get_mac(self): + return self.mac + + def get_ofport(self): + return self.ofport + + +class OVSDVRNeutronAgent(dvr_rpc.DVRAgentRpcApiMixin): + + '''Implements OVS-based DVR(Distributed Virtual Router), for + overlay networks. + + ''' + + # history + # 1.0 Initial version + + def __init__(self, context, plugin_rpc, integ_br, tun_br, + patch_int_ofport=constants.OFPORT_INVALID, + patch_tun_ofport=constants.OFPORT_INVALID, + host=None, enable_tunneling=False, + enable_distributed_routing=False): + self.context = context + self.plugin_rpc = plugin_rpc + self.int_br = integ_br + self.tun_br = tun_br + self.patch_int_ofport = patch_int_ofport + self.patch_tun_ofport = patch_tun_ofport + self.host = host + self.enable_tunneling = enable_tunneling + self.enable_distributed_routing = enable_distributed_routing + + def reset_ovs_parameters(self, integ_br, tun_br, + patch_int_ofport, patch_tun_ofport): + '''Reset the openvswitch parameters + ''' + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + self.int_br = integ_br + self.tun_br = tun_br + self.patch_int_ofport = patch_int_ofport + self.patch_tun_ofport = patch_tun_ofport + + def setup_dvr_flows_on_integ_tun_br(self): + '''Setup up initial dvr flows into integration bridge and tunnel + bridge. 
+ ''' + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + # get the local DVR MAC Address + try: + LOG.debug("L2 Agent operating in DVR Mode") + self.dvr_mac_address = None + self.local_dvr_map = {} + self.local_csnat_map = {} + self.local_ports = {} + self.registered_dvr_macs = set() + details = self.plugin_rpc.\ + get_dvr_mac_address_by_host(self.context, self.host) + LOG.debug("L2 Agent DVR: Received response for " + "get_dvr_mac_address_by_host() from " + "plugin: %r", details) + self.dvr_mac_address = details['mac_address'] + except Exception: + LOG.exception(_("DVR: Failed to obtain local DVR Mac address")) + self.enable_distributed_routing = False + # switch all traffic using L2 learning + self.int_br.add_flow(priority=1, actions="normal") + return + + # Remove existing flows in integration bridge + # self.int_br.remove_all_flows() + + # Insert 'drop' action as the default for Table 2 + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=1, + actions="drop") + + # Insert 'normal' action as the default for Table 1 + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=1, + actions="normal") + +# add by j00209498 + self.int_br.add_flow(table=constants.LOCAL_SWITCHING, + priority=2, + in_port=self.patch_tun_ofport, + dl_src='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="resubmit(,%s)" % + constants.DVR_TO_SRC_MAC) + + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=1, + dl_src='fa:16:3f:00:00:00/ff:ff:ff:00:00:00', + actions="output:%s" % self.patch_int_ofport) +# comment by j00209498 +# dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context) +# LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs) +# for mac in dvr_macs: +# if mac['mac_address'] == self.dvr_mac_address: +# continue +# Table 0 (default) will now sort DVR traffic from other +# traffic depending on in_port +# self.int_br.add_flow(table=constants.LOCAL_SWITCHING, +# priority=2, +# in_port=self.patch_tun_ofport, +# dl_src=mac['mac_address'], +# actions="resubmit(,%s)" % +# constants.DVR_TO_SRC_MAC) +# Table DVR_NOT_LEARN ensures unique dvr macs in the cloud +# are not learnt, as they may +# result in flow explosions +# self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, +# priority=1, +# dl_src=mac['mac_address'], +# actions="output:%s" % self.patch_int_ofport) +# +# self.registered_dvr_macs.add(mac['mac_address']) + + self.tun_br.add_flow(priority=1, + in_port=self.patch_int_ofport, + actions="resubmit(,%s)" % + constants.DVR_PROCESS) + # table-miss should be sent to learning table + self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, + priority=0, + actions="resubmit(,%s)" % + constants.LEARN_FROM_TUN) + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=0, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + + def dvr_mac_address_update(self, dvr_macs): + pass + # comment by j00209498 +# if not self.enable_tunneling: +# return +# +# if not self.enable_distributed_routing: +# return +# +# LOG.debug("DVR Mac address update with host-mac: %s", dvr_macs) +# +# if not self.dvr_mac_address: +# LOG.debug("Self mac unknown, ignoring this" +# " dvr_mac_address_update() ") +# return +# +# dvr_host_macs = set() +# for entry in dvr_macs: +# if entry['mac_address'] == self.dvr_mac_address: +# continue +# dvr_host_macs.add(entry['mac_address']) +# +# if dvr_host_macs == self.registered_dvr_macs: +# LOG.debug("DVR Mac address already up to date") +# return +# +# dvr_macs_added = dvr_host_macs - 
self.registered_dvr_macs +# dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs +# +# for oldmac in dvr_macs_removed: +# self.int_br.delete_flows(table=constants.LOCAL_SWITCHING, +# in_port=self.patch_tun_ofport, +# dl_src=oldmac) +# self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN, +# dl_src=oldmac) +# LOG.debug("Removed DVR MAC flow for %s", oldmac) +# self.registered_dvr_macs.remove(oldmac) +# +# for newmac in dvr_macs_added: +# self.int_br.add_flow(table=constants.LOCAL_SWITCHING, +# priority=2, +# in_port=self.patch_tun_ofport, +# dl_src=newmac, +# actions="resubmit(,%s)" % +# constants.DVR_TO_SRC_MAC) +# self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, +# priority=1, +# dl_src=newmac, +# actions="output:%s" % self.patch_int_ofport) +# LOG.debug("Added DVR MAC flow for %s", newmac) +# self.registered_dvr_macs.add(newmac) + + def is_dvr_router_interface(self, device_owner): + return (device_owner == q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED) + + def process_tunneled_network(self, network_type, lvid, segmentation_id): + if not self.enable_tunneling: + return + if not self.enable_distributed_routing: + return + self.tun_br.add_flow(table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + actions="mod_vlan_vid:%s," + "resubmit(,%s)" % + (lvid, constants.DVR_NOT_LEARN)) + + def _bind_distributed_router_interface_port(self, port, fixed_ips, + device_owner, local_vlan): + # since router port must have only one fixed IP, directly + # use fixed_ips[0] + subnet_uuid = fixed_ips[0]['subnet_id'] + csnat_ofport = constants.OFPORT_INVALID + ldm = None + if subnet_uuid in self.local_dvr_map: + ldm = self.local_dvr_map[subnet_uuid] + csnat_ofport = ldm.get_csnat_ofport() + if csnat_ofport == constants.OFPORT_INVALID: + LOG.error(_("DVR: Duplicate DVR router interface detected " + "for subnet %s"), subnet_uuid) + return + else: + # set up LocalDVRSubnetMapping available for this subnet + subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, + subnet_uuid) + if not subnet_info: + LOG.error(_("DVR: Unable to retrieve subnet information" + " for subnet_id %s"), subnet_uuid) + return + LOG.debug("get_subnet_for_dvr for subnet %s returned with %s" % + (subnet_uuid, subnet_info)) + ldm = LocalDVRSubnetMapping(subnet_info) + self.local_dvr_map[subnet_uuid] = ldm + + # DVR takes over + ldm.set_dvr_owned(True) + + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + local_compute_ports = self.plugin_rpc.\ + get_compute_ports_on_host_by_subnet(self.context, + self.host, + subnet_uuid) + LOG.debug("DVR: List of ports received from " + "get_compute_ports_on_host_by_subnet %r", + local_compute_ports) + for prt in local_compute_ports: + vif = self.int_br.get_vif_port_by_id(prt['id']) + if not vif: + continue + ldm.add_compute_ofport(vif.vif_id, vif.ofport) + if vif.vif_id in self.local_ports: + # ensure if a compute port is already on + # a different dvr routed subnet + # if yes, queue this subnet to that port + ovsport = self.local_ports[vif.vif_id] + ovsport.add_subnet(subnet_uuid) + else: + # the compute port is discovered first here that its on + # a dvr routed subnet queue this subnet to that port + ovsport = OVSPort(vif.vif_id, vif.ofport, + vif.vif_mac, prt['device_owner']) + + ovsport.add_subnet(subnet_uuid) + self.local_ports[vif.vif_id] = ovsport + + # create rule for just this vm port + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + 
actions="strip_vlan,mod_dl_src:%s," + "output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + + # create rule in Table LOCAL_SWITCHING to forward + # broadcast/multicast frames from dvr router interface to + # appropriate local tenant ports + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + if csnat_ofport != constants.OFPORT_INVALID: + ofports = str(csnat_ofport) + ',' + ofports + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s, " + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=3, + dl_vlan=local_vlan, + proto='arp', + nw_dst=subnet_info['gateway_ip'], + actions="drop") + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=2, + dl_vlan=local_vlan, + dl_dst=port.vif_mac, + actions="drop") + + self.tun_br.add_flow(table=constants.DVR_PROCESS, + priority=1, + dl_vlan=local_vlan, + dl_src=port.vif_mac, + actions="mod_dl_src:%s,resubmit(,%s)" % + (self.dvr_mac_address, + constants.PATCH_LV_TO_TUN)) + + # the dvr router interface is itself a port, so capture it + # queue this subnet to that port. A subnet appears only once as + # a router interface on any given router + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + + def _bind_compute_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): + # Handle new compute port added use-case + subnet_uuid = None + for ips in fixed_ips: + if ips['subnet_id'] not in self.local_dvr_map: + continue + subnet_uuid = ips['subnet_id'] + ldm = self.local_dvr_map[subnet_uuid] + if not ldm.is_dvr_owned(): + # well this is csnat stuff, let dvr come in + # and do plumbing for this vm later + continue + + # This confirms that this compute port belongs + # to a dvr hosted subnet. 
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Plumbing compute port %s", port.vif_id) + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + csnat_ofport = ldm.get_csnat_ofport() + ldm.add_compute_ofport(port.vif_id, port.ofport) + if port.vif_id in self.local_ports: + # ensure if a compute port is already on a different + # dvr routed subnet + # if yes, queue this subnet to that port + ovsport = self.local_ports[port.vif_id] + ovsport.add_subnet(subnet_uuid) + else: + # the compute port is discovered first here that its + # on a dvr routed subnet, queue this subnet to that port + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + # create a rule for this vm port + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + "output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + + if csnat_ofport != constants.OFPORT_INVALID: + ofports = str(csnat_ofport) + ',' + ofports + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + def _bind_centralized_snat_port_on_dvr_subnet(self, port, fixed_ips, + device_owner, local_vlan): + if port.vif_id in self.local_ports: + # throw an error if CSNAT port is already on a different + # dvr routed subnet + ovsport = self.local_ports[port.vif_id] + subs = list(ovsport.get_subnets()) + LOG.error(_("Centralized-SNAT port %s already seen on "), + port.vif_id) + LOG.error(_("a different subnet %s"), subs[0]) + return + # since centralized-SNAT (CSNAT) port must have only one fixed + # IP, directly use fixed_ips[0] + subnet_uuid = fixed_ips[0]['subnet_id'] + ldm = None + subnet_info = None + if subnet_uuid not in self.local_dvr_map: + # no csnat ports seen on this subnet - create csnat state + # for this subnet + subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context, + subnet_uuid) + ldm = LocalDVRSubnetMapping(subnet_info, port.ofport) + self.local_dvr_map[subnet_uuid] = ldm + else: + ldm = self.local_dvr_map[subnet_uuid] + subnet_info = ldm.get_subnet_info() + # Store csnat OF Port in the existing DVRSubnetMap + ldm.set_csnat_ofport(port.ofport) + + # create ovsPort footprint for csnat port + ovsport = OVSPort(port.vif_id, port.ofport, + port.vif_mac, device_owner) + ovsport.add_subnet(subnet_uuid) + self.local_ports[port.vif_id] = ovsport + + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=4, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac(), + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], + ovsport.get_ofport())) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + ofports = str(ldm.get_csnat_ofport()) + ',' + ofports + ip_subnet = subnet_info['cidr'] + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + + def bind_port_to_dvr(self, port, network_type, fixed_ips, + device_owner, local_vlan_id): + # a port coming up as distributed router interface + if not self.enable_tunneling: + return + + if not 
self.enable_distributed_routing: + return + + if network_type not in constants.TUNNEL_NETWORK_TYPES: + return + + if device_owner == q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED: + self._bind_distributed_router_interface_port(port, fixed_ips, + device_owner, + local_vlan_id) + + if 'compute' in device_owner: + self._bind_compute_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) + + if device_owner == q_const.DEVICE_OWNER_ROUTER_SNAT: + self._bind_centralized_snat_port_on_dvr_subnet(port, fixed_ips, + device_owner, + local_vlan_id) + + def _unbind_distributed_router_interface_port(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + + # removal of distributed router interface + subnet_ids = ovsport.get_subnets() + subnet_set = set(subnet_ids) + # ensure we process for all the subnets laid on this removed port + for sub_uuid in subnet_set: + if sub_uuid not in self.local_dvr_map: + continue + + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + + # DVR is no more owner + ldm.set_dvr_owned(False) + + # remove all vm rules for this dvr subnet + # clear of compute_ports altogether + compute_ports = ldm.get_compute_ofports() + for vif_id in compute_ports: + ovsport = self.local_ports[vif_id] + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + ldm.remove_all_compute_ofports() + + if ldm.get_csnat_ofport() != -1: + # If there is a csnat port on this agent, preserve + # the local_dvr_map state + ofports = str(ldm.get_csnat_ofport()) + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + # removed port is a distributed router interface + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', dl_vlan=local_vlan, + nw_dst=ip_subnet) + # remove subnet from local_dvr_map as no dvr (or) csnat + # ports available on this agent anymore + self.local_dvr_map.pop(sub_uuid, None) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + proto='arp', + nw_dst=subnet_info['gateway_ip']) + ovsport.remove_subnet(sub_uuid) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + dl_dst=port.vif_mac) + + self.tun_br.delete_flows(table=constants.DVR_PROCESS, + dl_vlan=local_vlan, + dl_src=port.vif_mac) + # release port state + self.local_ports.pop(port.vif_id, None) + + def _unbind_compute_port_on_dvr_subnet(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + # This confirms that this compute port being removed belonged + # to a dvr hosted subnet. 
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Removing plumbing for compute port %s", port) + subnet_ids = ovsport.get_subnets() + # ensure we process for all the subnets laid on this port + for sub_uuid in subnet_ids: + if sub_uuid not in self.local_dvr_map: + continue + + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ldm.remove_compute_ofport(port.vif_id) + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + ip_subnet = subnet_info['cidr'] + + # first remove this vm port rule + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + if ldm.get_csnat_ofport() != -1: + # If there is a csnat port on this agent, preserve + # the local_dvr_map state + ofports = str(ldm.get_csnat_ofport()) + ',' + ofports + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], + ofports)) + else: + # remove the flow altogether, as no ports (both csnat/ + # compute) are available on this subnet in this + # agent + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet) + # release port state + self.local_ports.pop(port.vif_id, None) + + def _unbind_centralized_snat_port_on_dvr_subnet(self, port, local_vlan): + + ovsport = self.local_ports[port.vif_id] + # This comfirms that this compute port being removed belonged + # to a dvr hosted subnet. 
+ # Accomodate this VM Port into the existing rule in + # the integration bridge + LOG.debug("DVR: Removing plumbing for csnat port %s", port) + sub_uuid = list(ovsport.get_subnets())[0] + # ensure we process for all the subnets laid on this port + if sub_uuid not in self.local_dvr_map: + return + ldm = self.local_dvr_map[sub_uuid] + subnet_info = ldm.get_subnet_info() + ip_subnet = subnet_info['cidr'] + ldm.set_csnat_ofport(constants.OFPORT_INVALID) + # then remove csnat port rule + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + dl_vlan=local_vlan, + dl_dst=ovsport.get_mac()) + + ofports = ','.join(map(str, ldm.get_compute_ofports().values())) + if ofports: + self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, + priority=2, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet, + actions="strip_vlan,mod_dl_src:%s," + " output:%s" % + (subnet_info['gateway_mac'], ofports)) + else: + self.int_br.delete_flows(table=constants.DVR_TO_SRC_MAC, + proto='ip', + dl_vlan=local_vlan, + nw_dst=ip_subnet) + if not ldm.is_dvr_owned(): + # if not owned by DVR (only used for csnat), remove this + # subnet state altogether + self.local_dvr_map.pop(sub_uuid, None) + + # release port state + self.local_ports.pop(port.vif_id, None) + + def unbind_port_from_dvr(self, vif_port, local_vlan_id): + if not self.enable_tunneling: + return + + if not self.enable_distributed_routing: + return + + if not vif_port: + LOG.debug("DVR: VIF Port not available for delete %s", vif_port) + return + + # Handle port removed use-case + if vif_port.vif_id not in self.local_ports: + LOG.debug("DVR: Non distributed port, ignoring %s", vif_port) + return + + ovsport = self.local_ports[vif_port.vif_id] + + if ovsport.get_device_owner() == \ + q_const.DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED: + self._unbind_distributed_router_interface_port(vif_port, + local_vlan_id) + + if 'compute' in ovsport.get_device_owner(): + self._unbind_compute_port_on_dvr_subnet(vif_port, + local_vlan_id) + + if ovsport.get_device_owner() == q_const.DEVICE_OWNER_ROUTER_SNAT: + self._unbind_centralized_snat_port_on_dvr_subnet(vif_port, + local_vlan_id) diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/README b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/README new file mode 100644 index 00000000..0a02c99e --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/README @@ -0,0 +1,16 @@ +This directory contains files that are required for the XenAPI support. +They should be installed in the XenServer / Xen Cloud Platform dom0. + +If you install them manually, you will need to ensure that the newly +added files are executable. 
You can do this by running the following +command (from dom0): + + chmod a+x /etc/xapi.d/plugins/* + +Otherwise, you can build an rpm by running the following command: + + ./contrib/build-rpm.sh + +and install the rpm by running the following command (from dom0): + + rpm -i openstack-neutron-xen-plugins.rpm diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/build-rpm.sh b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/build-rpm.sh new file mode 100644 index 00000000..81b5f3b3 --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/build-rpm.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +set -eux + +thisdir=$(dirname $(readlink -f "$0")) +export NEUTRON_ROOT="$thisdir/../../../../../../" +export PYTHONPATH=$NEUTRON_ROOT + +cd $NEUTRON_ROOT +VERSION=$(sh -c "(cat $NEUTRON_ROOT/neutron/version.py; \ + echo 'print version_info.release_string()') | \ + python") +cd - + +PACKAGE=openstack-neutron-xen-plugins +RPMBUILD_DIR=$PWD/rpmbuild +if [ ! -d $RPMBUILD_DIR ]; then + echo $RPMBUILD_DIR is missing + exit 1 +fi + +for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do + rm -rf $RPMBUILD_DIR/$dir + mkdir -p $RPMBUILD_DIR/$dir +done + +rm -rf /tmp/$PACKAGE +mkdir /tmp/$PACKAGE +cp -r ../etc/xapi.d /tmp/$PACKAGE +tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE + +rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR" \ + --define "version $VERSION" \ + $RPMBUILD_DIR/SPECS/$PACKAGE.spec diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec new file mode 100644 index 00000000..8ba03eaf --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec @@ -0,0 +1,30 @@ +Name: openstack-neutron-xen-plugins +Version: %{version} +Release: 1 +Summary: Files for XenAPI support. +License: ASL 2.0 +Group: Applications/Utilities +Source0: openstack-neutron-xen-plugins.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +%define debug_package %{nil} + +%description +This package contains files that are required for XenAPI support for Neutron. + +%prep +%setup -q -n openstack-neutron-xen-plugins + +%install +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/etc +cp -r xapi.d $RPM_BUILD_ROOT/etc +chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/* + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%defattr(-,root,root,-) +/etc/xapi.d/plugins/* diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/etc/xapi.d/plugins/netwrap b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/etc/xapi.d/plugins/netwrap new file mode 100644 index 00000000..e994549d --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/agent/xenapi/etc/xapi.d/plugins/netwrap @@ -0,0 +1,75 @@ +#!/usr/bin/env python + +# Copyright 2012 OpenStack Foundation +# Copyright 2012 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for executing network commands (ovs, iptables, etc) on dom0 +# + +import gettext +gettext.install('neutron', unicode=1) +try: + import json +except ImportError: + import simplejson as json +import subprocess + +import XenAPIPlugin + + +ALLOWED_CMDS = [ + 'ip', + 'ovs-ofctl', + 'ovs-vsctl', +] + + +class PluginError(Exception): + + """Base Exception class for all plugin errors.""" + + def __init__(self, *args): + Exception.__init__(self, *args) + + +def _run_command(cmd, cmd_input): + """Abstracts out the basics of issuing system commands. If the command + returns anything in stderr, a PluginError is raised with that information. + Otherwise, the output from stdout is returned. + """ + pipe = subprocess.PIPE + proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe, + stderr=pipe, close_fds=True) + (out, err) = proc.communicate(cmd_input) + + if err: + raise PluginError(err) + return out + + +def run_command(session, args): + cmd = json.loads(args.get('cmd')) + if cmd and cmd[0] not in ALLOWED_CMDS: + msg = _("Dom0 execution of '%s' is not permitted") % cmd[0] + raise PluginError(msg) + result = _run_command(cmd, json.loads(args.get('cmd_input', 'null'))) + return json.dumps(result) + + +if __name__ == "__main__": + XenAPIPlugin.dispatch({"run_command": run_command}) diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/__init__.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/__init__.py new file mode 100644 index 00000000..8ac9340e --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/config.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/config.py new file mode 100644 index 00000000..5b6e12cb --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/config.py @@ -0,0 +1,123 @@ +# Copyright 2014, Huawei, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
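+#
+# The [OVS] and [AGENT] option groups registered below are read from the
+# l2 proxy agent's configuration file. The following snippet is only an
+# illustrative sketch (the values are placeholders, not defaults shipped
+# with this patch) of how these options might be set:
+#
+#     [OVS]
+#     integration_bridge = br-int
+#     enable_tunneling = True
+#     tunnel_bridge = br-tun
+#     local_ip = 10.0.0.10
+#
+#     [AGENT]
+#     tunnel_types = gre
+#     polling_interval = 2
+#     keystone_auth_url = http://127.0.0.1:35357/v2.0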
+# @author: Haojie Jia, Huawei
+
+
+from oslo.config import cfg
+
+from neutron.agent.common import config
+from neutron.plugins.l2_proxy.common import constants
+
+
+DEFAULT_BRIDGE_MAPPINGS = []
+DEFAULT_VLAN_RANGES = []
+DEFAULT_TUNNEL_RANGES = []
+DEFAULT_TUNNEL_TYPES = []
+
+ovs_opts = [
+    cfg.StrOpt('integration_bridge', default='br-int',
+               help=_("Integration bridge to use")),
+    cfg.BoolOpt('enable_tunneling', default=False,
+                help=_("Enable tunneling support")),
+    cfg.StrOpt('tunnel_bridge', default='br-tun',
+               help=_("Tunnel bridge to use")),
+    cfg.StrOpt('int_peer_patch_port', default='patch-tun',
+               help=_("Peer patch port in integration bridge for tunnel "
+                      "bridge")),
+    cfg.StrOpt('tun_peer_patch_port', default='patch-int',
+               help=_("Peer patch port in tunnel bridge for integration "
+                      "bridge")),
+    cfg.StrOpt('local_ip', default='',
+               help=_("Local IP address of GRE tunnel endpoints.")),
+    cfg.ListOpt('bridge_mappings',
+                default=DEFAULT_BRIDGE_MAPPINGS,
+                help=_("List of <physical_network>:<bridge>")),
+    cfg.StrOpt('tenant_network_type', default='local',
+               help=_("Network type for tenant networks "
+                      "(local, vlan, gre, vxlan, or none)")),
+    cfg.ListOpt('network_vlan_ranges',
+                default=DEFAULT_VLAN_RANGES,
+                help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
+                       "or <physical_network>")),
+    cfg.ListOpt('tunnel_id_ranges',
+                default=DEFAULT_TUNNEL_RANGES,
+                help=_("List of <tun_min>:<tun_max>")),
+    cfg.StrOpt('tunnel_type', default='',
+               help=_("The type of tunnels to use when utilizing tunnels, "
+                      "either 'gre' or 'vxlan'")),
+    cfg.BoolOpt('use_veth_interconnection', default=False,
+                help=_("Use veths instead of patch ports to interconnect the "
+                       "integration bridge to physical bridges")),
+]
+
+agent_opts = [
+    cfg.IntOpt('polling_interval', default=2,
+               help=_("The number of seconds the agent will wait between "
+                      "polling for local device changes.")),
+    cfg.BoolOpt('minimize_polling',
+                default=True,
+                help=_("Minimize polling by monitoring ovsdb for interface "
+                       "changes.")),
+    cfg.IntOpt('ovsdb_monitor_respawn_interval',
+               default=constants.DEFAULT_OVSDBMON_RESPAWN,
+               help=_("The number of seconds to wait before respawning the "
+                      "ovsdb monitor after losing communication with it")),
+    cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
+                help=_("Network types supported by the agent "
+                       "(gre and/or vxlan)")),
+    cfg.IntOpt('vxlan_udp_port', default=constants.VXLAN_UDP_PORT,
+               help=_("The UDP port to use for VXLAN tunnels.")),
+    cfg.IntOpt('veth_mtu',
+               help=_("MTU size of veth interfaces")),
+    cfg.BoolOpt('l2_population', default=False,
+                help=_("Use ml2 l2population mechanism driver to learn "
+                       "remote mac and IPs and improve tunnel scalability")),
+    cfg.BoolOpt('arp_responder', default=False,
+                help=_("Enable local ARP responder if it is supported")),
+    cfg.BoolOpt('dont_fragment', default=True,
+                help=_("Set or un-set the don't fragment (DF) bit on "
+                       "outgoing IP packet carrying GRE/VXLAN tunnel")),
+    cfg.BoolOpt('enable_distributed_routing', default=False,
+                help=_("Make the l2 agent run in dvr mode")),
+    # add by j00209498
+    cfg.StrOpt('os_region_name', default=None,
+               help=_("region name to use")),
+    cfg.StrOpt('keystone_auth_url', default='http://127.0.0.1:35357/v2.0',
+               help=_("keystone auth url to use")),
+    cfg.StrOpt('neutron_user_name',
+               help=_("access neutron user name to use")),
+    cfg.StrOpt('neutron_password',
+               help=_("access neutron password to use")),
+    cfg.StrOpt('neutron_tenant_name',
+               help=_("access neutron tenant to use")),
+
+    # add by j00209498
+    cfg.StrOpt('cascading_os_region_name', default=None,
+               help=_("region name to 
use")), + cfg.StrOpt('cascading_auth_url', default='http://127.0.0.1:35357/v2.0', + help=_("keystone auth url to use")), + cfg.StrOpt('cascading_user_name', + help=_("access neutron user name to use")), + cfg.StrOpt('cascading_password', + help=_("access neutron password to use")), + cfg.StrOpt('cascading_tenant_name', + help=_("access neutron tenant to use")), +] + + +cfg.CONF.register_opts(ovs_opts, "OVS") +cfg.CONF.register_opts(agent_opts, "AGENT") +config.register_agent_state_opts_helper(cfg.CONF) +config.register_root_helper(cfg.CONF) diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/constants.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/constants.py new file mode 100644 index 00000000..8b65e449 --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/common/constants.py @@ -0,0 +1,67 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from neutron.plugins.common import constants as p_const + + +# Special vlan_id value in ovs_vlan_allocations table indicating flat network +FLAT_VLAN_ID = -1 + +# Topic for tunnel notifications between the plugin and agent +TUNNEL = 'tunnel' + +# Values for network_type +VXLAN_UDP_PORT = 4789 + +# Name prefixes for veth device or patch port pair linking the integration +# bridge with the physical bridge for a physical network +PEER_INTEGRATION_PREFIX = 'int-' +PEER_PHYSICAL_PREFIX = 'phy-' + +# Nonexistent peer used to create patch ports without associating them, it +# allows to define flows before association +NONEXISTENT_PEER = 'nonexistent-peer' + +# The different types of tunnels +TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN] + +# Various tables for DVR use of integration bridge flows +LOCAL_SWITCHING = 0 +DVR_TO_SRC_MAC = 1 + +# Various tables for tunneling flows +DVR_PROCESS = 1 +PATCH_LV_TO_TUN = 2 +GRE_TUN_TO_LV = 3 +VXLAN_TUN_TO_LV = 4 +DVR_NOT_LEARN = 9 +LEARN_FROM_TUN = 10 +UCAST_TO_TUN = 20 +ARP_RESPONDER = 21 +FLOOD_TO_TUN = 22 +CANARY_TABLE = 23 + +# Map tunnel types to tables number +TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV, + p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV} + +# The default respawn interval for the ovsdb monitor +DEFAULT_OVSDBMON_RESPAWN = 30 + +# Special return value for an invalid OVS ofport +INVALID_OFPORT = '-1' + +# Represent invalid OF Port +OFPORT_INVALID = -1 diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_db_v2.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_db_v2.py new file mode 100644 index 00000000..b3412094 --- /dev/null +++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_db_v2.py @@ -0,0 +1,396 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.db import exception as db_exc +from six import moves +from sqlalchemy import func +from sqlalchemy.orm import exc + +from neutron.common import exceptions as n_exc +import neutron.db.api as db +from neutron.db import models_v2 +from neutron.db import securitygroups_db as sg_db +from neutron.extensions import securitygroup as ext_sg +from neutron import manager +from neutron.openstack.common import log as logging +from neutron.plugins.openvswitch.common import constants +from neutron.plugins.openvswitch import ovs_models_v2 + +LOG = logging.getLogger(__name__) + + +def get_network_binding(session, network_id): + session = session or db.get_session() + try: + binding = (session.query(ovs_models_v2.NetworkBinding). + filter_by(network_id=network_id). + one()) + return binding + except exc.NoResultFound: + return + + +def add_network_binding(session, network_id, network_type, + physical_network, segmentation_id): + with session.begin(subtransactions=True): + binding = ovs_models_v2.NetworkBinding(network_id, network_type, + physical_network, + segmentation_id) + session.add(binding) + return binding + + +def sync_vlan_allocations(network_vlan_ranges): + """Synchronize vlan_allocations table with configured VLAN ranges.""" + + session = db.get_session() + with session.begin(): + # get existing allocations for all physical networks + allocations = dict() + allocs = (session.query(ovs_models_v2.VlanAllocation). 
+ all()) + for alloc in allocs: + if alloc.physical_network not in allocations: + allocations[alloc.physical_network] = set() + allocations[alloc.physical_network].add(alloc) + + # process vlan ranges for each configured physical network + for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): + # determine current configured allocatable vlans for this + # physical network + vlan_ids = set() + for vlan_range in vlan_ranges: + vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) + + # remove from table unallocated vlans not currently allocatable + if physical_network in allocations: + for alloc in allocations[physical_network]: + try: + # see if vlan is allocatable + vlan_ids.remove(alloc.vlan_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing vlan %(vlan_id)s on " + "physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': physical_network}) + session.delete(alloc) + del allocations[physical_network] + + # add missing allocatable vlans to table + for vlan_id in sorted(vlan_ids): + alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id) + session.add(alloc) + + # remove from table unallocated vlans for any unconfigured physical + # networks + for allocs in allocations.itervalues(): + for alloc in allocs: + if not alloc.allocated: + LOG.debug(_("Removing vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + session.delete(alloc) + + +def get_vlan_allocation(physical_network, vlan_id): + session = db.get_session() + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + one()) + return alloc + except exc.NoResultFound: + return + + +def reserve_vlan(session): + with session.begin(subtransactions=True): + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(allocated=False). + with_lockmode('update'). + first()) + if alloc: + LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " + "%(physical_network)s from pool"), + {'vlan_id': alloc.vlan_id, + 'physical_network': alloc.physical_network}) + alloc.allocated = True + return (alloc.physical_network, alloc.vlan_id) + raise n_exc.NoNetworkAvailable() + + +def reserve_specific_vlan(session, physical_network, vlan_id): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). 
+ one()) + if alloc.allocated: + if vlan_id == constants.FLAT_VLAN_ID: + raise n_exc.FlatNetworkInUse( + physical_network=physical_network) + else: + raise n_exc.VlanIdInUse(vlan_id=vlan_id, + physical_network=physical_network) + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s from pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " + "network %(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id) + alloc.allocated = True + session.add(alloc) + + +def release_vlan(session, physical_network, vlan_id, network_vlan_ranges): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.VlanAllocation). + filter_by(physical_network=physical_network, + vlan_id=vlan_id). + with_lockmode('update'). + one()) + alloc.allocated = False + inside = False + for vlan_range in network_vlan_ranges.get(physical_network, []): + if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]: + inside = True + break + if not inside: + session.delete(alloc) + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s outside pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + else: + LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " + "%(physical_network)s to pool"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + except exc.NoResultFound: + LOG.warning(_("vlan_id %(vlan_id)s on physical network " + "%(physical_network)s not found"), + {'vlan_id': vlan_id, + 'physical_network': physical_network}) + + +def sync_tunnel_allocations(tunnel_id_ranges): + """Synchronize tunnel_allocations table with configured tunnel ranges.""" + + # determine current configured allocatable tunnels + tunnel_ids = set() + for tunnel_id_range in tunnel_id_ranges: + tun_min, tun_max = tunnel_id_range + if tun_max + 1 - tun_min > 1000000: + LOG.error(_("Skipping unreasonable tunnel ID range " + "%(tun_min)s:%(tun_max)s"), + {'tun_min': tun_min, 'tun_max': tun_max}) + else: + tunnel_ids |= set(moves.xrange(tun_min, tun_max + 1)) + + session = db.get_session() + with session.begin(): + # remove from table unallocated tunnels not currently allocatable + allocs = (session.query(ovs_models_v2.TunnelAllocation). + all()) + for alloc in allocs: + try: + # see if tunnel is allocatable + tunnel_ids.remove(alloc.tunnel_id) + except KeyError: + # it's not allocatable, so check if its allocated + if not alloc.allocated: + # it's not, so remove it from table + LOG.debug(_("Removing tunnel %s from pool"), + alloc.tunnel_id) + session.delete(alloc) + + # add missing allocatable tunnels to table + for tunnel_id in sorted(tunnel_ids): + alloc = ovs_models_v2.TunnelAllocation(tunnel_id) + session.add(alloc) + + +def get_tunnel_allocation(tunnel_id): + session = db.get_session() + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). + one()) + return alloc + except exc.NoResultFound: + return + + +def reserve_tunnel(session): + with session.begin(subtransactions=True): + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(allocated=False). + with_lockmode('update'). 
+ first()) + if alloc: + LOG.debug(_("Reserving tunnel %s from pool"), alloc.tunnel_id) + alloc.allocated = True + return alloc.tunnel_id + raise n_exc.NoNetworkAvailable() + + +def reserve_specific_tunnel(session, tunnel_id): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). + one()) + if alloc.allocated: + raise n_exc.TunnelIdInUse(tunnel_id=tunnel_id) + LOG.debug(_("Reserving specific tunnel %s from pool"), tunnel_id) + alloc.allocated = True + except exc.NoResultFound: + LOG.debug(_("Reserving specific tunnel %s outside pool"), + tunnel_id) + alloc = ovs_models_v2.TunnelAllocation(tunnel_id) + alloc.allocated = True + session.add(alloc) + + +def release_tunnel(session, tunnel_id, tunnel_id_ranges): + with session.begin(subtransactions=True): + try: + alloc = (session.query(ovs_models_v2.TunnelAllocation). + filter_by(tunnel_id=tunnel_id). + with_lockmode('update'). + one()) + alloc.allocated = False + inside = False + for tunnel_id_range in tunnel_id_ranges: + if (tunnel_id >= tunnel_id_range[0] + and tunnel_id <= tunnel_id_range[1]): + inside = True + break + if not inside: + session.delete(alloc) + LOG.debug(_("Releasing tunnel %s outside pool"), tunnel_id) + else: + LOG.debug(_("Releasing tunnel %s to pool"), tunnel_id) + except exc.NoResultFound: + LOG.warning(_("tunnel_id %s not found"), tunnel_id) + + +def get_port(port_id): + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + except exc.NoResultFound: + port = None + return port + + +def get_port_from_device(port_id): + """Get port from database.""" + LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id) + session = db.get_session() + sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + + query = session.query(models_v2.Port, + sg_db.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port.id == sg_binding_port) + query = query.filter(models_v2.Port.id == port_id) + port_and_sgs = query.all() + if not port_and_sgs: + return None + port = port_and_sgs[0][0] + plugin = manager.NeutronManager.get_plugin() + port_dict = plugin._make_port_dict(port) + port_dict[ext_sg.SECURITYGROUPS] = [ + sg_id for port_, sg_id in port_and_sgs if sg_id] + port_dict['security_group_rules'] = [] + port_dict['security_group_source_groups'] = [] + port_dict['fixed_ips'] = [ip['ip_address'] + for ip in port['fixed_ips']] + return port_dict + + +def set_port_status(port_id, status): + session = db.get_session() + try: + port = session.query(models_v2.Port).filter_by(id=port_id).one() + port['status'] = status + session.merge(port) + session.flush() + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=port_id) + + +def get_tunnel_endpoints(): + session = db.get_session() + + tunnels = session.query(ovs_models_v2.TunnelEndpoint) + return [{'id': tunnel.id, + 'ip_address': tunnel.ip_address} for tunnel in tunnels] + + +def _generate_tunnel_id(session): + max_tunnel_id = session.query( + func.max(ovs_models_v2.TunnelEndpoint.id)).scalar() or 0 + return max_tunnel_id + 1 + + +def add_tunnel_endpoint(ip, max_retries=10): + """Return the endpoint of the given IP address or generate a new one.""" + + # NOTE(rpodolyaka): generation of a new tunnel endpoint must be put into a + # repeatedly executed transactional block to ensure it + # doesn't conflict with any other concurrently executed + # DB 
transactions in spite of the specified transaction
+    #                    isolation level value
+    for i in moves.xrange(max_retries):
+        LOG.debug(_('Adding a tunnel endpoint for %s'), ip)
+        try:
+            session = db.get_session()
+            with session.begin(subtransactions=True):
+                tunnel = (session.query(ovs_models_v2.TunnelEndpoint).
+                          filter_by(ip_address=ip).with_lockmode('update').
+                          first())
+
+                if tunnel is None:
+                    tunnel_id = _generate_tunnel_id(session)
+                    tunnel = ovs_models_v2.TunnelEndpoint(ip, tunnel_id)
+                    session.add(tunnel)
+
+                return tunnel
+        except db_exc.DBDuplicateEntry:
+            # a concurrent transaction has been committed, try again
+            LOG.debug(_('Adding a tunnel endpoint failed due to a concurrent '
+                        'transaction having been committed '
+                        '(%s attempts left)'),
+                      max_retries - (i + 1))
+
+    raise n_exc.NeutronException(
+        message=_('Unable to generate a new tunnel id'))
diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_models_v2.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_models_v2.py
new file mode 100644
index 00000000..bf6a2d30
--- /dev/null
+++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_models_v2.py
@@ -0,0 +1,111 @@
+# Copyright 2011 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
+from sqlalchemy.schema import UniqueConstraint
+
+from neutron.db import model_base
+from neutron.db import models_v2
+from sqlalchemy import orm
+
+
+class VlanAllocation(model_base.BASEV2):
+
+    """Represents allocation state of vlan_id on physical network."""
+    __tablename__ = 'ovs_vlan_allocations'
+
+    physical_network = Column(String(64), nullable=False, primary_key=True)
+    vlan_id = Column(Integer, nullable=False, primary_key=True,
+                     autoincrement=False)
+    allocated = Column(Boolean, nullable=False)
+
+    def __init__(self, physical_network, vlan_id):
+        self.physical_network = physical_network
+        self.vlan_id = vlan_id
+        self.allocated = False
+
+    def __repr__(self):
+        return "<VlanAllocation(%s,%d,%s)>" % (self.physical_network,
+                                               self.vlan_id, self.allocated)
+
+
+class TunnelAllocation(model_base.BASEV2):
+
+    """Represents allocation state of tunnel_id."""
+    __tablename__ = 'ovs_tunnel_allocations'
+
+    tunnel_id = Column(Integer, nullable=False, primary_key=True,
+                       autoincrement=False)
+    allocated = Column(Boolean, nullable=False)
+
+    def __init__(self, tunnel_id):
+        self.tunnel_id = tunnel_id
+        self.allocated = False
+
+    def __repr__(self):
+        return "<TunnelAllocation(%d,%s)>" % (self.tunnel_id, self.allocated)
+
+
+class NetworkBinding(model_base.BASEV2):
+
+    """Represents binding of virtual network to physical realization."""
+    __tablename__ = 'ovs_network_bindings'
+
+    network_id = Column(String(36),
+                        ForeignKey('networks.id', ondelete="CASCADE"),
+                        primary_key=True)
+    # 'gre', 'vlan', 'flat', 'local'
+    network_type = Column(String(32), nullable=False)
+    physical_network = Column(String(64))
+    segmentation_id = Column(Integer)  # tunnel_id or vlan_id
+
+    network = orm.relationship(
+        models_v2.Network,
+        backref=orm.backref("binding", lazy='joined',
+                            uselist=False, cascade='delete'))
+
+    def __init__(self, network_id, network_type, physical_network,
+                 segmentation_id):
+        self.network_id = network_id
+        self.network_type = network_type
+        self.physical_network = physical_network
+        self.segmentation_id = segmentation_id
+
+    def __repr__(self):
+        return "<NetworkBinding(%s,%s,%s,%d)>" % (self.network_id,
+                                                  self.network_type,
+                                                  self.physical_network,
+                                                  self.segmentation_id)
+
+
+class TunnelEndpoint(model_base.BASEV2):
+
+    """Represents tunnel endpoint in RPC mode."""
+    __tablename__ = 'ovs_tunnel_endpoints'
+    __table_args__ = (
+        UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'),
+        model_base.BASEV2.__table_args__,
+    )
+
+    ip_address = Column(String(64), primary_key=True)
+    id = Column(Integer, nullable=False)
+
+    def __init__(self, ip_address, id):
+        self.ip_address = ip_address
+        self.id = id
+
+    def __repr__(self):
+        return "<TunnelEndpoint(%s,%s)>" % (self.ip_address, self.id)
diff --git a/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_neutron_plugin.py b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_neutron_plugin.py
new file mode 100644
index 00000000..cdd29abb
--- /dev/null
+++ b/neutronproxy/l2-proxy/neutron/plugins/l2_proxy/ovs_neutron_plugin.py
@@ -0,0 +1,635 @@
+# Copyright 2011 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
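+#
+# The plugin below validates the provider extension attributes on network
+# create/update (see _process_provider_create). As an illustrative sketch
+# only (names and values are hypothetical, not taken from this patch), a
+# VLAN provider network request body handled by create_network() could
+# look like:
+#
+#     {"network": {"name": "physnet1-vlan100",
+#                  "provider:network_type": "vlan",
+#                  "provider:physical_network": "physnet1",
+#                  "provider:segmentation_id": 100}}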
+ +import sys + +from oslo.config import cfg + +from neutron.agent import securitygroups_rpc as sg_rpc +from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.api.v2 import attributes +from neutron.common import constants as q_const +from neutron.common import exceptions as n_exc +from neutron.common import rpc as n_rpc +from neutron.common import topics +from neutron.common import utils +from neutron.db import agents_db +from neutron.db import agentschedulers_db +from neutron.db import allowedaddresspairs_db as addr_pair_db +from neutron.db import db_base_plugin_v2 +from neutron.db import dhcp_rpc_base +from neutron.db import external_net_db +from neutron.db import extradhcpopt_db +from neutron.db import extraroute_db +from neutron.db import l3_agentschedulers_db +from neutron.db import l3_gwmode_db +from neutron.db import l3_rpc_base +from neutron.db import portbindings_db +from neutron.db import quota_db # noqa +from neutron.db import securitygroups_rpc_base as sg_db_rpc +from neutron.extensions import allowedaddresspairs as addr_pair +from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import portbindings +from neutron.extensions import providernet as provider +from neutron import manager +from neutron.openstack.common import importutils +from neutron.openstack.common import log as logging +from neutron.plugins.common import constants as svc_constants +from neutron.plugins.common import utils as plugin_utils +from neutron.plugins.openvswitch.common import config # noqa +from neutron.plugins.openvswitch.common import constants +from neutron.plugins.openvswitch import ovs_db_v2 + + +LOG = logging.getLogger(__name__) + + +class OVSRpcCallbacks(n_rpc.RpcCallback, + dhcp_rpc_base.DhcpRpcCallbackMixin, + l3_rpc_base.L3RpcCallbackMixin, + sg_db_rpc.SecurityGroupServerRpcCallbackMixin): + + # history + # 1.0 Initial version + # 1.1 Support Security Group RPC + # 1.2 Support get_devices_details_list + + RPC_API_VERSION = '1.2' + + def __init__(self, notifier, tunnel_type): + super(OVSRpcCallbacks, self).__init__() + self.notifier = notifier + self.tunnel_type = tunnel_type + + @classmethod + def get_port_from_device(cls, device): + port = ovs_db_v2.get_port_from_device(device) + if port: + port['device'] = device + return port + + def get_device_details(self, rpc_context, **kwargs): + """Agent requests device details.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + port = ovs_db_v2.get_port(device) + if port: + binding = ovs_db_v2.get_network_binding(None, port['network_id']) + entry = {'device': device, + 'network_id': port['network_id'], + 'port_id': port['id'], + 'admin_state_up': port['admin_state_up'], + 'network_type': binding.network_type, + 'segmentation_id': binding.segmentation_id, + 'physical_network': binding.physical_network} + new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] + else q_const.PORT_STATUS_DOWN) + if port['status'] != new_status: + ovs_db_v2.set_port_status(port['id'], new_status) + else: + entry = {'device': device} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def get_devices_details_list(self, rpc_context, **kwargs): + return [ + self.get_device_details( + rpc_context, + device=device, + **kwargs + ) + for device in kwargs.pop('devices', []) + ] + + def 
update_device_down(self, rpc_context, **kwargs): + """Device no longer exists on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = ovs_db_v2.get_port(device) + LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + if port: + entry = {'device': device, + 'exists': True} + plugin = manager.NeutronManager.get_plugin() + if (host and + not plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + elif port['status'] != q_const.PORT_STATUS_DOWN: + # Set port status to DOWN + ovs_db_v2.set_port_status(port['id'], + q_const.PORT_STATUS_DOWN) + else: + entry = {'device': device, + 'exists': False} + LOG.debug(_("%s can not be found in database"), device) + return entry + + def update_device_up(self, rpc_context, **kwargs): + """Device is up on agent.""" + agent_id = kwargs.get('agent_id') + device = kwargs.get('device') + host = kwargs.get('host') + port = ovs_db_v2.get_port(device) + LOG.debug(_("Device %(device)s up on %(agent_id)s"), + {'device': device, 'agent_id': agent_id}) + plugin = manager.NeutronManager.get_plugin() + if port: + if (host and + not plugin.get_port_host(rpc_context, port['id']) == host): + LOG.debug(_("Device %(device)s not bound to the" + " agent host %(host)s"), + {'device': device, 'host': host}) + return + elif port['status'] != q_const.PORT_STATUS_ACTIVE: + ovs_db_v2.set_port_status(port['id'], + q_const.PORT_STATUS_ACTIVE) + else: + LOG.debug(_("%s can not be found in database"), device) + + def tunnel_sync(self, rpc_context, **kwargs): + """Update new tunnel. + + Updates the datbase with the tunnel IP. All listening agents will also + be notified about the new tunnel IP. + """ + tunnel_ip = kwargs.get('tunnel_ip') + # Update the database with the IP + tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip) + tunnels = ovs_db_v2.get_tunnel_endpoints() + entry = dict() + entry['tunnels'] = tunnels + # Notify all other listening agents + self.notifier.tunnel_update(rpc_context, tunnel.ip_address, + tunnel.id, self.tunnel_type) + # Return the list of tunnels IP's to the agent + return entry + + +class AgentNotifierApi(n_rpc.RpcProxy, + sg_rpc.SecurityGroupAgentRpcApiMixin): + + '''Agent side of the openvswitch rpc API. + + API version history: + 1.0 - Initial version. 
+ + ''' + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic): + super(AgentNotifierApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.topic_network_delete = topics.get_topic_name(topic, + topics.NETWORK, + topics.DELETE) + self.topic_port_update = topics.get_topic_name(topic, + topics.PORT, + topics.UPDATE) + self.topic_tunnel_update = topics.get_topic_name(topic, + constants.TUNNEL, + topics.UPDATE) + + def network_delete(self, context, network_id): + self.fanout_cast(context, + self.make_msg('network_delete', + network_id=network_id), + topic=self.topic_network_delete) + + def port_update(self, context, port, network_type, segmentation_id, + physical_network): + self.fanout_cast(context, + self.make_msg('port_update', + port=port, + network_type=network_type, + segmentation_id=segmentation_id, + physical_network=physical_network), + topic=self.topic_port_update) + + def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type): + self.fanout_cast(context, + self.make_msg('tunnel_update', + tunnel_ip=tunnel_ip, + tunnel_id=tunnel_id, + tunnel_type=tunnel_type), + topic=self.topic_tunnel_update) + + +class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, + external_net_db.External_net_db_mixin, + extraroute_db.ExtraRoute_db_mixin, + l3_gwmode_db.L3_NAT_db_mixin, + sg_db_rpc.SecurityGroupServerRpcMixin, + l3_agentschedulers_db.L3AgentSchedulerDbMixin, + agentschedulers_db.DhcpAgentSchedulerDbMixin, + portbindings_db.PortBindingMixin, + extradhcpopt_db.ExtraDhcpOptMixin, + addr_pair_db.AllowedAddressPairsMixin): + + """Implement the Neutron abstractions using Open vSwitch. + + Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or + a new VLAN is created for each network. An agent is relied upon to + perform the actual OVS configuration on each host. + + The provider extension is also supported. As discussed in + https://bugs.launchpad.net/neutron/+bug/1023156, this class could + be simplified, and filtering on extended attributes could be + handled, by adding support for extended attributes to the + NeutronDbPluginV2 base class. When that occurs, this class should + be updated to take advantage of it. + + The port binding extension enables an external application relay + information to and from the plugin. + """ + + # This attribute specifies whether the plugin supports or not + # bulk/pagination/sorting operations. 
Name mangling is used in + # order to ensure it is qualified by class + __native_bulk_support = True + __native_pagination_support = True + __native_sorting_support = True + + _supported_extension_aliases = ["provider", "external-net", "router", + "ext-gw-mode", "binding", "quotas", + "security-group", "agent", "extraroute", + "l3_agent_scheduler", + "dhcp_agent_scheduler", + "extra_dhcp_opt", + "allowed-address-pairs"] + + @property + def supported_extension_aliases(self): + if not hasattr(self, '_aliases'): + aliases = self._supported_extension_aliases[:] + sg_rpc.disable_security_group_extension_by_config(aliases) + self._aliases = aliases + return self._aliases + + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.NETWORKS, ['_extend_network_dict_provider_ovs']) + + def __init__(self, configfile=None): + super(OVSNeutronPluginV2, self).__init__() + self.base_binding_dict = { + portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, + portbindings.VIF_DETAILS: { + # TODO(rkukura): Replace with new VIF security details + portbindings.CAP_PORT_FILTER: + 'security-group' in self.supported_extension_aliases, + portbindings.OVS_HYBRID_PLUG: True}} + self._parse_network_vlan_ranges() + ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges) + self.tenant_network_type = cfg.CONF.OVS.tenant_network_type + if self.tenant_network_type not in [svc_constants.TYPE_LOCAL, + svc_constants.TYPE_VLAN, + svc_constants.TYPE_GRE, + svc_constants.TYPE_VXLAN, + svc_constants.TYPE_NONE]: + LOG.error(_("Invalid tenant_network_type: %s. " + "Server terminated!"), + self.tenant_network_type) + sys.exit(1) + self.enable_tunneling = cfg.CONF.OVS.enable_tunneling + self.tunnel_type = None + if self.enable_tunneling: + self.tunnel_type = (cfg.CONF.OVS.tunnel_type or + svc_constants.TYPE_GRE) + elif cfg.CONF.OVS.tunnel_type: + self.tunnel_type = cfg.CONF.OVS.tunnel_type + self.enable_tunneling = True + self.tunnel_id_ranges = [] + if self.enable_tunneling: + self._parse_tunnel_id_ranges() + ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges) + elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES: + LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. " + "Server terminated!"), self.tenant_network_type) + sys.exit(1) + self.setup_rpc() + self.network_scheduler = importutils.import_object( + cfg.CONF.network_scheduler_driver + ) + self.router_scheduler = importutils.import_object( + cfg.CONF.router_scheduler_driver + ) + + def setup_rpc(self): + # RPC support + self.service_topics = {svc_constants.CORE: topics.PLUGIN, + svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} + self.conn = n_rpc.create_connection(new=True) + self.notifier = AgentNotifierApi(topics.AGENT) + self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + ) + self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + l3_rpc_agent_api.L3AgentNotifyAPI() + ) + self.endpoints = [OVSRpcCallbacks(self.notifier, self.tunnel_type), + agents_db.AgentExtRpcCallback()] + for svc_topic in self.service_topics.values(): + self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) + # Consume from all consumers in threads + self.conn.consume_in_threads() + + def _parse_network_vlan_ranges(self): + try: + self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( + cfg.CONF.OVS.network_vlan_ranges) + except Exception as ex: + LOG.error(_("%s. 
Server terminated!"), ex) + sys.exit(1) + LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) + + def _parse_tunnel_id_ranges(self): + for entry in cfg.CONF.OVS.tunnel_id_ranges: + entry = entry.strip() + try: + tun_min, tun_max = entry.split(':') + self.tunnel_id_ranges.append((int(tun_min), int(tun_max))) + except ValueError as ex: + LOG.error(_("Invalid tunnel ID range: " + "'%(range)s' - %(e)s. Server terminated!"), + {'range': entry, 'e': ex}) + sys.exit(1) + LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges) + + def _extend_network_dict_provider_ovs(self, network, net_db, + net_binding=None): + # this method used in two cases: when binding is provided explicitly + # and when it is a part of db model object + binding = net_db.binding if net_db else net_binding + network[provider.NETWORK_TYPE] = binding.network_type + if binding.network_type in constants.TUNNEL_NETWORK_TYPES: + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = binding.segmentation_id + elif binding.network_type == svc_constants.TYPE_FLAT: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = None + elif binding.network_type == svc_constants.TYPE_VLAN: + network[provider.PHYSICAL_NETWORK] = binding.physical_network + network[provider.SEGMENTATION_ID] = binding.segmentation_id + elif binding.network_type == svc_constants.TYPE_LOCAL: + network[provider.PHYSICAL_NETWORK] = None + network[provider.SEGMENTATION_ID] = None + + def _process_provider_create(self, context, attrs): + network_type = attrs.get(provider.NETWORK_TYPE) + physical_network = attrs.get(provider.PHYSICAL_NETWORK) + segmentation_id = attrs.get(provider.SEGMENTATION_ID) + + network_type_set = attributes.is_attr_set(network_type) + physical_network_set = attributes.is_attr_set(physical_network) + segmentation_id_set = attributes.is_attr_set(segmentation_id) + + if not (network_type_set or physical_network_set or + segmentation_id_set): + return (None, None, None) + + if not network_type_set: + msg = _("provider:network_type required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_FLAT: + if segmentation_id_set: + msg = _("provider:segmentation_id specified for flat network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = constants.FLAT_VLAN_ID + elif network_type == svc_constants.TYPE_VLAN: + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + if not utils.is_valid_vlan_tag(segmentation_id): + msg = (_("provider:segmentation_id out of range " + "(%(min_id)s through %(max_id)s)") % + {'min_id': q_const.MIN_VLAN_TAG, + 'max_id': q_const.MAX_VLAN_TAG}) + raise n_exc.InvalidInput(error_message=msg) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + if not self.enable_tunneling: + msg = _("%s networks are not enabled") % network_type + raise n_exc.InvalidInput(error_message=msg) + if physical_network_set: + msg = _("provider:physical_network specified for %s " + "network") % network_type + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if not segmentation_id_set: + msg = _("provider:segmentation_id required") + raise n_exc.InvalidInput(error_message=msg) + elif network_type == svc_constants.TYPE_LOCAL: + if physical_network_set: + msg = _("provider:physical_network specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + physical_network = None + if 
segmentation_id_set: + msg = _("provider:segmentation_id specified for local " + "network") + raise n_exc.InvalidInput(error_message=msg) + else: + segmentation_id = None + else: + msg = _("provider:network_type %s not supported") % network_type + raise n_exc.InvalidInput(error_message=msg) + + if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: + if physical_network_set: + if physical_network not in self.network_vlan_ranges: + msg = _("Unknown provider:physical_network " + "%s") % physical_network + raise n_exc.InvalidInput(error_message=msg) + elif 'default' in self.network_vlan_ranges: + physical_network = 'default' + else: + msg = _("provider:physical_network required") + raise n_exc.InvalidInput(error_message=msg) + + return (network_type, physical_network, segmentation_id) + + def create_network(self, context, network): + (network_type, physical_network, + segmentation_id) = self._process_provider_create(context, + network['network']) + + session = context.session + # set up default security groups + tenant_id = self._get_tenant_id_for_create( + context, network['network']) + self._ensure_default_security_group(context, tenant_id) + + with session.begin(subtransactions=True): + if not network_type: + # tenant network + network_type = self.tenant_network_type + if network_type == svc_constants.TYPE_NONE: + raise n_exc.TenantNetworksDisabled() + elif network_type == svc_constants.TYPE_VLAN: + (physical_network, + segmentation_id) = ovs_db_v2.reserve_vlan(session) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + segmentation_id = ovs_db_v2.reserve_tunnel(session) + # no reservation needed for TYPE_LOCAL + else: + # provider network + if network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + ovs_db_v2.reserve_specific_vlan(session, physical_network, + segmentation_id) + elif network_type in constants.TUNNEL_NETWORK_TYPES: + ovs_db_v2.reserve_specific_tunnel(session, segmentation_id) + # no reservation needed for TYPE_LOCAL + net = super(OVSNeutronPluginV2, self).create_network(context, + network) + binding = ovs_db_v2.add_network_binding(session, net['id'], + network_type, + physical_network, + segmentation_id) + + self._process_l3_create(context, net, network['network']) + # passing None as db model to use binding object + self._extend_network_dict_provider_ovs(net, None, binding) + # note - exception will rollback entire transaction + LOG.debug(_("Created network: %s"), net['id']) + return net + + def update_network(self, context, id, network): + provider._raise_if_updates_provider_attributes(network['network']) + + session = context.session + with session.begin(subtransactions=True): + net = super(OVSNeutronPluginV2, self).update_network(context, id, + network) + self._process_l3_update(context, net, network['network']) + return net + + def delete_network(self, context, id): + session = context.session + with session.begin(subtransactions=True): + binding = ovs_db_v2.get_network_binding(session, id) + self._process_l3_delete(context, id) + super(OVSNeutronPluginV2, self).delete_network(context, id) + if binding.network_type in constants.TUNNEL_NETWORK_TYPES: + ovs_db_v2.release_tunnel(session, binding.segmentation_id, + self.tunnel_id_ranges) + elif binding.network_type in [svc_constants.TYPE_VLAN, + svc_constants.TYPE_FLAT]: + ovs_db_v2.release_vlan(session, binding.physical_network, + binding.segmentation_id, + self.network_vlan_ranges) + # the network_binding record is deleted via cascade from + # the network record, so explicit removal 
is not necessary + self.notifier.network_delete(context, id) + + def get_network(self, context, id, fields=None): + session = context.session + with session.begin(subtransactions=True): + net = super(OVSNeutronPluginV2, self).get_network(context, + id, None) + return self._fields(net, fields) + + def get_networks(self, context, filters=None, fields=None, + sorts=None, + limit=None, marker=None, page_reverse=False): + session = context.session + with session.begin(subtransactions=True): + nets = super(OVSNeutronPluginV2, + self).get_networks(context, filters, None, sorts, + limit, marker, page_reverse) + + return [self._fields(net, fields) for net in nets] + + def create_port(self, context, port): + # Set port status as 'DOWN'. This will be updated by agent + port['port']['status'] = q_const.PORT_STATUS_DOWN + port_data = port['port'] + session = context.session + with session.begin(subtransactions=True): + self._ensure_default_security_group_on_port(context, port) + sgids = self._get_security_groups_on_port(context, port) + dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) + port = super(OVSNeutronPluginV2, self).create_port(context, port) + self._process_portbindings_create_and_update(context, + port_data, port) + self._process_port_create_security_group(context, port, sgids) + self._process_port_create_extra_dhcp_opts(context, port, + dhcp_opts) + port[addr_pair.ADDRESS_PAIRS] = ( + self._process_create_allowed_address_pairs( + context, port, + port_data.get(addr_pair.ADDRESS_PAIRS))) + self.notify_security_groups_member_updated(context, port) + return port + + def update_port(self, context, id, port): + session = context.session + need_port_update_notify = False + with session.begin(subtransactions=True): + original_port = super(OVSNeutronPluginV2, self).get_port( + context, id) + updated_port = super(OVSNeutronPluginV2, self).update_port( + context, id, port) + if addr_pair.ADDRESS_PAIRS in port['port']: + need_port_update_notify |= ( + self.update_address_pairs_on_port(context, id, port, + original_port, + updated_port)) + need_port_update_notify |= self.update_security_group_on_port( + context, id, port, original_port, updated_port) + self._process_portbindings_create_and_update(context, + port['port'], + updated_port) + need_port_update_notify |= self._update_extra_dhcp_opts_on_port( + context, id, port, updated_port) + + need_port_update_notify |= self.is_security_group_member_updated( + context, original_port, updated_port) + if original_port['admin_state_up'] != updated_port['admin_state_up']: + need_port_update_notify = True + + if need_port_update_notify: + binding = ovs_db_v2.get_network_binding(None, + updated_port['network_id']) + self.notifier.port_update(context, updated_port, + binding.network_type, + binding.segmentation_id, + binding.physical_network) + return updated_port + + def delete_port(self, context, id, l3_port_check=True): + + # if needed, check to see if this is a port owned by + # and l3-router. If so, we should prevent deletion. 
+        if l3_port_check:
+            self.prevent_l3_port_deletion(context, id)
+
+        session = context.session
+        with session.begin(subtransactions=True):
+            self.disassociate_floatingips(context, id)
+            port = self.get_port(context, id)
+            self._delete_port_security_group_bindings(context, id)
+            super(OVSNeutronPluginV2, self).delete_port(context, id)
+
+        self.notify_security_groups_member_updated(context, port)
diff --git a/neutronproxy/l3-proxy/README.md b/neutronproxy/l3-proxy/README.md
new file mode 100644
index 00000000..732fdbf2
--- /dev/null
+++ b/neutronproxy/l3-proxy/README.md
@@ -0,0 +1,156 @@
+Neutron L3 Proxy
+===============================
+
+    L3-Proxy plays the same role as the L3-agent in the cascading OpenStack.
+    L3-Proxy treats the cascaded Neutron-Server as it would its local Linux namespaces, converting the internal request messages from the message bus into RESTful API calls to the cascaded Neutron-Server.
+
+
+Key modules
+-----------
+
+* The new l3 proxy module l3_proxy, which treats the cascaded Neutron-Server as it would its local Linux namespaces and converts the internal request messages from the message bus into RESTful API calls to the cascaded Neutron-Server:
+
+    neutron/agent/l3_proxy.py
+
+
+Requirements
+------------
+* openstack-neutron-l3-agent-2014.1-1.1 and the l2-proxy have been installed
+
+Installation
+------------
+
+We provide two ways to install the l3 proxy code. In this section, we will guide you through installing the l3 proxy with the minimum configuration.
+
+* **Note:**
+
+    - Make sure you have an existing installation of **OpenStack Icehouse**.
+    - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified:
+      $NEUTRON_CONFIG_PARENT_DIR/neutron.conf
+      (replace the $... with actual directory names.)
+
+* **Manual Installation**
+
+    - Navigate to the local repository and copy the contents of the 'neutron' sub-directory to the corresponding places in the existing neutron installation, e.g.
+      ```cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR```
+      (replace the $... with the actual directory name.)
+
+    - Navigate to the local repository and copy the contents of the 'etc' sub-directory to the corresponding places in the existing neutron installation, e.g.
+      ```cp -r $LOCAL_REPOSITORY_DIR/etc $NEUTRON_CONFIG_DIR```
+      (replace the $... with the actual directory name.)
+
+    - Update the neutron configuration file (e.g. /etc/neutron/l3_proxy_agent.ini) with the minimum options below. If an option already exists, modify its value; otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide and a detailed explanation of each configuration item.
+      ```
+      [DEFAULT]
+      ...
+      ###configuration for neutron cascading ###
+      admin_tenant_name = $TENANT_NAME
+      admin_user = $USER_NAME
+      admin_password = $USER_PWD
+      auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+      os_region_name = $CASCADEDING_REGION_NAME
+
+      cascaded_os_region_name = $CASCADED_REGION_NAME
+      cascaded_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+      cascaded_admin_user_name = $USER_NAME
+      cascaded_admin_password = $USER_PWD
+      cascaded_tenant_name = $CASCADED_TENANT_NAME
+      ```
+
+    - Start the neutron l3 proxy.
+      ```nohup /usr/bin/python /usr/lib64/python2.6/site-packages/neutron/agent/l3_proxy.py --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/l3_proxy_agent.ini >/dev/null 2>&1 &```
+
+    - Done. The neutron l3 proxy should be working with a demo configuration.
+
+* **Automatic Installation**
+
+    - Navigate to the installation directory and run the installation script.
+      ```
+      cd $LOCAL_REPOSITORY_DIR/installation
+      sudo bash ./install.sh
+      ```
+      (replace the $... with the actual directory name.)
+
+    - Done. The installation script should set up the l3 proxy with the minimum configuration below. Check the "Configurations" section for a full configuration guide and a detailed explanation of each configuration item.
+      ```
+      [DEFAULT]
+      ...
+      ###cascade info ###
+      admin_tenant_name = $TENANT_NAME
+      admin_user = $USER_NAME
+      admin_password = $USER_PWD
+      auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+      os_region_name = $CASCADEDING_REGION_NAME
+
+      cascaded_os_region_name = $CASCADED_REGION_NAME
+      cascaded_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+      cascaded_admin_user_name = $USER_NAME
+      cascaded_admin_password = $USER_PWD
+      cascaded_tenant_name = $CASCADED_TENANT_NAME
+      ```
+
+* **Troubleshooting**
+
+    In case the automatic installation process does not complete, please check the following:
+
+    - Make sure your OpenStack version is Icehouse.
+
+    - Check the variables at the beginning of the install.sh script. Your installation directories may be different from the default values we provide.
+
+    - The installation script will automatically add the related code to $NEUTRON_PARENT_DIR/neutron and modify the related configuration.
+
+    - In case the automatic installation does not work, try to install manually.
+
+Configurations
+--------------
+
+* This is a (default) configuration sample for the l3 proxy. Please add/modify these options in /etc/neutron/l3_proxy_agent.ini.
+* Note:
+    - Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name.
+    - Please refer to the 'Configuration Details' section below for proper configuration and usage.
+
+```
+[DEFAULT]
+
+...
+
+#The global keystone component service url, by which the l3 proxy
+#can access the global keystone service.
+#In future, a separate KeyStone service may be used.
+cascaded_auth_url = http://$CASCADING_CONTROL_IP:35357/v2.0
+
+#The region name, which will be set as a parameter when
+#the cascaded level component services register endpoints to keystone
+cascaded_os_region_name = $CASCADED_REGION_NAME
+
+# username for connecting to cascaded neutron in admin context (string
+# value)
+cascaded_admin_user_name=$USER_NAME
+
+# password for connecting to cascaded neutron in admin context (string
+# value)
+cascaded_admin_password=$USER_PWD
+
+# tenant name for connecting to cascaded neutron in admin context
+# (string value)
+cascaded_tenant_name=$TENANT_NAME
+
+#The global keystone component service url, by which the l3 proxy
+#can access the global keystone service.
+#In future, a separate KeyStone service may be used.
+auth_url= http://$CASCADING_CONTROL_IP:35357/v2.0 + +#The region name ,which will be set as a parameter when +#the cascading level component services register endpoint to keystone +os_region_name = $CASCADEDING_REGION_NAME + +# username for connecting to cascading neutron in admin context (string +# value) +admin_user= $USER_NAME + +# password for connecting to cascading neutron in admin context (string +# value) +admin_password=$USER_PWD + +# tenant name for connecting to cascading neutron in admin context +# (string value) +admin_tenant_name=$TENANT_NAME diff --git a/neutronproxy/l3-proxy/etc/neutron/l3_proxy_agent.ini b/neutronproxy/l3-proxy/etc/neutron/l3_proxy_agent.ini new file mode 100644 index 00000000..9930e8a6 --- /dev/null +++ b/neutronproxy/l3-proxy/etc/neutron/l3_proxy_agent.ini @@ -0,0 +1,22 @@ +[DEFAULT] +distributed_agent=True +debug = True +verbose = True +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver +use_namespaces = True +signing_dir = /var/cache/neutron +admin_tenant_name = admin +admin_user = neutron +admin_password = Galax8800 +auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 +os_region_name = CASCADEDING_REGION_NAME + +cascaded_os_region_name = CASCADED_REGION_NAME +cascaded_auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 +cascaded_admin_user_name = USER_NAME +cascaded_admin_password = USER_PWD +cascaded_tenant_name = TENANT_NAME +l3_agent_manager = neutron.agent.l3_proxy.L3NATAgentWithStateReport +root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf +external_network_bridge = br-ex + diff --git a/neutronproxy/l3-proxy/installation/install.sh b/neutronproxy/l3-proxy/installation/install.sh new file mode 100644 index 00000000..f5a39889 --- /dev/null +++ b/neutronproxy/l3-proxy/installation/install.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_NEUTRON_CONF_DIR="/etc/neutron" +_NEUTRON_DIR="/usr/lib64/python2.6/site-packages/neutron" +_NEUTRON_L3_PROXY_CONF_FILE='l3_proxy_agent.ini' + +CASCADING_CONTROL_IP=127.0.0.1 +CASCADEDING_REGION_NAME=Cascading_Openstack +CASCADED_REGION_NAME=AZ1 +USER_NAME=neutron +USER_PWD=neutron +TENANT_NAME=admin + + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../neutron/" +_CONFIG_DIR="../etc/neutron/" + +#_SCRIPT_NAME="${0##*/}" +#_SCRIPT_LOGFILE="/var/log/neutron/installation/${_SCRIPT_NAME}.log" + +if [[ ${EUID} -ne 0 ]]; then + echo "Please run as root." + exit 1 +fi + +##Redirecting output to logfile as well as stdout +#exec > >(tee -a ${_SCRIPT_LOGFILE}) +#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) + +cd `dirname $0` +echo "checking installation directories..." +if [ ! -d "${_NEUTRON_DIR}" ] ; then + echo "Could not find the neutron installation. Please check the variables in the beginning of the script." + echo "aborted." + exit 1 +fi + +echo "copying in new code files..." +cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` +echo +if [ $? 
diff --git a/neutronproxy/l3-proxy/installation/install.sh b/neutronproxy/l3-proxy/installation/install.sh
new file mode 100644
index 00000000..f5a39889
--- /dev/null
+++ b/neutronproxy/l3-proxy/installation/install.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# Copyright (c) 2014 Huawei Technologies.
+
+_NEUTRON_CONF_DIR="/etc/neutron"
+_NEUTRON_DIR="/usr/lib64/python2.6/site-packages/neutron"
+_NEUTRON_L3_PROXY_CONF_FILE='l3_proxy_agent.ini'
+
+CASCADING_CONTROL_IP=127.0.0.1
+CASCADEDING_REGION_NAME=Cascading_Openstack
+CASCADED_REGION_NAME=AZ1
+USER_NAME=neutron
+USER_PWD=neutron
+TENANT_NAME=admin
+
+
+# if you did not make changes to the installation files,
+# please do not edit the following directories.
+_CODE_DIR="../neutron/"
+_CONFIG_DIR="../etc/neutron/"
+
+#_SCRIPT_NAME="${0##*/}"
+#_SCRIPT_LOGFILE="/var/log/neutron/installation/${_SCRIPT_NAME}.log"
+
+if [[ ${EUID} -ne 0 ]]; then
+    echo "Please run as root."
+    exit 1
+fi
+
+##Redirecting output to logfile as well as stdout
+#exec > >(tee -a ${_SCRIPT_LOGFILE})
+#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2)
+
+cd `dirname $0`
+echo "checking installation directories..."
+if [ ! -d "${_NEUTRON_DIR}" ] ; then
+    echo "Could not find the neutron installation. Please check the variables in the beginning of the script."
+    echo "aborted."
+    exit 1
+fi
+
+echo "copying in new code files..."
+cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}`
+if [ $? -ne 0 ] ; then
+    echo "Error in copying new code files, aborted."
+    exit 1
+fi
+echo
+
+echo "copying in new config files..."
+cp -r "${_CONFIG_DIR}" `dirname ${_NEUTRON_CONF_DIR}`
+if [ $? -ne 0 ] ; then
+    echo "Error in copying config files, aborted."
+    exit 1
+fi
+
+echo "updating config file..."
+sed -i "s/CASCADING_CONTROL_IP/$CASCADING_CONTROL_IP/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}"
+sed -i "s/CASCADEDING_REGION_NAME/$CASCADEDING_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}"
+sed -i "s/CASCADED_REGION_NAME/$CASCADED_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}"
+sed -i "s/USER_NAME/$USER_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}"
+sed -i "s/USER_PWD/$USER_PWD/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}"
+sed -i "s/TENANT_NAME/$TENANT_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}"
+
+if [ $? -ne 0 ] ; then
+    echo "Error in updating config file, aborted."
+    exit 1
+fi
+
+echo "starting neutron l3-proxy..."
+nohup /usr/bin/python /usr/lib64/python2.6/site-packages/neutron/agent/l3_proxy.py --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/l3_proxy_agent.ini >/dev/null 2>&1 &
+if [ $? -ne 0 ] ; then
+    echo "There was an error in starting the l3-proxy, please start neutron l3-proxy manually."
+    exit 1
+fi
+
+echo "Completed."
+echo "See README to get started."
+
+exit 0
diff --git a/neutronproxy/l3-proxy/neutron/agent/l3_proxy.py b/neutronproxy/l3-proxy/neutron/agent/l3_proxy.py
new file mode 100644
index 00000000..868f3ea5
--- /dev/null
+++ b/neutronproxy/l3-proxy/neutron/agent/l3_proxy.py
@@ -0,0 +1,1782 @@
+# Copyright 2014, Huawei, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# @author: Haojie Jia, Huawei + +import sys +import json +import eventlet +eventlet.monkey_patch() + +import netaddr +from oslo.config import cfg + +from neutron.agent.common import config +from neutron.agent.linux import external_process +from neutron.agent.linux import interface +from neutron.agent.linux import ip_lib +from neutron.agent.linux import iptables_manager +from neutron.agent import rpc as agent_rpc +from neutron.common import config as common_config +from neutron.common import constants as l3_constants +from neutron.common import rpc as rpc_compat +from neutron.common import topics +from neutron.common import utils as common_utils +from neutron import context +from neutron import manager +from neutron.openstack.common import excutils +from neutron.openstack.common import importutils +from neutron.openstack.common import lockutils +from neutron.openstack.common import log as logging +from neutron.openstack.common import loopingcall +from neutron.openstack.common import periodic_task +from neutron.openstack.common import processutils +from neutron.openstack.common import service +from neutron import service as neutron_service +from neutron.services.firewall.agents.l3reference import firewall_l3_agent + +from neutron.plugins.l2_proxy.agent import neutron_proxy_context +from neutron.plugins.l2_proxy.agent import clients + +LOG = logging.getLogger(__name__) +NS_PREFIX = 'qrouter-' +INTERNAL_DEV_PREFIX = 'qr-' +EXTERNAL_DEV_PREFIX = 'qg-' +SNAT_INT_DEV_PREFIX = 'sg-' +FIP_NS_PREFIX = 'fip-' +SNAT_NS_PREFIX = 'snat-' +FIP_2_RTR_DEV_PREFIX = 'fpr-' +RTR_2_FIP_DEV_PREFIX = 'rfp-' +FIP_EXT_DEV_PREFIX = 'fg-' +FIP_LL_PREFIX = '169.254.30.' +# Route Table index for FIPs +FIP_RT_TBL = 16 +# Rule priority range for FIPs +FIP_PR_ST = 32768 +FIP_PR_END = FIP_PR_ST + 40000 +RPC_LOOP_INTERVAL = 1 +FLOATING_IP_CIDR_SUFFIX = '/32' + + +class L3PluginApi(rpc_compat.RpcProxy): + + """Agent side of the l3 agent RPC API. + + API version history: + 1.0 - Initial version. + 1.1 - Floating IP operational status updates + 1.2 - DVR support + + """ + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic, host): + super(L3PluginApi, self).__init__( + topic=topic, default_version=self.BASE_RPC_API_VERSION) + self.host = host + + def get_routers(self, context, router_ids=None): + """Make a remote process call to retrieve the sync data for routers.""" + return self.call(context, + self.make_msg('sync_routers', host=self.host, + router_ids=router_ids), + topic=self.topic) + + def get_external_network_id(self, context): + """Make a remote process call to retrieve the external network id. 
+ + @raise rpc_compat.RemoteError: with TooManyExternalNetworks + as exc_type if there are + more than one external network + """ + return self.call(context, + self.make_msg('get_external_network_id', + host=self.host), + topic=self.topic) + + def update_floatingip_statuses(self, context, router_id, fip_statuses): + """Call the plugin update floating IPs's operational status.""" + return self.call(context, + self.make_msg('update_floatingip_statuses', + router_id=router_id, + fip_statuses=fip_statuses), + topic=self.topic, + version='1.1') + + def get_ports_by_subnet(self, context, subnet_id): + """Retrieve ports by subnet id.""" + return self.call(context, + self.make_msg('get_ports_by_subnet', host=self.host, + subnet_id=subnet_id), + topic=self.topic, + version='1.2') + + def get_agent_gateway_port(self, context, fip_net): + """Get or create a agent_gateway_port.""" + return self.call(context, + self.make_msg('get_agent_gateway_port', + network_id=fip_net, host=self.host), + topic=self.topic, + version='1.2') + + +class RouterInfo(object): + + def __init__(self, router_id, root_helper, use_namespaces, router): + self.router_id = router_id + self.cascaded_router_id = None + self.ex_gw_port = None + self._snat_enabled = None + self._snat_action = None + self.internal_ports = [] + self.snat_ports = [] + self.floating_ips = set() + # TODO(mrsmith): DVR merge cleanup + self.floating_ips_dict = {} + self.root_helper = root_helper + self.use_namespaces = use_namespaces + # Invoke the setter for establishing initial SNAT action + self.router = router + self.ns_name = NS_PREFIX + router_id if use_namespaces else None + self.iptables_manager = iptables_manager.IptablesManager( + root_helper=root_helper, + # FIXME(danwent): use_ipv6=True, + namespace=self.ns_name) + self.routes = [] + # DVR Data + # Linklocal router to floating IP addr + self.rtr_2_fip = None + # Linklocal floating to router IP addr + self.fip_2_rtr = None + self.dist_fip_count = 0 + + @property + def router(self): + return self._router + + @router.setter + def router(self, value): + self._router = value + if not self._router: + return + # enable_snat by default if it wasn't specified by plugin + self._snat_enabled = self._router.get('enable_snat', True) + # Set a SNAT action for the router + if self._router.get('gw_port'): + self._snat_action = ('add_rules' if self._snat_enabled + else 'remove_rules') + elif self.ex_gw_port: + # Gateway port was removed, remove rules + self._snat_action = 'remove_rules' + + def perform_snat_action(self, snat_callback, *args): + # Process SNAT rules for attached subnets + if self._snat_action: + snat_callback(self, self._router.get('gw_port'), + *args, action=self._snat_action) + self._snat_action = None + + +class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, manager.Manager): + + """Manager for L3NatAgent + + API version history: + 1.0 initial Version + 1.1 changed the type of the routers parameter + to the routers_updated method. + It was previously a list of routers in dict format. + It is now a list of router IDs only. + Per rpc versioning rules, it is backwards compatible. 
+ """ + RPC_API_VERSION = '1.1' + + OPTS = [ + cfg.BoolOpt('distributed_agent', default=False, + help=_("Enables distributed router agent function.")), + cfg.BoolOpt('centralized_snat', default=False, + help=_("Enables centralized SNAT in dvr mode.")), + cfg.BoolOpt('centralized_router', default=True, + help=_("Enables centralized router in dvr mode.")), + cfg.StrOpt('external_network_bridge', default='br-ex', + help=_("Name of bridge used for external network " + "traffic.")), + cfg.IntOpt('metadata_port', + default=9697, + help=_("TCP Port used by Neutron metadata namespace " + "proxy.")), + cfg.IntOpt('send_arp_for_ha', + default=3, + help=_("Send this many gratuitous ARPs for HA setup, if " + "less than or equal to 0, the feature is disabled")), + cfg.StrOpt('router_id', default='', + help=_("If namespaces is disabled, the l3 agent can only" + " configure a router that has the matching router " + "ID.")), + cfg.BoolOpt('handle_internal_only_routers', + default=True, + help=_("Agent should implement routers with no gateway")), + cfg.StrOpt('gateway_external_network_id', default='', + help=_("UUID of external network for routers implemented " + "by the agents.")), + cfg.BoolOpt('enable_metadata_proxy', default=True, + help=_("Allow running metadata proxy.")), + cfg.BoolOpt('router_delete_namespaces', default=False, + help=_("Delete namespace after removing a router.")), + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')), + # add by j00209498 + cfg.StrOpt('cascaded_os_region_name', default=None, + help=_("region name to use")), + cfg.StrOpt('cascaded_auth_url', + default='http://127.0.0.1:35357/v2.0', + help=_("keystone auth url to use")), + cfg.StrOpt('cascaded_admin_user_name', + help=_("access neutron user name to use")), + cfg.StrOpt('cascaded_admin_password', + help=_("access neutron password to use")), + cfg.StrOpt('cascaded_tenant_name', + help=_("access neutron tenant to use")), + ] + + def __init__(self, host, conf=None): + if conf: + self.conf = conf + else: + self.conf = cfg.CONF + self.root_helper = config.get_root_helper(self.conf) + self.router_info = {} + + self._check_config_params() + + try: + self.driver = importutils.import_object( + self.conf.interface_driver, + self.conf + ) + except Exception: + msg = _("Error importing interface driver " + "'%s'") % self.conf.interface_driver + LOG.error(msg) + raise SystemExit(1) + + self.context = context.get_admin_context_without_session() + self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) + self.fullsync = True + self.updated_routers = set() + self.removed_routers = set() + self.sync_progress = False + + # TODO(mrsmith): remove once agent restart with + # stale namespaces is supported for DVR + root_ip = ip_lib.IPWrapper(self.root_helper) + host_namespaces = root_ip.get_namespaces(self.root_helper) + snat_namespaces = set(ns for ns in host_namespaces + if ns.startswith(SNAT_NS_PREFIX)) + self._destroy_stale_router_namespaces(snat_namespaces) + fip_namespaces = set(ns for ns in host_namespaces + if ns.startswith(FIP_NS_PREFIX)) + self._destroy_stale_router_namespaces(fip_namespaces) + + self._clean_stale_namespaces = self.conf.use_namespaces + + # added by j00209498 cascading data + self.network_map = {} + self.subnet_map = {} + + # dvr data + self.agent_gateway_port = None + self.agent_fip_count = 0 + self.local_ips = set(xrange(2, 251)) + self.fip_priorities = set(xrange(FIP_PR_ST, FIP_PR_END)) + + self.rpc_loop = 
loopingcall.FixedIntervalLoopingCall( + self._rpc_loop) + self.rpc_loop.start(interval=RPC_LOOP_INTERVAL) + super(L3NATAgent, self).__init__(conf=self.conf) + + self.target_ex_net_id = None + + def _check_config_params(self): + """Check items in configuration files. + + Check for required and invalid configuration items. + The actual values are not verified for correctness. + """ + if not self.conf.interface_driver: + msg = _('An interface driver must be specified') + LOG.error(msg) + raise SystemExit(1) + + if not self.conf.use_namespaces and not self.conf.router_id: + msg = _('Router id is required if not using namespaces.') + LOG.error(msg) + raise SystemExit(1) + + def _cleanup_namespaces(self, routers): + """Destroy stale router namespaces on host when L3 agent restarts + + This routine is called when self._clean_stale_namespaces is True. + + The argument routers is the list of routers that are recorded in + the database as being hosted on this node. + """ + try: + root_ip = ip_lib.IPWrapper(self.root_helper) + + host_namespaces = root_ip.get_namespaces(self.root_helper) + router_namespaces = set(ns for ns in host_namespaces + if ns.startswith(NS_PREFIX)) + ns_to_ignore = set(NS_PREFIX + r['id'] for r in routers) + # TODO(mrsmith): include DVR SNAT namespaces, FIP namespaces + ns_to_destroy = router_namespaces - ns_to_ignore + except RuntimeError: + LOG.exception(_('RuntimeError in obtaining router list ' + 'for namespace cleanup.')) + else: + self._destroy_stale_router_namespaces(ns_to_destroy) + + def _destroy_stale_router_namespaces(self, router_namespaces): + """Destroys the stale router namespaces + + The argumenet router_namespaces is a list of stale router namespaces + + As some stale router namespaces may not be able to be deleted, only + one attempt will be made to delete them. + """ + for ns in router_namespaces: + try: + self._destroy_namespace(ns) + except RuntimeError: + LOG.exception(_('Failed to destroy stale router namespace ' + '%s'), ns) + self._clean_stale_namespaces = False + + def _destroy_namespace(self, ns): + if ns.startswith(NS_PREFIX): + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns) + self._destroy_router_namespace(ns) + elif ns.startswith(FIP_NS_PREFIX): + self._destroy_fip_namespace(ns) + elif ns.startswith(SNAT_NS_PREFIX): + self._destroy_snat_namespace(ns) + + def _destroy_snat_namespace(self, ns_name): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + # delete internal interfaces + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(SNAT_INT_DEV_PREFIX): + LOG.debug('DVR: unplug: %s', d.name) + self.driver.unplug(d.name, namespace=ns_name, + prefix=SNAT_INT_DEV_PREFIX) + + # TODO(mrsmith): delete ext-gw-port + LOG.debug('DVR: destroy snat ns: %s', ns_name) + ns_ip.netns.delete(ns_name) + + def _destroy_fip_namespace(self, ns_name): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(FIP_2_RTR_DEV_PREFIX): + # internal link between IRs and FIP NS + # TODO(mrsmith): remove IR interfaces (IP pool?) 
+ pass + elif d.name.startswith(FIP_EXT_DEV_PREFIX): + # single port from FIP NS to br-ext + # TODO(mrsmith): remove br-ext interface + LOG.debug('DVR: unplug: %s', d.name) + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + LOG.debug('DVR: destroy fip ns: %s', ns_name) + # TODO(mrsmith): add LOG warn if fip count != 0 + ns_ip.netns.delete(ns_name) + self.agent_gateway_port = None + + def _destroy_router_namespace(self, namespace): + ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace) + for d in ns_ip.get_devices(exclude_loopback=True): + if d.name.startswith(INTERNAL_DEV_PREFIX): + # device is on default bridge + self.driver.unplug(d.name, namespace=namespace, + prefix=INTERNAL_DEV_PREFIX) + elif d.name.startswith(EXTERNAL_DEV_PREFIX): + self.driver.unplug(d.name, + bridge=self.conf.external_network_bridge, + namespace=namespace, + prefix=EXTERNAL_DEV_PREFIX) + + if self.conf.router_delete_namespaces: + try: + ns_ip.netns.delete(namespace) + except RuntimeError: + msg = _('Failed trying to delete namespace: %s') + LOG.exception(msg % namespace) + + def _create_namespace(self, name): + ip_wrapper_root = ip_lib.IPWrapper(self.root_helper) + ip_wrapper = ip_wrapper_root.ensure_namespace(name) + LOG.debug('DVR: ns-name: %s', name) + ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) + + def _create_router_namespace(self, ri): + self._create_namespace(ri.ns_name) + + def _fetch_external_net_id(self, force=False): + """Find UUID of single external network for this agent.""" + if self.conf.gateway_external_network_id: + return self.conf.gateway_external_network_id + + # L3 agent doesn't use external_network_bridge to handle external + # networks, so bridge_mappings with provider networks will be used + # and the L3 agent is able to handle any external networks. + if not self.conf.external_network_bridge: + return + + if not force and self.target_ex_net_id: + return self.target_ex_net_id + + try: + self.target_ex_net_id = self.plugin_rpc.get_external_network_id( + self.context) + return self.target_ex_net_id + except rpc_compat.RemoteError as e: + with excutils.save_and_reraise_exception() as ctx: + if e.exc_type == 'TooManyExternalNetworks': + ctx.reraise = False + msg = _( + "The 'gateway_external_network_id' option must be " + "configured for this agent as Neutron has more than " + "one external network.") + raise Exception(msg) + + def _router_added(self, router_id, router): + ri = RouterInfo(router_id, self.root_helper, + self.conf.use_namespaces, router) + self.router_info[router_id] = ri +# may be deleted. by j00209498 +# if self.conf.use_namespaces: +# self._create_router_namespace(ri) +# for c, r in self.metadata_filter_rules(): +# ri.iptables_manager.ipv4['filter'].add_rule(c, r) +# for c, r in self.metadata_nat_rules(): +# ri.iptables_manager.ipv4['nat'].add_rule(c, r) +# ri.iptables_manager.apply() +# super(L3NATAgent, self).process_router_add(ri) +# if self.conf.enable_metadata_proxy: +# self._spawn_metadata_proxy(ri.router_id, ri.ns_name) + + def _router_removed(self, router_id): + ri = self.router_info.get(router_id) + if ri is None: + LOG.warn(_("Info for router %s were not found. 
" + "Skipping router removal"), router_id) + return + ri.router['gw_port'] = None + ri.router[l3_constants.INTERFACE_KEY] = [] + ri.router[l3_constants.FLOATINGIP_KEY] = [] + self.process_router(ri) + for c, r in self.metadata_filter_rules(): + ri.iptables_manager.ipv4['filter'].remove_rule(c, r) + for c, r in self.metadata_nat_rules(): + ri.iptables_manager.ipv4['nat'].remove_rule(c, r) + ri.iptables_manager.apply() + if self.conf.enable_metadata_proxy: + self._destroy_metadata_proxy(ri.router_id, ri.ns_name) + del self.router_info[router_id] + self._destroy_router_namespace(ri.ns_name) + + def _spawn_metadata_proxy(self, router_id, ns_name): + def callback(pid_file): + metadata_proxy_socket = cfg.CONF.metadata_proxy_socket + proxy_cmd = ['neutron-ns-metadata-proxy', + '--pid_file=%s' % pid_file, + '--metadata_proxy_socket=%s' % metadata_proxy_socket, + '--router_id=%s' % router_id, + '--state_path=%s' % self.conf.state_path, + '--metadata_port=%s' % self.conf.metadata_port] + proxy_cmd.extend(config.get_log_args( + cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % + router_id)) + return proxy_cmd + + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.enable(callback) + + def _destroy_metadata_proxy(self, router_id, ns_name): + pm = external_process.ProcessManager( + self.conf, + router_id, + self.root_helper, + ns_name) + pm.disable() + + def get_one_compute_port(self, ri, port): + # Get DVR ports for subnet + if 'id' not in port['subnet'] or ri.router['distributed'] is False: + return + + subnet_ports = ( + self.plugin_rpc.get_ports_by_subnet(self.context, + port['subnet']['id'])) + LOG.debug(_('DVR: subnet_ports: %s'), subnet_ports) + + for p in subnet_ports: + # TODO: check for multiple subnets on port case + if (p['device_owner'] != 'network:router_interface' and + p['device_owner'] != + 'network:router_interface_distributed' and + p['binding:host_id'] == self.conf.host): + return p + + def _set_subnet_arp_info(self, ri, port): + """Get ARP info from Plugin for existing ports for subnet.""" + if 'id' not in port['subnet'] or not ri.router['distributed']: + return + subnet_id = port['subnet']['id'] + subnet_ports = ( + self.plugin_rpc.get_ports_by_subnet(self.context, + subnet_id)) + + for p in subnet_ports: + if (p['device_owner'] not in ( + 'network:router_interface', + 'network:router_interface_distributed')): + for fixed_ip in p['fixed_ips']: + self._update_arp_entry(ri, fixed_ip['ip_address'], + p['mac_address'], + subnet_id, 'add') + + def _set_subnet_info(self, port): + ips = port['fixed_ips'] + if not ips: + raise Exception(_("Router port %s has no IP address") % port['id']) + if len(ips) > 1: + LOG.error(_("Ignoring multiple IPs on router port %s"), + port['id']) + prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen + port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) + + def _get_existing_devices(self, ri): + ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper, + namespace=ri.ns_name) + ip_devs = ip_wrapper.get_devices(exclude_loopback=True) + return [ip_dev.name for ip_dev in ip_devs] + + def get_neutron_client(self): + kwargs = {'auth_token': None, + 'username': self.conf.cascaded_admin_user_name, + 'password': self.conf.cascaded_admin_password, + 'aws_creds': None, + 'tenant': self.conf.cascaded_tenant_name, + # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', #context.values['tenant'], + 'auth_url': self.conf.cascaded_auth_url, + 'roles': self.context.roles, + 'is_admin': self.context.is_admin, + 
'region_name': self.conf.cascaded_os_region_name} + reqCon = neutron_proxy_context.RequestContext(**kwargs) + openStackClients = clients.OpenStackClients(reqCon) + neutronClient = openStackClients.neutron() + return neutronClient + + def create_cascaded_router(self, router_name): + req_props = {'name': router_name} + neutron_client = self.get_neutron_client() + router_ret = neutron_client.create_router({'router': req_props}) + if(not router_ret or + (router_ret and (not router_ret.get('router')))): + LOG.debug(_("cascaded router created failed, " + "router name:%s"), router_name) + return + LOG.debug(_('create router, Response:%s'), str(router_ret)) + return router_ret['router']['id'] + + def get_or_create_cascaded_net_id(self, port): + '''only get cascaded net_id from port binding:profile''' + '''not implement creating cascaded network''' + cascaded_net_id = self.network_map.get(port['network_id']) + if cascaded_net_id: + return cascaded_net_id + profile = port['binding:profile'] + #profile = json.load(profile) + cascaded_net_id_dict = profile.get('cascaded_net_id') + if(not cascaded_net_id_dict): + return + cascaded_net_id_dict = cascaded_net_id_dict.get(port['network_id']) + cascaded_net_id = cascaded_net_id_dict.get(cfg.CONF.host) + if(cascaded_net_id): + self.network_map[port['network_id']] = cascaded_net_id + return cascaded_net_id + + def get_or_create_cascaded_subnet_id(self, subnet_id, port): + '''only get cascaded subnet_id from port binding:profile''' + '''not implement creating cascaded subnet''' + cascaded_subnet_id = \ + self.subnet_map.get(port['fixed_ips'][0]['subnet_id']) + if cascaded_subnet_id: + return cascaded_subnet_id + profile = port['binding:profile'] + #profile = json.load(profile) + cascaded_subnet_id_dict = profile.get('cascaded_subnet_id') + if(not cascaded_subnet_id_dict): + return + cascaded_subnet_id_dict = cascaded_subnet_id_dict.get(subnet_id) + if(not cascaded_subnet_id_dict): + return + cascaded_subnet_id = cascaded_subnet_id_dict.get(cfg.CONF.host) + if(cascaded_subnet_id): + self.subnet_map[port['fixed_ips'][0]['subnet_id']] = \ + cascaded_subnet_id + return cascaded_subnet_id + + def create_cascaded_router_port(self, cascaded_net_id, port): + neutron_client = self.get_neutron_client() + mac_address = port['mac_address'] + ip_address = port['fixed_ips'][0]['ip_address'] + profile = {'cascading_port_id': port['id']} + req_props = {'network_id': cascaded_net_id, + 'name': 'router_port', + 'admin_state_up': True, + 'fixed_ips': [{'ip_address': ip_address}], + 'mac_address': mac_address, + 'binding:profile': profile + } + port_ret = neutron_client.create_port({'port': req_props}) + if(not port_ret or + (port_ret and (not port_ret.get('port')))): + LOG.debug(_("router port created failed, " + "ip_address:%s, mac_address:%s"), + ip_address, mac_address) + return + LOG.debug(_('create router port, Response:%s'), str(port_ret)) + return port_ret['port'].get('id') + + def delete_cascaded_router_port(self, cascaded_port_id): + neutron_client = self.get_neutron_client() + bodyResponse = neutron_client.delete_port(cascaded_port_id) + LOG.debug(_('delete port, Response:%s'), str(bodyResponse)) + return bodyResponse + + def add_interface_for_cascaded_router(self, cascaded_router_id, + cascaded_subnet_id, + cascaded_port_id): + neutron_client = self.get_neutron_client() + #'subnet_id': cascaded_subnet_id, + req_props = {'port_id': cascaded_port_id} + ret = neutron_client.add_interface_router(cascaded_router_id, + req_props) + LOG.debug(_('add interface for router 
port, Response:%s'), str(ret)) + return + + def delete_interface_for_cascaded_router(self, cascaded_router_id, + cascaded_subnet_id): + neutron_client = self.get_neutron_client() + req_props = {'subnet_id': cascaded_subnet_id} + ret = neutron_client.remove_interface_router(cascaded_router_id, + req_props) + LOG.debug(_('delete interface for router port, Response:%s'), str(ret)) + return + + def process_router(self, ri): + # TODO(mrsmith) - we shouldn't need to check here + if 'distributed' not in ri.router: + ri.router['distributed'] = False +# ri.iptables_manager.defer_apply_on() +# ex_gw_port = self._get_ex_gw_port(ri) + internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) +# snat_ports = ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) + existing_port_ids = set([p['id'] for p in ri.internal_ports]) + current_port_ids = set([p['id'] for p in internal_ports + if p['admin_state_up']]) + new_ports = [p for p in internal_ports if + p['id'] in current_port_ids and + p['id'] not in existing_port_ids] + old_ports = [p for p in ri.internal_ports if + p['id'] not in current_port_ids] + if(len(existing_port_ids) == 0 and len(internal_ports) > 0 and + not ri.cascaded_router_id): + router_name = ri.router['name'] + router_id = self.create_cascaded_router(router_name) + if(not router_id): + LOG.error(_('ERR: can not create cascaded router: %s'), + router_name) + return + ri.cascaded_router_id = router_id + for p in new_ports: + vm_port = self.get_one_compute_port(ri, p) + cascaded_net_id = self.get_or_create_cascaded_net_id(vm_port) + if(not cascaded_net_id): + return + cascaded_subnet_id = \ + self.get_or_create_cascaded_subnet_id( + p['fixed_ips'][0]['subnet_id'], + vm_port) + if(not cascaded_subnet_id): + return + cascaded_port_id = \ + self.create_cascaded_router_port(cascaded_net_id, p) + if(not cascaded_port_id): + return + p['cascaded_port_id'] = cascaded_port_id + if(not ri.cascaded_router_id): + return + self.add_interface_for_cascaded_router(ri.cascaded_router_id, + cascaded_subnet_id, + cascaded_port_id) + # deleted by j00209498 +# self._set_subnet_info(p) +# self.internal_network_added(ri, p['network_id'], p['id'], +# p['ip_cidr'], p['mac_address']) + + ri.internal_ports.append(p) +# self._set_subnet_arp_info(ri, p) + + for p in old_ports: + # self.internal_network_removed(ri, p['id'], p['ip_cidr']) + cascaded_subnet_id = self.subnet_map.get( + p['fixed_ips'][0]['subnet_id']) + if(not cascaded_subnet_id): + LOG.error(_('ERR: can not delete interface for cascaded' + ' router, not find cascaded_subnet_id!')) + return + self.delete_interface_for_cascaded_router(ri.cascaded_router_id, + cascaded_subnet_id) + # self.delete_cascaded_router_port(p['cascaded_port_id']) + ri.internal_ports.remove(p) + + # not support external network, so return. 
by j00209498 + # return + # by j00209498 +# existing_devices = self._get_existing_devices(ri) +# current_internal_devs = set([n for n in existing_devices +# if n.startswith(INTERNAL_DEV_PREFIX)]) +# current_port_devs = set([self.get_internal_device_name(id) for +# id in current_port_ids]) +# stale_devs = current_internal_devs - current_port_devs +# for stale_dev in stale_devs: +# LOG.debug(_('Deleting stale internal router device: %s'), +# stale_dev) +# self.driver.unplug(stale_dev, +# namespace=ri.ns_name, +# prefix=INTERNAL_DEV_PREFIX) +# +# Get IPv4 only internal CIDRs +# internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports +# if netaddr.IPNetwork(p['ip_cidr']).version == 4] +# TODO(salv-orlando): RouterInfo would be a better place for +# this logic too +# ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or +# ri.ex_gw_port and ri.ex_gw_port['id']) +# +# interface_name = None +# if ex_gw_port_id: +# interface_name = self.get_external_device_name(ex_gw_port_id) +# if ex_gw_port and ex_gw_port != ri.ex_gw_port: +# self._set_subnet_info(ex_gw_port) +# self.external_gateway_added(ri, ex_gw_port, +# interface_name, internal_cidrs) +# elif not ex_gw_port and ri.ex_gw_port: +# self.external_gateway_removed(ri, ri.ex_gw_port, +# interface_name, internal_cidrs) +# +# stale_devs = [dev for dev in existing_devices +# if dev.startswith(EXTERNAL_DEV_PREFIX) +# and dev != interface_name] +# for stale_dev in stale_devs: +# LOG.debug(_('Deleting stale external router device: %s'), +# stale_dev) +# self.driver.unplug(stale_dev, +# bridge=self.conf.external_network_bridge, +# namespace=ri.ns_name, +# prefix=EXTERNAL_DEV_PREFIX) +# +# Process static routes for router +# self.routes_updated(ri) +# Process SNAT rules for external gateway +# if (not ri.router['distributed'] or +# ex_gw_port and ri.router['gw_port_host'] == self.host): +# ri.perform_snat_action(self._handle_router_snat_rules, +# internal_cidrs, interface_name) +# +# Process SNAT/DNAT rules for floating IPs +# fip_statuses = {} +# try: +# if ex_gw_port: +# existing_floating_ips = ri.floating_ips +# self.process_router_floating_ip_nat_rules(ri) +# ri.iptables_manager.defer_apply_off() +# Once NAT rules for floating IPs are safely in place +# configure their addresses on the external gateway port +# fip_statuses = self.process_router_floating_ip_addresses( +# ri, ex_gw_port) +# except Exception: +# TODO(salv-orlando): Less broad catching +# All floating IPs must be put in error state +# for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): +# fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR +# +# if ex_gw_port: +# Identify floating IPs which were disabled +# ri.floating_ips = set(fip_statuses.keys()) +# for fip_id in existing_floating_ips - ri.floating_ips: +# fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN +# Update floating IP status on the neutron server +# self.plugin_rpc.update_floatingip_statuses( +# self.context, ri.router_id, fip_statuses) +# +# Update ex_gw_port and enable_snat on the router info cache +# ri.ex_gw_port = ex_gw_port +# ri.snat_ports = snat_ports +# ri.enable_snat = ri.router.get('enable_snat') + + def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, + interface_name, action): + # Remove all the rules + # This is safe because if use_namespaces is set as False + # then the agent can only configure one router, otherwise + # each router's SNAT rules will be in their own namespace + if ri.router['distributed']: + iptables_manager = ri.snat_iptables_manager + else: + 
iptables_manager = ri.iptables_manager + + iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') + iptables_manager.ipv4['nat'].empty_chain('snat') + + if not ri.router['distributed']: + # Add back the jump to float-snat + iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') + + # And add them back if the action is add_rules + if action == 'add_rules' and ex_gw_port: + # ex_gw_port should not be None in this case + # NAT rules are added only if ex_gw_port has an IPv4 address + for ip_addr in ex_gw_port['fixed_ips']: + ex_gw_ip = ip_addr['ip_address'] + if netaddr.IPAddress(ex_gw_ip).version == 4: + rules = self.external_gateway_nat_rules(ex_gw_ip, + internal_cidrs, + interface_name) + for rule in rules: + iptables_manager.ipv4['nat'].add_rule(*rule) + break + iptables_manager.apply() + + def _handle_router_fip_nat_rules(self, ri, interface_name, action): + """Configures NAT rules for Floating IPs for DVR. + + Remove all the rules. This is safe because if + use_namespaces is set as False then the agent can + only configure one router, otherwise each router's + NAT rules will be in their own namespace. + """ + ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') + ri.iptables_manager.ipv4['nat'].empty_chain('snat') + + # Add back the jump to float-snat + ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') + + # And add them back if the action if add_rules + if action == 'add_rules' and interface_name: + rule = ('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name}) + ri.iptables_manager.ipv4['nat'].add_rule(*rule) + ri.iptables_manager.apply() + + def process_router_floating_ip_nat_rules(self, ri): + """Configure NAT rules for the router's floating IPs. + + Configures iptables rules for the floating ips of the given router + """ + # Clear out all iptables rules for floating ips + ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') + + floating_ips = self.get_floating_ips(ri) + # Loop once to ensure that floating ips are configured. + for fip in floating_ips: + # Rebuild iptables rules for the floating ip. + fixed = fip['fixed_ip_address'] + fip_ip = fip['floating_ip_address'] + for chain, rule in self.floating_forward_rules(fip_ip, fixed): + ri.iptables_manager.ipv4['nat'].add_rule(chain, rule, + tag='floating_ip') + + ri.iptables_manager.apply() + + def process_router_floating_ip_addresses(self, ri, ex_gw_port): + """Configure IP addresses on router's external gateway interface. + + Ensures addresses for existing floating IPs and cleans up + those that should not longer be configured. 
+ """ + fip_statuses = {} + + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + if ri.router['distributed']: + # filter out only FIPs for this host/agent + floating_ips = [i for i in floating_ips if i['host'] == self.host] + if floating_ips and self.agent_gateway_port is None: + self._create_agent_gateway_port(ri, floating_ips[0] + ['floating_network_id']) + + if self.agent_gateway_port: + if floating_ips and ri.dist_fip_count == 0: + self.create_rtr_2_fip_link(ri, floating_ips[0] + ['floating_network_id']) + interface_name = self.get_rtr_int_device_name(ri.router_id) + else: + # there are no fips or agent port, no work to do + return fip_statuses + else: + interface_name = self.get_external_device_name(ex_gw_port['id']) + + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + existing_cidrs = set([addr['cidr'] for addr in device.addr.list()]) + new_cidrs = set() + + # Loop once to ensure that floating ips are configured. + for fip in floating_ips: + fip_ip = fip['floating_ip_address'] + ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX + + new_cidrs.add(ip_cidr) + + if ip_cidr not in existing_cidrs: + net = netaddr.IPNetwork(ip_cidr) + try: + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + except (processutils.UnknownArgumentError, + processutils.ProcessExecutionError): + # any exception occurred here should cause the floating IP + # to be set in error state + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ERROR) + LOG.warn(_("Unable to configure IP address for " + "floating IP: %s"), fip['id']) + continue + if ri.router['distributed']: + # Special Handling for DVR - update FIP namespace + # and ri.namespace to handle DVR based FIP + self.floating_ip_added_dist(ri, fip) + else: + # As GARP is processed in a distinct thread the call below + # won't raise an exception to be handled. + self._send_gratuitous_arp_packet( + ri.ns_name, interface_name, fip_ip) + fip_statuses[fip['id']] = ( + l3_constants.FLOATINGIP_STATUS_ACTIVE) + + # Clean up addresses that no longer belong on the gateway interface. 
+ for ip_cidr in existing_cidrs - new_cidrs: + if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX): + net = netaddr.IPNetwork(ip_cidr) + device.addr.delete(net.version, ip_cidr) + if ri.router['distributed']: + self.floating_ip_removed_dist(ri, ip_cidr) + return fip_statuses + + def _get_ex_gw_port(self, ri): + return ri.router.get('gw_port') + + def _arping(self, ns_name, interface_name, ip_address, dist=None): + if dist: + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ns_name) + ip_cidr = str(ip_address) + FLOATING_IP_CIDR_SUFFIX + net = netaddr.IPNetwork(ip_cidr) + device.addr.add(net.version, ip_cidr, str(net.broadcast)) + + arping_cmd = ['arping', '-A', + '-I', interface_name, + '-c', self.conf.send_arp_for_ha, + ip_address] + try: + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ns_name) + ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) + except Exception as e: + LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) + if dist: + device.addr.delete(net.version, ip_cidr) + + def _send_gratuitous_arp_packet(self, ns_name, interface_name, ip_address, + dist=None): + if self.conf.send_arp_for_ha > 0: + eventlet.spawn_n(self._arping, ns_name, interface_name, ip_address, + dist) + + def get_internal_port(self, ri, subnet_id): + """Returns internal router port based on subnet_id.""" + router_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + for port in router_ports: + fips = port['fixed_ips'] + for f in fips: + if f['subnet_id'] == subnet_id: + return port + + def get_internal_device_name(self, port_id): + return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_external_device_name(self, port_id): + return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_ext_device_name(self, port_id): + return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_rtr_int_device_name(self, router_id): + return (RTR_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_int_device_name(self, router_id): + return (FIP_2_RTR_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] + + def get_snat_int_device_name(self, port_id): + return (SNAT_INT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] + + def get_fip_ns_name(self, ext_net_id): + return (FIP_NS_PREFIX + ext_net_id) + + def get_snat_ns_name(self, ext_gw_port_id): + return (SNAT_NS_PREFIX + ext_gw_port_id) + + def get_snat_interfaces(self, ri): + return ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) + + def get_floating_ips(self, ri): + """Filters Floating IPs for DVR to be hosted on this agent.""" + floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) + if ri.router['distributed']: + floating_ips = [i for i in floating_ips if i['host'] == self.host] + return floating_ips + + def _map_internal_interfaces(self, ri, int_port, snat_ports): + """Returns the SNAT port for the given internal interface port.""" + fixed_ip = int_port['fixed_ips'][0] + subnet_id = fixed_ip['subnet_id'] + match_port = [p for p in snat_ports if + p['fixed_ips'][0]['subnet_id'] == subnet_id] + if match_port: + return match_port[0] + else: + LOG.debug('DVR: no map match_port found!') + + def _create_dvr_gateway(self, ri, ex_gw_port, gw_interface_name, + internal_cidrs, snat_ports): + """Create SNAT namespace.""" + snat_ns_name = self.get_snat_ns_name(ex_gw_port['id']) + self._create_namespace(snat_ns_name) + # connect snat_ports to br_int from SNAT namespace + for port in snat_ports: + # create interface_name + self._set_subnet_info(port) + 
interface_name = self.get_snat_int_device_name(port['id']) + self._internal_network_added(snat_ns_name, port['network_id'], + port['id'], port['ip_cidr'], + port['mac_address'], interface_name, + SNAT_INT_DEV_PREFIX) + self._external_gateway_added(ri, ex_gw_port, gw_interface_name, + internal_cidrs, snat_ns_name, + preserve_ips=[]) + ri.snat_iptables_manager = ( + iptables_manager.IptablesManager( + root_helper=self.root_helper, namespace=snat_ns_name + ) + ) + + def external_gateway_added(self, ri, ex_gw_port, + interface_name, internal_cidrs): + if ri.router['distributed']: + snat_ports = self.get_snat_interfaces(ri) + for p in ri.internal_ports: + gateway = self._map_internal_interfaces(ri, p, snat_ports) + id_name = self.get_internal_device_name(p['id']) + self._snat_redirect_add(ri, gateway['fixed_ips'][0] + ['ip_address'], p, id_name) + + if self.conf.centralized_snat and ( + ri.router['gw_port_host'] == self.host): + if snat_ports: + self._create_dvr_gateway(ri, ex_gw_port, + interface_name, + internal_cidrs, snat_ports) + for port in snat_ports: + for ip in port['fixed_ips']: + self._update_arp_entry(ri, ip['ip_address'], + port['mac_address'], + ip['subnet_id'], 'add') + return + + # Compute a list of addresses this router is supposed to have. + # This avoids unnecessarily removing those addresses and + # causing a momentarily network outage. + floating_ips = self.get_floating_ips(ri) + preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX + for ip in floating_ips] + + self._external_gateway_added(ri, ex_gw_port, interface_name, + internal_cidrs, ri.ns_name, + preserve_ips) + + def _external_gateway_added(self, ri, ex_gw_port, interface_name, + internal_cidrs, ns_name, preserve_ips): + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=EXTERNAL_DEV_PREFIX) + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ns_name, + gateway=ex_gw_port['subnet'].get('gateway_ip'), + extra_subnets=ex_gw_port.get('extra_subnets', []), + preserve_ips=preserve_ips) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ns_name, + interface_name, ip_address) + + def agent_gateway_added(self, ns_name, ex_gw_port, + interface_name): + """Adds Floating IP gateway port to FIP namespace.""" + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(ex_gw_port['network_id'], + ex_gw_port['id'], interface_name, + ex_gw_port['mac_address'], + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + + self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], + namespace=ns_name) + ip_address = ex_gw_port['ip_cidr'].split('/')[0] + self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) + + gw_ip = ex_gw_port['subnet']['gateway_ip'] + if gw_ip: + ipd = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ns_name) + ipd.route.add_gateway(gw_ip) + + cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def internal_ns_interface_added(self, ip_cidr, + interface_name, ns_name): + ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) + 
ip_wrapper.netns.execute(['ip', 'addr', 'add', + ip_cidr, 'dev', interface_name]) + + def external_gateway_removed(self, ri, ex_gw_port, + interface_name, internal_cidrs): + if ri.router['distributed']: + for p in ri.internal_ports: + internal_interface = self.get_internal_device_name(p['id']) + self._snat_redirect_remove(ri, p, internal_interface) + + if self.conf.centralized_snat and ( + ex_gw_port['binding:host_id'] == self.host): + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + else: + # not hosting agent - no work to do + LOG.debug('DVR: CSNAT not hosted: %s', ex_gw_port) + return + else: + ns_name = ri.ns_name + + self.driver.unplug(interface_name, + bridge=self.conf.external_network_bridge, + namespace=ns_name, + prefix=EXTERNAL_DEV_PREFIX) + if ri.router['distributed']: + self._destroy_snat_namespace(ns_name) + + def metadata_filter_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' + '-p tcp -m tcp --dport %s ' + '-j ACCEPT' % self.conf.metadata_port)) + return rules + + def metadata_nat_rules(self): + rules = [] + if self.conf.enable_metadata_proxy: + rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j REDIRECT ' + '--to-port %s' % self.conf.metadata_port)) + return rules + + def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs, + interface_name): + rules = [('POSTROUTING', '! -i %(interface_name)s ' + '! -o %(interface_name)s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % + {'interface_name': interface_name})] + for cidr in internal_cidrs: + rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr)) + return rules + + def _gen_snat_idx(self, cidr): + """Generate index based on cidr for SNAT entries.""" + ip = cidr.split('/')[0] + ip_str = ip.split('.') + ip_num = (((int(ip_str[0])) << 24) + ((int(ip_str[1])) << 16) + + ((int(ip_str[2])) << 8) + (int(ip_str[3]))) + return ip_num + + def _snat_redirect_add(self, ri, gateway, sn_port, sn_int): + """Adds rules and routes for SNAT redirection.""" + try: + snat_idx = self._gen_snat_idx(sn_port['ip_cidr']) + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, + namespace=ri.ns_name) + ns_ipd.route.add_gateway(gateway, table=snat_idx) + ns_ipr.add_rule_from(sn_port['ip_cidr'], snat_idx, snat_idx) + ns_ipr.netns.execute(['sysctl', '-w', + 'net.ipv4.conf.all.send_redirects=0']) + ns_ipr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 
+ 'send_redirects=0' % sn_int]) + except Exception: + LOG.exception(_('DVR: error adding redirection logic')) + + def _snat_redirect_remove(self, ri, sn_port, sn_int): + """Removes rules and routes for SNAT redirection.""" + try: + snat_idx = self._gen_snat_idx(sn_port['ip_cidr']) + ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, + namespace=ri.ns_name) + ns_ipd.route.delete_gateway(table=snat_idx) + ns_ipr.delete_rule_priority(snat_idx) + except Exception: + LOG.exception(_('DVR: removed snat failed')) + + def _internal_network_added(self, ns_name, network_id, port_id, + internal_cidr, mac_address, + interface_name, prefix): + if not ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.plug(network_id, port_id, interface_name, mac_address, + namespace=ns_name, + prefix=prefix) + + self.driver.init_l3(interface_name, [internal_cidr], + namespace=ns_name) + ip_address = internal_cidr.split('/')[0] + self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) + + def internal_network_added(self, ri, port): + network_id = port['network_id'] + port_id = port['id'] + internal_cidr = port['ip_cidr'] + mac_address = port['mac_address'] + + interface_name = self.get_internal_device_name(port_id) + + self._internal_network_added(ri.ns_name, network_id, port_id, + internal_cidr, mac_address, + interface_name, INTERNAL_DEV_PREFIX) + + if ri.router['distributed'] and ri.ex_gw_port: + ex_gw_port = ri.ex_gw_port + snat_ports = self.get_snat_interfaces(ri) + snat_ip = self._map_internal_interfaces(ri, port, snat_ports) + self._snat_redirect_add(ri, snat_ip['fixed_ips'][0] + ['ip_address'], port, interface_name) + if self.conf.centralized_snat and ( + ri.router['gw_port_host'] == self.host): + for port in snat_ports: + self._set_subnet_info(port) + interface_name = self.get_snat_int_device_name(port['id']) + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + self._internal_network_added(ns_name, port['network_id'], + port['id'], internal_cidr, + port['mac_address'], + interface_name, + SNAT_INT_DEV_PREFIX) + + def internal_network_removed(self, ri, port): + port_id = port['id'] + interface_name = self.get_internal_device_name(port_id) + if ri.router['distributed'] and ri.ex_gw_port: + # DVR handling code for SNAT + ex_gw_port = ri.ex_gw_port + self._snat_redirect_remove(ri, port, interface_name) + if self.conf.centralized_snat and ( + ri.ex_gw_port['binding:host_id'] == self.host): + snat_port = self._map_internal_interfaces(ri, port, + ri.snat_ports) + snat_interface = ( + self.get_snat_int_device_name(snat_port['id']) + ) + ns_name = self.get_snat_ns_name(ex_gw_port['id']) + prefix = SNAT_INT_DEV_PREFIX + if ip_lib.device_exists(snat_interface, + root_helper=self.root_helper, + namespace=ns_name): + self.driver.unplug(snat_interface, namespace=ns_name, + prefix=prefix) + + if ip_lib.device_exists(interface_name, + root_helper=self.root_helper, + namespace=ri.ns_name): + self.driver.unplug(interface_name, namespace=ri.ns_name, + prefix=INTERNAL_DEV_PREFIX) + + def internal_network_nat_rules(self, ex_gw_ip, internal_cidr): + rules = [('snat', '-s %s -j SNAT --to-source %s' % + (internal_cidr, ex_gw_ip))] + return rules + + def _create_agent_gateway_port(self, ri, network_id): + """Creates Floating IP gateway port. + + Request port creation from Plugin then creates + Floating IP namespace and adds gateway port. 
+ """ + # Port does not exist, request port from plugin + self.agent_gateway_port = ( + self.plugin_rpc.get_agent_gateway_port( + self.context, network_id)) + if 'subnet' not in self.agent_gateway_port: + LOG.error(_('Missing subnet/agent_gateway_port')) + return + self._set_subnet_info(self.agent_gateway_port) + + # add fip-namespace and agent_gateway_port + fip_ns_name = ( + self.get_fip_ns_name(str(network_id))) + self._create_namespace(fip_ns_name) + interface_name = ( + self.get_fip_ext_device_name(self.agent_gateway_port['id'])) + self.agent_gateway_added(fip_ns_name, self.agent_gateway_port, + interface_name) + + def create_rtr_2_fip_link(self, ri, network_id): + """Creates interface between router and Floating IP namespace.""" + rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + fip_ns_name = self.get_fip_ns_name(str(network_id)) + + # add link local IP to interface + if ri.rtr_2_fip is None: + ri.rtr_2_fip = FIP_LL_PREFIX + str(self.local_ips.pop()) + if ri.fip_2_rtr is None: + ri.fip_2_rtr = FIP_LL_PREFIX + str(self.local_ips.pop()) + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + int_dev = ip_wrapper.add_veth(rtr_2_fip_name, + fip_2_rtr_name, fip_ns_name) + self.internal_ns_interface_added(ri.rtr_2_fip + '/31', + rtr_2_fip_name, ri.ns_name) + self.internal_ns_interface_added(ri.fip_2_rtr + '/31', + fip_2_rtr_name, fip_ns_name) + int_dev[0].link.set_up() + int_dev[1].link.set_up() + # add default route for the link local interface + device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, + namespace=ri.ns_name) + device.route.add_gateway(ri.fip_2_rtr, table=FIP_RT_TBL) + # setup the NAT rules and chains + self._handle_router_fip_nat_rules(ri, rtr_2_fip_name, 'add_rules') + + def floating_ip_added_dist(self, ri, fip): + """Adds floating IP to FIP namespace.""" + floating_ip = fip['floating_ip_address'] + fixed_ip = fip['fixed_ip_address'] + rule_pr = self.fip_priorities.pop() + ri.floating_ips_dict[floating_ip] = rule_pr + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + ipRule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + ipRule.add_rule_from(fixed_ip, FIP_RT_TBL, rule_pr) + + # Add routing rule in fip namespace + fip_cidr = str(floating_ip) + FLOATING_IP_CIDR_SUFFIX + fip_ns_name = self.get_fip_ns_name(str(fip['floating_network_id'])) + device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, + namespace=fip_ns_name) + device.route.add_route(fip_cidr, ri.rtr_2_fip) + interface_name = ( + self.get_fip_ext_device_name(self.agent_gateway_port['id'])) + self._send_gratuitous_arp_packet(fip_ns_name, + interface_name, floating_ip, + dist=True) + # update internal structures + self.agent_fip_count = self.agent_fip_count + 1 + ri.dist_fip_count = ri.dist_fip_count + 1 + + def floating_ip_removed_dist(self, ri, fip_cidr): + """Removes floating IP from FIP namespace.""" + floating_ip = fip_cidr.split('/')[0] + rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) + fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) + fip_ns_name = self.get_fip_ns_name(str(self._fetch_external_net_id())) + ip_rule_rtr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) + if floating_ip in ri.floating_ips_dict: + rule_pr = ri.floating_ips_dict[floating_ip] + # TODO(rajeev): Handle else case - exception/log? 
+ else: + rule_pr = None + + ip_rule_rtr.delete_rule_priority(rule_pr) + self.fip_priorities.add(rule_pr) + device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, + namespace=fip_ns_name) + + device.route.delete_route(fip_cidr, ri.rtr_2_fip) + # check if this is the last FIP for this router + ri.dist_fip_count = ri.dist_fip_count - 1 + if ri.dist_fip_count == 0: + # remove default route entry + device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, + namespace=ri.ns_name) + device.route.delete_gateway(ri.fip_2_rtr, table=FIP_RT_TBL) + self.local_ips.add(ri.rtr_2_fip.rsplit('.', 1)[1]) + ri.rtr_2_fip = None + self.local_ips.add(ri.fip_2_rtr.rsplit('.', 1)[1]) + ri.fip_2_rtr = None + # TODO(mrsmith): remove interface + # clean up fip-namespace if this is the last FIP + self.agent_fip_count = self.agent_fip_count - 1 + if self.agent_fip_count == 0: + self._destroy_fip_namespace(fip_ns_name) + + def floating_forward_rules(self, floating_ip, fixed_ip): + return [('PREROUTING', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % + (floating_ip, fixed_ip)), + ('float-snat', '-s %s -j SNAT --to %s' % + (fixed_ip, floating_ip))] + + def router_deleted(self, context, router_id): + """Deal with router deletion RPC message.""" + LOG.debug(_('Got router deleted notification for %s'), router_id) + self.removed_routers.add(router_id) + + def _update_arp_entry(self, ri, ip, mac, subnet_id, operation): + """Add or delete arp entry into router namespace.""" + port = self.get_internal_port(ri, subnet_id) + if 'id' in port: + ip_cidr = str(ip) + '/32' + try: + # TODO(mrsmith): optimize the calls below for bulk calls + net = netaddr.IPNetwork(ip_cidr) + interface_name = self.get_internal_device_name(port['id']) + device = ip_lib.IPDevice(interface_name, self.root_helper, + namespace=ri.ns_name) + if operation == 'add': + device.neigh.add(net.version, ip, mac) + elif operation == 'delete': + device.neigh.delete(net.version, ip, mac) + except Exception: + LOG.exception(_("DVR: Failed updating arp entry")) + self.fullsync = True + + def add_arp_entry(self, context, payload): + """Adds arp entry into router namespace from RPC.""" + arp_table = payload['arp_table'] + router_id = payload['router_id'] + ip = arp_table['ip_address'] + mac = arp_table['mac_address'] + subnet_id = arp_table['subnet_id'] + ri = self.router_info.get(router_id) + self._update_arp_entry(ri, ip, mac, subnet_id, 'add') + + def delete_arp_entry(self, context, payload): + """Deletes arp entry into router namespace from RPC.""" + arp_table = payload['arp_table'] + router_id = payload['router_id'] + ip = arp_table['ip_address'] + mac = arp_table['mac_address'] + subnet_id = arp_table['subnet_id'] + ri = self.router_info.get(router_id) + self._update_arp_entry(ri, ip, mac, subnet_id, 'delete') + + def routers_updated(self, context, routers): + """Deal with routers modification and creation RPC message.""" + LOG.debug(_('Got routers updated notification :%s'), routers) + if routers: + # This is needed for backward compatibility + if isinstance(routers[0], dict): + routers = [router['id'] for router in routers] + self.updated_routers.update(routers) + + def router_removed_from_agent(self, context, payload): + LOG.debug(_('Got router removed from agent :%r'), payload) + self.removed_routers.add(payload['router_id']) + + def router_added_to_agent(self, context, payload): + LOG.debug(_('Got router added to agent :%r'), payload) + self.routers_updated(context, payload) + + def _process_routers(self, 
routers, all_routers=False): + pool = eventlet.GreenPool() + if (self.conf.external_network_bridge and + not ip_lib.device_exists(self.conf.external_network_bridge)): + LOG.error(_("The external network bridge '%s' does not exist"), + self.conf.external_network_bridge) + return + + target_ex_net_id = self._fetch_external_net_id() + # if routers are all the routers we have (They are from router sync on + # starting or when error occurs during running), we seek the + # routers which should be removed. + # If routers are from server side notification, we seek them + # from subset of incoming routers and ones we have now. + if all_routers: + prev_router_ids = set(self.router_info) + else: + prev_router_ids = set(self.router_info) & set( + [router['id'] for router in routers]) + cur_router_ids = set() + for r in routers: + # If namespaces are disabled, only process the router associated + # with the configured agent id. + if (not self.conf.use_namespaces and + r['id'] != self.conf.router_id): + continue + ex_net_id = (r['external_gateway_info'] or {}).get('network_id') + if not ex_net_id and not self.conf.handle_internal_only_routers: + continue + if (target_ex_net_id and ex_net_id and + ex_net_id != target_ex_net_id): + # Double check that our single external_net_id has not changed + # by forcing a check by RPC. + if (ex_net_id != self._fetch_external_net_id(force=True)): + continue + cur_router_ids.add(r['id']) + if r['id'] not in self.router_info: + self._router_added(r['id'], r) + ri = self.router_info[r['id']] + ri.router = r + pool.spawn_n(self.process_router, ri) + # identify and remove routers that no longer exist + for router_id in prev_router_ids - cur_router_ids: + pool.spawn_n(self._router_removed, router_id) + pool.waitall() + + @lockutils.synchronized('l3-agent', 'neutron-') + def _rpc_loop(self): + # _rpc_loop and _sync_routers_task will not be + # executed in the same time because of lock. + # so we can clear the value of updated_routers + # and removed_routers, but they can be updated by + # updated_routers and removed_routers rpc call + try: + LOG.debug(_("Starting RPC loop for %d updated routers"), + len(self.updated_routers)) + if self.updated_routers: + # We're capturing and clearing the list, and will + # process the "captured" updates in this loop, + # and any updates that happen due to a context switch + # will be picked up on the next pass. 
+ updated_routers = set(self.updated_routers) + self.updated_routers.clear() + router_ids = list(updated_routers) + routers = self.plugin_rpc.get_routers( + self.context, router_ids) + # routers with admin_state_up=false will not be in the fetched + fetched = set([r['id'] for r in routers]) + self.removed_routers.update(updated_routers - fetched) + + self._process_routers(routers) + self._process_router_delete() + LOG.debug(_("RPC loop successfully completed")) + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + def _process_router_delete(self): + current_removed_routers = list(self.removed_routers) + for router_id in current_removed_routers: + self._router_removed(router_id) + self.removed_routers.remove(router_id) + + def _router_ids(self): + if not self.conf.use_namespaces: + return [self.conf.router_id] + + @periodic_task.periodic_task + @lockutils.synchronized('l3-agent', 'neutron-') + def _sync_routers_task(self, context): + if self.services_sync: + super(L3NATAgent, self).process_services_sync(context) + LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), + self.fullsync) + if not self.fullsync: + return + try: + router_ids = self._router_ids() + self.updated_routers.clear() + self.removed_routers.clear() + routers = self.plugin_rpc.get_routers( + context, router_ids) + + LOG.debug(_('Processing :%r'), routers) + self._process_routers(routers, all_routers=True) + self.fullsync = False + LOG.debug(_("_sync_routers_task successfully completed")) + except rpc_compat.RPCException: + LOG.exception(_("Failed synchronizing routers due to RPC error")) + self.fullsync = True + return + except Exception: + LOG.exception(_("Failed synchronizing routers")) + self.fullsync = True + + # Resync is not necessary for the cleanup of stale + # namespaces. 
+ if self._clean_stale_namespaces: + self._cleanup_namespaces(routers) + + def after_start(self): + LOG.info(_("L3 agent started")) + + def _update_routing_table(self, ri, operation, route): + cmd = ['ip', 'route', operation, 'to', route['destination'], + 'via', route['nexthop']] + ip_wrapper = ip_lib.IPWrapper(self.root_helper, + namespace=ri.ns_name) + ip_wrapper.netns.execute(cmd, check_exit_code=False) + + def routes_updated(self, ri): + new_routes = ri.router['routes'] + old_routes = ri.routes + adds, removes = common_utils.diff_list_of_dict(old_routes, + new_routes) + for route in adds: + LOG.debug(_("Added route entry is '%s'"), route) + # remove replaced route from deleted route + for del_route in removes: + if route['destination'] == del_route['destination']: + removes.remove(del_route) + # replace success even if there is no existing route + self._update_routing_table(ri, 'replace', route) + for route in removes: + LOG.debug(_("Removed route entry is '%s'"), route) + self._update_routing_table(ri, 'delete', route) + ri.routes = new_routes + + +class L3NATAgentWithStateReport(L3NATAgent): + + def __init__(self, host, conf=None): + super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) + self.agent_state = { + 'binary': 'neutron-l3-agent', + 'host': host, + 'topic': topics.L3_AGENT, + 'configurations': { + 'distributed_agent': self.conf.distributed_agent, + 'centralized_snat': self.conf.centralized_snat, + 'centralized_router': self.conf.centralized_router, + 'use_namespaces': self.conf.use_namespaces, + 'router_id': self.conf.router_id, + 'handle_internal_only_routers': + self.conf.handle_internal_only_routers, + 'external_network_bridge': self.conf.external_network_bridge, + 'gateway_external_network_id': + self.conf.gateway_external_network_id, + 'interface_driver': self.conf.interface_driver}, + 'start_flag': True, + 'agent_type': l3_constants.AGENT_TYPE_L3} + report_interval = cfg.CONF.AGENT.report_interval + self.use_call = True + if report_interval: + self.heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + self.heartbeat.start(interval=report_interval) + + def _report_state(self): + LOG.debug(_("Report state task started")) + num_ex_gw_ports = 0 + num_interfaces = 0 + num_floating_ips = 0 + router_infos = self.router_info.values() + num_routers = len(router_infos) + for ri in router_infos: + ex_gw_port = self._get_ex_gw_port(ri) + if ex_gw_port: + num_ex_gw_ports += 1 + num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, + [])) + num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, + [])) + configurations = self.agent_state['configurations'] + configurations['routers'] = num_routers + configurations['ex_gw_ports'] = num_ex_gw_ports + configurations['interfaces'] = num_interfaces + configurations['floating_ips'] = num_floating_ips + try: + self.state_rpc.report_state(self.context, self.agent_state, + self.use_call) + self.agent_state.pop('start_flag', None) + self.use_call = False + LOG.debug(_("Report state task successfully completed")) + except AttributeError: + # This means the server does not support report_state + LOG.warn(_("Neutron server does not support state report." 
+ " State report for this agent will be disabled.")) + self.heartbeat.stop() + return + except Exception: + LOG.exception(_("Failed reporting state!")) + + def agent_updated(self, context, payload): + """Handle the agent_updated notification event.""" + self.fullsync = True + LOG.info(_("agent_updated by server side %s!"), payload) + + +def main(manager='neutron.agent.l3_proxy.L3NATAgentWithStateReport'): + conf = cfg.CONF + conf.register_opts(L3NATAgent.OPTS) + config.register_interface_driver_opts_helper(conf) + config.register_use_namespaces_opts_helper(conf) + config.register_agent_state_opts_helper(conf) + config.register_root_helper(conf) + conf.register_opts(interface.OPTS) + conf.register_opts(external_process.OPTS) + common_config.init(sys.argv[1:]) + config.setup_logging(conf) + server = neutron_service.Service.create( + binary='neutron-l3-agent', + topic=topics.L3_AGENT, + report_interval=cfg.CONF.AGENT.report_interval, + manager=manager) + service.launch(server).wait() + +if __name__ == "__main__": + sys.exit(main()) diff --git a/novaproxy/README.md b/novaproxy/README.md new file mode 100644 index 00000000..b9fb2754 --- /dev/null +++ b/novaproxy/README.md @@ -0,0 +1,156 @@ +OpenStack Nova Proxy +=============================== + + Nova-Proxy plays the same role as Nova-Compute in the cascading OpenStack. + Nova-Proxy treats the cascaded Nova as its hypervisor, converting internal request messages from the message bus into RESTful API calls to the cascaded Nova. + + +Key modules +----------- + +* The new nova proxy module manager_proxy, which is configured to manage the cascaded Nova of a specified Availability Zone. All VMs in the cascaded Nova of this AZ will be bound to the manager_proxy host at the cascading level: + + nova/compute/manager_proxy.py + +* The code includes clients for the various component services (nova, neutron, cinder, glance); through these clients the proxy calls the cascaded component services' RESTful APIs: + + nova/compute/clients.py + +* The modules through which the clients obtain and validate tokens: + nova/compute/compute_context.py + nova/compute/compute_keystoneclient.py + +Requirements +------------ +* openstack-nova-compute-2014.1-23.1 has been installed + +Installation +------------ + +We provide two ways to install the nova proxy code. In this section, we will guide you through installing the nova proxy with the minimum configuration. + +* **Note:** + + - Make sure you have an existing installation of **OpenStack Icehouse**. + - We recommend that you back up at least the following files before installation, because they will be overwritten or modified: + $NOVA_CONFIG_PARENT_DIR/nova.conf + (replace the $... with actual directory names.) + +* **Manual Installation** + + - Make sure you have performed backups properly. + + - Navigate to the local repository and copy the contents of the 'nova' sub-directory to the corresponding places in the existing nova, e.g. + ```cp -r $LOCAL_REPOSITORY_DIR/nova $NOVA_PARENT_DIR``` + (replace the $... with actual directory name.) + + - Update the nova configuration file (e.g. /etc/nova/nova.conf) with the minimum options below. If an option already exists, modify its value; otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide. + ``` + [DEFAULT] + ... 
+ ###configuration for Nova cascading ### + proxy_region_name=$proxy_region_name + cascading_nova_url=$cascading_nova_url + cascaded_nova_url=$cascaded_nova_url + cascaded_neutron_url=$cascaded_neutron_url + cascaded_glance_flag=False + cascaded_glance_url=$cascaded_glance_url + os_region_name=$os_region_name + keystone_auth_url=$keystone_auth_url + cinder_endpoint_template=$cinder_endpoint_template + compute_manager=nova.compute.manager_proxy.ComputeManager + ``` + + - Restart the nova proxy. + ```service nova-compute restart``` + + - Done. The nova proxy should be working with a demo configuration. + +* **Automatic Installation** + + - Make sure you have performed backups properly. + + - Navigate to the installation directory and run the installation script. + ``` + cd $LOCAL_REPOSITORY_DIR/installation + sudo bash ./install.sh + ``` + (replace the $... with actual directory name.) + + - Done. The installation script should set up the nova proxy with the minimum configuration below. Check the "Configurations" section for a full configuration guide. + ``` + [DEFAULT] + ... + ###cascade info ### + proxy_region_name=$proxy_region_name + cascading_nova_url=$cascading_nova_url + cascaded_nova_url=$cascaded_nova_url + cascaded_neutron_url=$cascaded_neutron_url + cascaded_glance_flag=False + cascaded_glance_url=$cascaded_glance_url + os_region_name=$os_region_name + keystone_auth_url=$keystone_auth_url + cinder_endpoint_template=$cinder_endpoint_template + compute_manager=nova.compute.manager_proxy.ComputeManager + +* **Troubleshooting** + + In case the automatic installation process does not complete, please check the following: + + - Make sure your OpenStack version is Icehouse. + + - Check the variables at the beginning of the install.sh script. Your installation directories may be different from the default values we provide. + + - The installation script will automatically add the related code to $NOVA_PARENT_DIR/nova and modify the related configuration. + + - In case the automatic installation does not work, try to install manually. + +Configurations +-------------- + +* This is a (default) configuration sample for the nova proxy. Please add/modify these options in /etc/nova/nova.conf. +* Note: + - Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name. + - Please refer to the 'Configuration Details' section below for proper configuration and usage. + +``` +[DEFAULT] + +... + +# +#Options defined in nova.compute.manager +# + +# Default manager to use for the nova proxy (string value) +compute_manager=nova.compute.manager_proxy.ComputeManager + +#The region name, which will be set as a parameter when +#the cascaded level component services register endpoints to keystone +proxy_region_name=$proxy_region_name + +#The cascading level nova component service url, by which the nova proxy +#can access the cascading level nova service +cascading_nova_url=$cascading_nova_url + +#The cascaded level nova component service url, by which the nova proxy +#can access the cascaded level nova service +cascaded_nova_url=$cascaded_nova_url +cascaded_neutron_url=$cascaded_neutron_url + +#When cascaded_glance_flag is set to True, the cascaded Nova will use the cascaded glance to +#provide images instead of the cascading level glance; if cascaded_glance_flag is set to False, +#the cascaded Nova will use images from the global (cascading level) glance. 
+cascaded_glance_flag=True or False + +#The cascaded level glance service url, by which the nova proxy +#can judge whether the cascading glance has a location for this cascaded glance +cascaded_glance_url=$cascaded_glance_url + +#The region name, which will be set as a parameter when +#the cascading level component services register endpoints to keystone +os_region_name=$os_region_name + +#The cascading level keystone component service url, by which the nova proxy +#can access the cascading level keystone service +keystone_auth_url=$keystone_auth_url diff --git a/novaproxy/installation/install.sh b/novaproxy/installation/install.sh new file mode 100644 index 00000000..ce449872 --- /dev/null +++ b/novaproxy/installation/install.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + +_NOVA_CONF_DIR="/etc/nova" +_NOVA_CONF_FILE="nova.conf" +_NOVA_INSTALL="/usr/lib64/python2.6/site-packages" +_NOVA_DIR="${_NOVA_INSTALL}/nova" + +# if you did not make changes to the installation files, +# please do not edit the following directories. +_CODE_DIR="../nova" +_BACKUP_DIR="${_NOVA_INSTALL}/.nova-proxy-installation-backup" + +_SCRIPT_LOGFILE="/var/log/nova-proxy/installation/install.log" + +config_option_list="proxy_region_name=AZ1 cascading_nova_url=http://10.67.148.210:8774/v2 cascaded_nova_url=http://10.67.148.201:8774/v2 cascaded_neutron_url=http://10.67.148.201:9696 cascaded_glance_flag=False cascaded_glance_url=http://10.67.148.201:9292 os_region_name=Cascading_Openstack keystone_auth_url=http://10.67.148.210:5000/v2.0/ cinder_endpoint_template=http://10.67.148.210:8776/v1/%(project_id)s compute_manager=nova.compute.manager_proxy.ComputeManager" + +function log() +{ + log_path=`dirname ${_SCRIPT_LOGFILE}` + if [ ! -d $log_path ] ; then + mkdir -p $log_path + fi + echo "$@" + echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE +} + +if [[ ${EUID} -ne 0 ]]; then + log "Please run as root." + exit 1 +fi + + +cd `dirname $0` + +log "checking installation directories..." +if [ ! -d "${_NOVA_DIR}" ] ; then + log "Could not find the nova installation. Please check the variables at the beginning of the script." + log "aborted." + exit 1 +fi +if [ ! -f "${_NOVA_CONF_DIR}/${_NOVA_CONF_FILE}" ] ; then + log "Could not find the nova config file. Please check the variables at the beginning of the script." + log "aborted." + exit 1 +fi + +log "checking previous installation..." +if [ -d "${_BACKUP_DIR}/nova" ] ; then + log "It seems nova-proxy has already been installed!" + log "Please check the README for a solution if this is not true." + exit 1 +fi + +log "backing up current files that might be overwritten..." +mkdir -p "${_BACKUP_DIR}/nova" +mkdir -p "${_BACKUP_DIR}/etc/nova" +cp "${_NOVA_CONF_DIR}/${_NOVA_CONF_FILE}" "${_BACKUP_DIR}/etc/nova/" +if [ $? -ne 0 ] ; then + rm -r "${_BACKUP_DIR}/nova" + rm -r "${_BACKUP_DIR}/etc" + log "Error in config backup, aborted." + exit 1 +fi + +log "copying in new files..." 
+cp -r "${_CODE_DIR}" `dirname ${_NOVA_DIR}` +if [ $? -ne 0 ] ; then + log "Error in copying, aborted." + log "Recovering original files..." + cp -r "${_BACKUP_DIR}/nova" `dirname ${_NOVA_DIR}` && rm -r "${_BACKUP_DIR}/nova" + if [ $? -ne 0 ] ; then + log "Recovering failed! Please install manually." + fi + exit 1 +fi + +log "updating config file..." +for option in $config_option_list +do + option_key=`echo $option|awk -F "=" '{print $1}'` + option_value=`echo $option|awk -F "=" '{print $2}'` + sed -i.backup -e "/$option_key *=/d" "${_NOVA_CONF_DIR}/${_NOVA_CONF_FILE}" + echo "$option_key,***************$option_value" + echo $option_key=$option_value >> "${_NOVA_CONF_DIR}/${_NOVA_CONF_FILE}" + +done + +log "restarting nova compute..." +service openstack-nova-compute restart +if [ $? -ne 0 ] ; then + log "There was an error in restarting the service, please restart nova compute manually." + exit 1 +fi + +log "Completed." +log "See README to get started." + +exit 0 diff --git a/novaproxy/installation/uninstall.sh b/novaproxy/installation/uninstall.sh new file mode 100644 index 00000000..f770fd93 --- /dev/null +++ b/novaproxy/installation/uninstall.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# Copyright (c) 2014 Huawei Technologies. + + +# The uninstallation script has not been implemented yet; +# it will be supplied if needed. +exit 1 \ No newline at end of file diff --git a/novaproxy/nova/compute/clients.py b/novaproxy/nova/compute/clients.py new file mode 100644 index 00000000..3aef7998 --- /dev/null +++ b/novaproxy/nova/compute/clients.py @@ -0,0 +1,246 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
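clients.py below is the module the README's "Key modules" section refers to: it builds and caches per-context clients for the cascaded services. As a quick orientation, here is a minimal usage sketch mirroring the pattern manager_proxy.py (later in this patch) uses to reach the cascaded Nova; the option names are the ones registered by manager_proxy.py, and error handling is omitted.

```
# Illustrative sketch only: how the proxy obtains a client for the cascaded
# Nova, following manager_proxy._heal_instance_state() in this patch.
from oslo.config import cfg

from nova.compute import clients
from nova.compute import compute_context


def get_cascaded_nova_client():
    # Build an admin request context scoped to the cascaded region.
    ctx = compute_context.RequestContext(
        username=cfg.CONF.nova_admin_username,
        password=cfg.CONF.nova_admin_password,
        tenant=cfg.CONF.nova_admin_tenant_name,
        auth_url=cfg.CONF.keystone_auth_url,
        region_name=cfg.CONF.proxy_region_name)
    # OpenStackClients caches one client per service type for this context.
    return clients.OpenStackClients(ctx).nova()


# e.g. list every server in the cascaded OpenStack, as the periodic
# instance-state sync task does.
servers = get_cascaded_nova_client().servers.list(
    search_opts={'all_tenants': True})
```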
+ +from oslo.config import cfg + +from nova.openstack.common import importutils +from nova.openstack.common import log as logging + +logger = logging.getLogger(__name__) + + +from nova.compute import compute_keystoneclient as hkc +from novaclient import client as novaclient +from novaclient import shell as novashell +try: + from swiftclient import client as swiftclient +except ImportError: + swiftclient = None + logger.info('swiftclient not available') +try: + from neutronclient.v2_0 import client as neutronclient +except ImportError: + neutronclient = None + logger.info('neutronclient not available') +try: + from cinderclient import client as cinderclient +except ImportError: + cinderclient = None + logger.info('cinderclient not available') + +try: + from ceilometerclient.v2 import client as ceilometerclient +except ImportError: + ceilometerclient = None + logger.info('ceilometerclient not available') + + +cloud_opts = [ + cfg.StrOpt('cloud_backend', + default=None, + help="Cloud module to use as a backend. Defaults to OpenStack.") +] +cfg.CONF.register_opts(cloud_opts) + + +class OpenStackClients(object): + + ''' + Convenience class to create and cache client instances. + ''' + + def __init__(self, context): + self.context = context + self._nova = {} + self._keystone = None + self._swift = None + self._neutron = None + self._cinder = None + self._ceilometer = None + + @property + def auth_token(self): + # if there is no auth token in the context + # attempt to get one using the context username and password + return self.context.auth_token or self.keystone().auth_token + + def keystone(self): + if self._keystone: + return self._keystone + + self._keystone = hkc.KeystoneClient(self.context) + return self._keystone + + def url_for(self, **kwargs): + return self.keystone().url_for(**kwargs) + + def nova(self, service_type='compute'): + if service_type in self._nova: + return self._nova[service_type] + + con = self.context + if self.auth_token is None: + logger.error("Nova connection failed, no auth_token!") + return None + + computeshell = novashell.OpenStackComputeShell() + extensions = computeshell._discover_extensions("1.1") + + args = { + 'project_id': con.tenant_id, + 'auth_url': con.auth_url, + 'service_type': service_type, + 'username': None, + 'api_key': None, + 'extensions': extensions + } + if con.password is not None: + if self.context.region_name is None: + management_url = self.url_for(service_type=service_type) + else: + management_url = self.url_for( + service_type=service_type, + attr='region', + filter_value=self.context.region_name) + else: + management_url = con.nova_url + '/' + con.tenant_id + client = novaclient.Client(2, **args) + client.client.auth_token = self.auth_token + client.client.management_url = management_url + + self._nova[service_type] = client + return client + + def swift(self): + if swiftclient is None: + return None + if self._swift: + return self._swift + + con = self.context + if self.auth_token is None: + logger.error("Swift connection failed, no auth_token!") + return None + + args = { + 'auth_version': '2.0', + 'tenant_name': con.tenant_id, + 'user': con.username, + 'key': None, + 'authurl': None, + 'preauthtoken': self.auth_token, + 'preauthurl': self.url_for(service_type='object-store') + } + self._swift = swiftclient.Connection(**args) + return self._swift + + def neutron(self): + if neutronclient is None: + return None + if self._neutron: + return self._neutron + + con = self.context + if self.auth_token is None: + logger.error("Neutron 
connection failed, no auth_token!") + return None + if con.password is not None: + if self.context.region_name is None: + management_url = self.url_for(service_type='network') + else: + management_url = self.url_for( + service_type='network', + attr='region', + filter_value=self.context.region_name) + else: + management_url = con.neutron_url + + args = { + 'auth_url': con.auth_url, + 'service_type': 'network', + 'token': self.auth_token, + 'endpoint_url': management_url + } + + self._neutron = neutronclient.Client(**args) + + return self._neutron + + def cinder(self): + if cinderclient is None: + return self.nova('volume') + if self._cinder: + return self._cinder + + con = self.context + if self.auth_token is None: + logger.error("Cinder connection failed, no auth_token!") + return None + + args = { + 'service_type': 'volume', + 'auth_url': con.auth_url, + 'project_id': con.tenant_id, + 'username': None, + 'api_key': None + } + + self._cinder = cinderclient.Client('1', **args) + if con.password is not None: + if self.context.region_name is None: + management_url = self.url_for(service_type='volume') + else: + management_url = self.url_for( + service_type='volume', + attr='region', + filter_value=self.context.region_name) + else: + management_url = con.cinder_url + '/' + con.tenant_id + self._cinder.client.auth_token = self.auth_token + self._cinder.client.management_url = management_url + + return self._cinder + + def ceilometer(self): + if ceilometerclient is None: + return None + if self._ceilometer: + return self._ceilometer + + if self.auth_token is None: + logger.error("Ceilometer connection failed, no auth_token!") + return None + con = self.context + args = { + 'auth_url': con.auth_url, + 'service_type': 'metering', + 'project_id': con.tenant_id, + 'token': lambda: self.auth_token, + 'endpoint': self.url_for(service_type='metering'), + } + + client = ceilometerclient.Client(**args) + + self._ceilometer = client + return self._ceilometer + + +if cfg.CONF.cloud_backend: + cloud_backend_module = importutils.import_module(cfg.CONF.cloud_backend) + Clients = cloud_backend_module.Clients +else: + Clients = OpenStackClients + +logger.debug('Using backend %s' % Clients) diff --git a/novaproxy/nova/compute/compute_context.py b/novaproxy/nova/compute/compute_context.py new file mode 100644 index 00000000..173469de --- /dev/null +++ b/novaproxy/nova/compute/compute_context.py @@ -0,0 +1,199 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
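The tail of clients.py above selects the client factory: when the cloud_backend option is set, the named module is imported and its Clients attribute is used; otherwise OpenStackClients is the default. A hypothetical backend module (module path and class name invented for illustration) therefore only needs to expose a module-level Clients name, for example:

```
# Hypothetical pluggable backend for the cloud_backend option handled at the
# bottom of clients.py; only the module-level `Clients` attribute matters.
from nova.compute import clients as proxy_clients


class MyCloudClients(proxy_clients.OpenStackClients):
    """Reuse the default factory, overriding only what differs."""

    def nova(self, service_type='compute'):
        # Decorate or replace the cascaded Nova client here if needed.
        return super(MyCloudClients, self).nova(service_type)


Clients = MyCloudClients
```

Pointing cloud_backend at such a module in nova.conf would swap the factory without touching manager_proxy.py.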
+ +from oslo.config import cfg +from nova.openstack.common import local +from nova import exception +from nova import wsgi +from nova.openstack.common import context +from nova.openstack.common import importutils +from nova.openstack.common import uuidutils + + +def generate_request_id(): + return 'req-' + uuidutils.generate_uuid() + + +class RequestContext(context.RequestContext): + + """ + Stores information about the security context under which the user + accesses the system, as well as additional request information. + """ + + def __init__(self, auth_token=None, username=None, password=None, + aws_creds=None, tenant=None, + tenant_id=None, auth_url=None, roles=None, + is_admin=False, region_name=None, + nova_url=None, cinder_url=None, neutron_url=None, + read_only=False, show_deleted=False, + owner_is_tenant=True, overwrite=True, + trust_id=None, trustor_user_id=None, + **kwargs): + """ + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + super(RequestContext, self).__init__(auth_token=auth_token, + user=username, tenant=tenant, + is_admin=is_admin, + read_only=read_only, + show_deleted=show_deleted, + request_id='unused') + self.username = username + self.password = password + self.aws_creds = aws_creds + self.tenant_id = tenant_id + self.auth_url = auth_url + self.roles = roles or [] + self.owner_is_tenant = owner_is_tenant + if overwrite or not hasattr(local.store, 'context'): + self.update_store() + self._session = None + self.trust_id = trust_id + self.trustor_user_id = trustor_user_id + self.nova_url = nova_url + self.cinder_url = cinder_url + self.neutron_url = neutron_url + self.region_name = region_name + + def update_store(self): + local.store.context = self + + def to_dict(self): + return {'auth_token': self.auth_token, + 'username': self.username, + 'password': self.password, + 'aws_creds': self.aws_creds, + 'tenant': self.tenant, + 'tenant_id': self.tenant_id, + 'trust_id': self.trust_id, + 'trustor_user_id': self.trustor_user_id, + 'auth_url': self.auth_url, + 'roles': self.roles, + 'is_admin': self.is_admin} + + @classmethod + def from_dict(cls, values): + return cls(**values) + + @property + def owner(self): + """Return the owner to correlate with an image.""" + return self.tenant if self.owner_is_tenant else self.user + + +def get_admin_context(read_deleted="no"): + return RequestContext(is_admin=True) + + +class ContextMiddleware(wsgi.Middleware): + + opts = [cfg.BoolOpt('owner_is_tenant', default=True), + cfg.StrOpt('admin_role', default='admin')] + + def __init__(self, app, conf, **local_conf): + cfg.CONF.register_opts(self.opts) + + # Determine the context class to use + self.ctxcls = RequestContext + if 'context_class' in local_conf: + self.ctxcls = importutils.import_class(local_conf['context_class']) + + super(ContextMiddleware, self).__init__(app) + + def make_context(self, *args, **kwargs): + """ + Create a context with the given arguments. + """ + kwargs.setdefault('owner_is_tenant', cfg.CONF.owner_is_tenant) + + return self.ctxcls(*args, **kwargs) + + def process_request(self, req): + """ + Extract any authentication information in the request and + construct an appropriate context from it. + + A few scenarios exist: + + 1. If X-Auth-Token is passed in, then consult TENANT and ROLE headers + to determine permissions. + + 2. 
An X-Auth-Token was passed in, but the Identity-Status is not + confirmed. For now, just raising a NotAuthenticated exception. + + 3. X-Auth-Token is omitted. If we were using Keystone, then the + tokenauth middleware would have rejected the request, so we must be + using NoAuth. In that case, assume that is_admin=True. + """ + headers = req.headers + + try: + """ + This sets the username/password to the admin user because you + need this information in order to perform token authentication. + The real 'username' is the 'tenant'. + + We should also check here to see if X-Auth-Token is not set and + in that case we should assign the user/pass directly as the real + username/password and token as None. 'tenant' should still be + the username. + """ + + username = None + password = None + aws_creds = None + + if headers.get('X-Auth-User') is not None: + username = headers.get('X-Auth-User') + password = headers.get('X-Auth-Key') + elif headers.get('X-Auth-EC2-Creds') is not None: + aws_creds = headers.get('X-Auth-EC2-Creds') + + token = headers.get('X-Auth-Token') + tenant = headers.get('X-Tenant-Name') + tenant_id = headers.get('X-Tenant-Id') + auth_url = headers.get('X-Auth-Url') + roles = headers.get('X-Roles') + if roles is not None: + roles = roles.split(',') + + except Exception: + raise exception.NotAuthenticated() + + req.context = self.make_context(auth_token=token, + tenant=tenant, tenant_id=tenant_id, + aws_creds=aws_creds, + username=username, + password=password, + auth_url=auth_url, roles=roles, + is_admin=True) + + +def ContextMiddleware_filter_factory(global_conf, **local_conf): + """ + Factory method for paste.deploy + """ + conf = global_conf.copy() + conf.update(local_conf) + + def filter(app): + return ContextMiddleware(app, conf) + + return filter diff --git a/novaproxy/nova/compute/compute_keystoneclient.py b/novaproxy/nova/compute/compute_keystoneclient.py new file mode 100644 index 00000000..b2d822a3 --- /dev/null +++ b/novaproxy/nova/compute/compute_keystoneclient.py @@ -0,0 +1,314 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
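compute_context.RequestContext defined above carries the cascading credentials and endpoint URLs and can be serialized with to_dict()/from_dict(). A small sketch of that round trip follows (credential values are placeholders); note that to_dict() only keeps the fields listed in compute_context.py, so proxy-specific attributes such as region_name are not preserved.

```
# Illustrative round trip through RequestContext.to_dict()/from_dict(),
# as defined in compute_context.py above. Values are placeholders.
from nova.compute import compute_context

ctx = compute_context.RequestContext(
    username='admin',
    password='secret',
    tenant='admin',
    auth_url='http://127.0.0.1:5000/v2.0/',
    region_name='AZ1',
    is_admin=True)

restored = compute_context.RequestContext.from_dict(ctx.to_dict())
assert restored.username == 'admin'
# region_name is not part of to_dict(), so it comes back as None.
assert restored.region_name is None
```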
+ +from nova.openstack.common import context +from nova import exception + +import eventlet + +from keystoneclient.v2_0 import client as kc +from keystoneclient.v3 import client as kc_v3 +from oslo.config import cfg +from nova.openstack.common import importutils +from nova.openstack.common import log as logging + +logger = logging.getLogger('nova.compute.keystoneclient') + + +class KeystoneClient(object): + + """ + Wrap keystone client so we can encapsulate logic used in resources + Note this is intended to be initialized from a resource on a per-session + basis, so the session context is passed in on initialization + Also note that a copy of this is created every resource as self.keystone() + via the code in engine/client.py, so there should not be any need to + directly instantiate instances of this class inside resources themselves + """ + + def __init__(self, context): + # We have to maintain two clients authenticated with keystone: + # - ec2 interface is v2.0 only + # - trusts is v3 only + # If a trust_id is specified in the context, we immediately + # authenticate so we can populate the context with a trust token + # otherwise, we delay client authentication until needed to avoid + # unnecessary calls to keystone. + # + # Note that when you obtain a token using a trust, it cannot be + # used to reauthenticate and get another token, so we have to + # get a new trust-token even if context.auth_token is set. + # + # - context.auth_url is expected to contain the v2.0 keystone endpoint + self.context = context + self._client_v2 = None + self._client_v3 = None + + if self.context.trust_id: + # Create a connection to the v2 API, with the trust_id, this + # populates self.context.auth_token with a trust-scoped token + self._client_v2 = self._v2_client_init() + + @property + def client_v3(self): + if not self._client_v3: + # Create connection to v3 API + self._client_v3 = self._v3_client_init() + return self._client_v3 + + @property + def client_v2(self): + if not self._client_v2: + self._client_v2 = self._v2_client_init() + return self._client_v2 + + def _v2_client_init(self): + kwargs = { + 'auth_url': self.context.auth_url + } + auth_kwargs = {} + # Note try trust_id first, as we can't reuse auth_token in that case + if self.context.trust_id is not None: + # We got a trust_id, so we use the admin credentials + # to authenticate, then re-scope the token to the + # trust impersonating the trustor user. 
+ # Note that this currently requires the trustor tenant_id + # to be passed to the authenticate(), unlike the v3 call + kwargs.update(self._service_admin_creds(api_version=2)) + auth_kwargs['trust_id'] = self.context.trust_id + auth_kwargs['tenant_id'] = self.context.tenant_id + elif self.context.auth_token is not None: + kwargs['tenant_name'] = self.context.tenant + kwargs['token'] = self.context.auth_token + elif self.context.password is not None: + kwargs['username'] = self.context.username + kwargs['password'] = self.context.password + kwargs['tenant_name'] = self.context.tenant + kwargs['tenant_id'] = self.context.tenant_id + else: + logger.error("Keystone v2 API connection failed, no password or " + "auth_token!") + raise exception.AuthorizationFailure() + client_v2 = kc.Client(**kwargs) + + client_v2.authenticate(**auth_kwargs) + # If we are authenticating with a trust auth_kwargs are set, so set + # the context auth_token with the re-scoped trust token + if auth_kwargs: + # Sanity check + if not client_v2.auth_ref.trust_scoped: + logger.error("v2 trust token re-scoping failed!") + raise exception.AuthorizationFailure() + # All OK so update the context with the token + self.context.auth_token = client_v2.auth_ref.auth_token + self.context.auth_url = kwargs.get('auth_url') + + return client_v2 + + @staticmethod + def _service_admin_creds(api_version=2): + # Import auth_token to have keystone_authtoken settings setup. + importutils.import_module('keystoneclient.middleware.auth_token') + + creds = { + 'username': cfg.CONF.keystone_authtoken.admin_user, + 'password': cfg.CONF.keystone_authtoken.admin_password, + } + if api_version >= 3: + creds['auth_url'] =\ + cfg.CONF.keystone_authtoken.auth_uri.replace('v2.0', 'v3') + creds['project_name'] =\ + cfg.CONF.keystone_authtoken.admin_tenant_name + else: + creds['auth_url'] = cfg.CONF.keystone_authtoken.auth_uri + creds['tenant_name'] =\ + cfg.CONF.keystone_authtoken.admin_tenant_name + + return creds + + def _v3_client_init(self): + kwargs = {} + if self.context.auth_token is not None: + kwargs['project_name'] = self.context.tenant + kwargs['token'] = self.context.auth_token + kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3') + kwargs['endpoint'] = kwargs['auth_url'] + elif self.context.trust_id is not None: + # We got a trust_id, so we use the admin credentials and get a + # Token back impersonating the trustor user + kwargs.update(self._service_admin_creds(api_version=3)) + kwargs['trust_id'] = self.context.trust_id + elif self.context.password is not None: + kwargs['username'] = self.context.username + kwargs['password'] = self.context.password + kwargs['project_name'] = self.context.tenant + kwargs['project_id'] = self.context.tenant_id + kwargs['auth_url'] = self.context.auth_url.replace('v2.0', 'v3') + kwargs['endpoint'] = kwargs['auth_url'] + else: + logger.error("Keystone v3 API connection failed, no password or " + "auth_token!") + raise exception.AuthorizationFailure() + + client = kc_v3.Client(**kwargs) + # Have to explicitly authenticate() or client.auth_ref is None + client.authenticate() + + return client + + def create_trust_context(self): + """ + If cfg.CONF.deferred_auth_method is trusts, we create a + trust using the trustor identity in the current context, with the + trustee as the heat service user and return a context containing + the new trust_id + + If deferred_auth_method != trusts, or the current context already + contains a trust_id, we do nothing and return the current context + """ + if 
self.context.trust_id: + return self.context + + # We need the service admin user ID (not name), as the trustor user + # can't lookup the ID in keystoneclient unless they're admin + # workaround this by creating a temporary admin client connection + # then getting the user ID from the auth_ref + admin_creds = self._service_admin_creds() + admin_client = kc.Client(**admin_creds) + trustee_user_id = admin_client.auth_ref.user_id + trustor_user_id = self.client_v3.auth_ref.user_id + trustor_project_id = self.client_v3.auth_ref.project_id + roles = cfg.CONF.trusts_delegated_roles + trust = self.client_v3.trusts.create(trustor_user=trustor_user_id, + trustee_user=trustee_user_id, + project=trustor_project_id, + impersonation=True, + role_names=roles) + + trust_context = context.RequestContext.from_dict( + self.context.to_dict()) + trust_context.trust_id = trust.id + trust_context.trustor_user_id = trustor_user_id + return trust_context + + def delete_trust(self, trust_id): + """ + Delete the specified trust. + """ + self.client_v3.trusts.delete(trust_id) + + def create_stack_user(self, username, password=''): + """ + Create a user defined as part of a stack, either via template + or created internally by a resource. This user will be added to + the heat_stack_user_role as defined in the config + Returns the keystone ID of the resulting user + """ + if(len(username) > 64): + logger.warning("Truncating the username %s to the last 64 " + "characters." % username) + # get the last 64 characters of the username + username = username[-64:] + user = self.client_v2.users.create(username, + password, + '%s@heat-api.org' % + username, + tenant_id=self.context.tenant_id, + enabled=True) + + # We add the new user to a special keystone role + # This role is designed to allow easier differentiation of the + # heat-generated "stack users" which will generally have credentials + # deployed on an instance (hence are implicitly untrusted) + roles = self.client_v2.roles.list() + stack_user_role = [r.id for r in roles + if r.name == cfg.CONF.heat_stack_user_role] + if len(stack_user_role) == 1: + role_id = stack_user_role[0] + logger.debug("Adding user %s to role %s" % (user.id, role_id)) + self.client_v2.roles.add_user_role(user.id, role_id, + self.context.tenant_id) + else: + logger.error("Failed to add user %s to role %s, check role exists!" + % (username, cfg.CONF.heat_stack_user_role)) + + return user.id + + def delete_stack_user(self, user_id): + + user = self.client_v2.users.get(user_id) + + # FIXME (shardy) : need to test, do we still need this retry logic? + # Copied from user.py, but seems like something we really shouldn't + # need to do, no bug reference in the original comment (below)... + # tempory hack to work around an openstack bug. + # seems you can't delete a user first time - you have to try + # a couple of times - go figure! 
+ tmo = eventlet.Timeout(10) + status = 'WAITING' + reason = 'Timed out trying to delete user' + try: + while status == 'WAITING': + try: + user.delete() + status = 'DELETED' + except Exception as ce: + reason = str(ce) + logger.warning("Problem deleting user %s: %s" % + (user_id, reason)) + eventlet.sleep(1) + except eventlet.Timeout as t: + if t is not tmo: + # not my timeout + raise + else: + status = 'TIMEDOUT' + finally: + tmo.cancel() + + if status != 'DELETED': + raise exception.Error(reason) + + def delete_ec2_keypair(self, user_id, accesskey): + self.client_v2.ec2.delete(user_id, accesskey) + + def get_ec2_keypair(self, user_id): + # We make the assumption that each user will only have one + # ec2 keypair, it's not clear if AWS allow multiple AccessKey resources + # to be associated with a single User resource, but for simplicity + # we assume that here for now + cred = self.client_v2.ec2.list(user_id) + if len(cred) == 0: + return self.client_v2.ec2.create(user_id, self.context.tenant_id) + if len(cred) == 1: + return cred[0] + else: + logger.error("Unexpected number of ec2 credentials %s for %s" % + (len(cred), user_id)) + + def disable_stack_user(self, user_id): + # FIXME : This won't work with the v3 keystone API + self.client_v2.users.update_enabled(user_id, False) + + def enable_stack_user(self, user_id): + # FIXME : This won't work with the v3 keystone API + self.client_v2.users.update_enabled(user_id, True) + + def url_for(self, **kwargs): + return self.client_v2.service_catalog.url_for(**kwargs) + + @property + def auth_token(self): + return self.client_v2.auth_token diff --git a/novaproxy/nova/compute/manager_proxy.py b/novaproxy/nova/compute/manager_proxy.py new file mode 100644 index 00000000..2d65d036 --- /dev/null +++ b/novaproxy/nova/compute/manager_proxy.py @@ -0,0 +1,2999 @@ +import base64 +import contextlib +import functools +import os +import sys +import time +import traceback +import uuid + +import novaclient +import eventlet.event +import eventlet.timeout +from oslo.config import cfg +from oslo import messaging + +from nova.compute import clients +from nova.compute import compute_context +from nova.openstack.common import timeutils + +from nova import block_device +from nova.cells import rpcapi as cells_rpcapi +from nova.cloudpipe import pipelib +from nova import compute +from nova.compute import flavors +from nova.compute import power_state +from nova.compute import resource_tracker +from nova.compute import rpcapi as compute_rpcapi +from nova.compute import task_states +from nova.compute import utils as compute_utils +from nova.compute import vm_states +from nova import conductor +from nova import consoleauth +import nova.context +from nova import exception +from nova import hooks +from nova.image import glance +from nova import manager +from nova import network +from nova.network import model as network_model +from nova.network.security_group import openstack_driver +from nova.objects import aggregate as aggregate_obj +from nova.objects import base as obj_base +from nova.objects import block_device as block_device_obj +from nova.objects import external_event as external_event_obj +from nova.objects import flavor as flavor_obj +from nova.objects import instance as instance_obj +from nova.objects import instance_group as instance_group_obj +from nova.objects import migration as migration_obj +from nova.objects import quotas as quotas_obj +from nova.openstack.common import excutils +from nova.openstack.common.gettextutils import _ +from nova.openstack.common 
import jsonutils +from nova.openstack.common import log as logging +from nova.openstack.common import periodic_task +from nova import paths +from nova import rpc +from nova import safe_utils +from nova.scheduler import rpcapi as scheduler_rpcapi +from nova import utils +from nova.virt import block_device as driver_block_device +from nova.virt import driver +from nova.virt import virtapi +from nova import volume + +from nova.virt.libvirt import utils as libvirt_utils +from nova.network import neutronv2 +from neutronclient.v2_0 import client as clientv20 + + +compute_opts = [ + cfg.StrOpt('instances_path', + default=paths.state_path_def('instances'), + help='Where instances are stored on disk'), + cfg.IntOpt('network_allocate_retries', + default=0, + help="Number of times to retry network allocation on failures"), + cfg.StrOpt('keystone_auth_url', + default='http://127.0.0.1:5000/v2.0/', + help='value of keystone url'), + cfg.StrOpt('nova_admin_username', + default='nova', + help='username for connecting to nova in admin context'), + cfg.StrOpt('nova_admin_password', + default='nova', + help='password for connecting to nova in admin context', + secret=True), + cfg.StrOpt('nova_admin_tenant_name', + default='admin', + help='tenant name for connecting to nova in admin context'), + cfg.StrOpt('proxy_region_name', + deprecated_name='proxy_region_name', + help='region name for connecting to neutron in admin context'), + cfg.IntOpt('novncproxy_port', + default=6080, + help='Port on which to listen for incoming requests'), + cfg.StrOpt('cascading_nova_url', + default='http://127.0.0.1:8774/v2', + help='value of cascading url'), + cfg.StrOpt('cascaded_nova_url', + default='http://127.0.0.1:8774/v2', + help='value of cascaded url'), + cfg.StrOpt('cascaded_neutron_url', + default='http://127.0.0.1:9696', + help='value of cascaded neutron url'), + cfg.BoolOpt('cascaded_glance_flag', + default=False, + help='Whether to use glance cescaded'), + cfg.StrOpt('cascaded_glance_url', + default='http://127.0.0.1:9292', + help='value of cascaded glance url') +] + +interval_opts = [ + cfg.IntOpt('volume_usage_poll_interval', + default=0, + help='Interval in seconds for gathering volume usages'), + cfg.IntOpt('sync_instance_state_interval', + default=5, + help='interval to sync instance states between ' + 'the nova and the nova-proxy') + +] + +CONF = cfg.CONF +CONF.register_opts(compute_opts) +CONF.register_opts(interval_opts) +CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api') +CONF.import_opt('host', 'nova.netconf') +CONF.import_opt('my_ip', 'nova.netconf') +CONF.import_opt('vnc_enabled', 'nova.vnc') +CONF.import_opt('enabled', 'nova.spice', group='spice') +CONF.import_opt('enable', 'nova.cells.opts', group='cells') +CONF.import_opt('enabled', 'nova.rdp', group='rdp') + +LOG = logging.getLogger(__name__) + +get_notifier = functools.partial(rpc.get_notifier, service='compute') +wrap_exception = functools.partial(exception.wrap_exception, + get_notifier=get_notifier) + + +@utils.expects_func_args('migration') +def errors_out_migration(function): + """Decorator to error out migration on failure.""" + + @functools.wraps(function) + def decorated_function(self, context, *args, **kwargs): + try: + return function(self, context, *args, **kwargs) + except Exception: + with excutils.save_and_reraise_exception(): + # Find migration argument. The argument cannot be + # defined by position because the wrapped functions + # do not have the same signature. 
+ for arg in args: + if not isinstance(arg, migration_obj.Migration): + continue + status = arg.status + if status not in ['migrating', 'post-migrating']: + continue + arg.status = 'error' + try: + arg.save(context.elevated()) + except Exception: + LOG.debug(_('Error setting migration status ' + 'for instance %s.') % + arg.instance_uuid, exc_info=True) + break + + return decorated_function + + +@utils.expects_func_args('instance') +def reverts_task_state(function): + """Decorator to revert task_state on failure.""" + + @functools.wraps(function) + def decorated_function(self, context, *args, **kwargs): + try: + return function(self, context, *args, **kwargs) + except exception.UnexpectedTaskStateError as e: + # Note(maoy): unexpected task state means the current + # task is preempted. Do not clear task state in this + # case. + with excutils.save_and_reraise_exception(): + LOG.info(_("Task possibly preempted: %s") % e.format_message()) + except Exception: + with excutils.save_and_reraise_exception(): + try: + self._instance_update(context, + kwargs['instance']['uuid'], + task_state=None) + except Exception: + pass + + return decorated_function + + +@utils.expects_func_args('instance') +def wrap_instance_fault(function): + """Wraps a method to catch exceptions related to instances. + + This decorator wraps a method to catch any exceptions having to do with + an instance that may get thrown. It then logs an instance fault in the db. + """ + + @functools.wraps(function) + def decorated_function(self, context, *args, **kwargs): + try: + return function(self, context, *args, **kwargs) + except exception.InstanceNotFound: + raise + except Exception as e: + # NOTE(gtt): If argument 'instance' is in args rather than kwargs, + # we will get a KeyError exception which will cover up the real + # exception. So, we update kwargs with the values from args first. + # then, we can get 'instance' from kwargs easily. + kwargs.update(dict(zip(function.func_code.co_varnames[2:], args))) + + with excutils.save_and_reraise_exception(): + compute_utils.add_instance_fault_from_exc(context, + self.conductor_api, + kwargs['instance'], + e, sys.exc_info()) + + return decorated_function + + +@utils.expects_func_args('instance') +def wrap_instance_event(function): + """Wraps a method to log the event taken on the instance, and result. + + This decorator wraps a method to log the start and result of an event, as + part of an action taken on an instance. + """ + + @functools.wraps(function) + def decorated_function(self, context, *args, **kwargs): + wrapped_func = utils.get_wrapped_function(function) + keyed_args = safe_utils.getcallargs(wrapped_func, context, *args, + **kwargs) + instance_uuid = keyed_args['instance']['uuid'] + + event_name = 'compute_{0}'.format(function.func_name) + with compute_utils.EventReporter(context, self.conductor_api, + event_name, instance_uuid): + + function(self, context, *args, **kwargs) + + return decorated_function + + +@utils.expects_func_args('image_id', 'instance') +def delete_image_on_error(function): + """Used for snapshot related method to ensure the image created in + compute.api is deleted when an error occurs. 
+ """ + + @functools.wraps(function) + def decorated_function(self, context, image_id, instance, + *args, **kwargs): + try: + return function(self, context, image_id, instance, + *args, **kwargs) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.debug(_("Cleaning up image %s") % image_id, + exc_info=True, instance=instance) + try: + image_service = glance.get_default_image_service() + image_service.delete(context, image_id) + except Exception: + LOG.exception(_("Error while trying to clean up image %s") + % image_id, instance=instance) + + return decorated_function + + +# TODO(danms): Remove me after Icehouse +# NOTE(mikal): if the method being decorated has more than one decorator, then +# put this one first. Otherwise the various exception handling decorators do +# not function correctly. +def object_compat(function): + """Wraps a method that expects a new-world instance + + This provides compatibility for callers passing old-style dict + instances. + """ + + @functools.wraps(function) + def decorated_function(self, context, *args, **kwargs): + def _load_instance(instance_or_dict): + if isinstance(instance_or_dict, dict): + instance = instance_obj.Instance._from_db_object( + context, instance_obj.Instance(), instance_or_dict, + expected_attrs=metas) + instance._context = context + return instance + return instance_or_dict + + metas = ['metadata', 'system_metadata'] + try: + kwargs['instance'] = _load_instance(kwargs['instance']) + except KeyError: + args = (_load_instance(args[0]),) + args[1:] + + migration = kwargs.get('migration') + if isinstance(migration, dict): + migration = migration_obj.Migration._from_db_object( + context.elevated(), + migration_obj.Migration(), + migration) + kwargs['migration'] = migration + + return function(self, context, *args, **kwargs) + + return decorated_function + + +# TODO(danms): Remove me after Icehouse +def aggregate_object_compat(function): + """Wraps a method that expects a new-world aggregate.""" + + @functools.wraps(function) + def decorated_function(self, context, *args, **kwargs): + aggregate = kwargs.get('aggregate') + if isinstance(aggregate, dict): + aggregate = aggregate_obj.Aggregate._from_db_object( + context.elevated(), aggregate_obj.Aggregate(), + aggregate) + kwargs['aggregate'] = aggregate + return function(self, context, *args, **kwargs) + return decorated_function + + +def _get_image_meta(context, image_ref): + image_service, image_id = glance.get_remote_image_service(context, + image_ref) + return image_service.show(context, image_id) + + +class InstanceEvents(object): + + def __init__(self): + self._events = {} + + @staticmethod + def _lock_name(instance): + return '%s-%s' % (instance.uuid, 'events') + + def prepare_for_instance_event(self, instance, event_name): + """Prepare to receive an event for an instance. + + This will register an event for the given instance that we will + wait on later. This should be called before initiating whatever + action will trigger the event. The resulting eventlet.event.Event + object should be wait()'d on to ensure completion. 
+ + :param instance: the instance for which the event will be generated + :param event_name: the name of the event we're expecting + :returns: an event object that should be wait()'d on + """ + @utils.synchronized(self._lock_name) + def _create_or_get_event(): + if instance.uuid not in self._events: + self._events.setdefault(instance.uuid, {}) + return self._events[instance.uuid].setdefault( + event_name, eventlet.event.Event()) + LOG.debug(_('Preparing to wait for external event %(event)s'), + {'event': event_name}, instance=instance) + return _create_or_get_event() + + def pop_instance_event(self, instance, event): + """Remove a pending event from the wait list. + + This will remove a pending event from the wait list so that it + can be used to signal the waiters to wake up. + + :param instance: the instance for which the event was generated + :param event: the nova.objects.external_event.InstanceExternalEvent + that describes the event + :returns: the eventlet.event.Event object on which the waiters + are blocked + """ + @utils.synchronized(self._lock_name) + def _pop_event(): + events = self._events.get(instance.uuid) + if not events: + return None + _event = events.pop(event.key, None) + if not events: + del self._events[instance.uuid] + return _event + return _pop_event() + + def clear_events_for_instance(self, instance): + """Remove all pending events for an instance. + + This will remove all events currently pending for an instance + and return them (indexed by event name). + + :param instance: the instance for which events should be purged + :returns: a dictionary of {event_name: eventlet.event.Event} + """ + @utils.synchronized(self._lock_name) + def _clear_events(): + # NOTE(danms): Use getitem syntax for the instance until + # all the callers are using objects + return self._events.pop(instance['uuid'], {}) + return _clear_events() + + +class ComputeVirtAPI(virtapi.VirtAPI): + + def __init__(self, compute): + super(ComputeVirtAPI, self).__init__() + self._compute = compute + + def instance_update(self, context, instance_uuid, updates): + return self._compute._instance_update(context, + instance_uuid, + **updates) + + def provider_fw_rule_get_all(self, context): + return self._compute.conductor_api.provider_fw_rule_get_all(context) + + def agent_build_get_by_triple(self, context, hypervisor, os, architecture): + return self._compute.conductor_api.agent_build_get_by_triple( + context, hypervisor, os, architecture) + + def _default_error_callback(self, event_name, instance): + raise exception.NovaException(_('Instance event failed')) + + @contextlib.contextmanager + def wait_for_instance_event(self, instance, event_names, deadline=300, + error_callback=None): + """Plan to wait for some events, run some code, then wait. + + This context manager will first create plans to wait for the + provided event_names, yield, and then wait for all the scheduled + events to complete. + + Note that this uses an eventlet.timeout.Timeout to bound the + operation, so callers should be prepared to catch that + failure and handle that situation appropriately. + + If the event is not received by the specified timeout deadline, + eventlet.timeout.Timeout is raised. + + If the event is received but did not have a 'completed' + status, a NovaException is raised. 
If an error_callback is + provided, instead of raising an exception as detailed above + for the failure case, the callback will be called with the + event_name and instance, and can return True to continue + waiting for the rest of the events, False to stop processing, + or raise an exception which will bubble up to the waiter. + + :param:instance: The instance for which an event is expected + :param:event_names: A list of event names. Each element can be a + string event name or tuple of strings to + indicate (name, tag). + :param:deadline: Maximum number of seconds we should wait for all + of the specified events to arrive. + :param:error_callback: A function to be called if an event arrives + """ + + if error_callback is None: + error_callback = self._default_error_callback + events = {} + for event_name in event_names: + if isinstance(event_name, tuple): + name, tag = event_name + event_name = external_event_obj.InstanceExternalEvent.make_key( + name, tag) + events[event_name] = ( + self._compute.instance_events.prepare_for_instance_event( + instance, event_name)) + yield + with eventlet.timeout.Timeout(deadline): + for event_name, event in events.items(): + actual_event = event.wait() + if actual_event.status == 'completed': + continue + decision = error_callback(event_name, instance) + if decision is False: + break + + +class ComputeManager(manager.Manager): + + """Manages the running instances from creation to destruction.""" + + target = messaging.Target(version='3.23') + + def __init__(self, compute_driver=None, *args, **kwargs): + """Load configuration options and connect to the hypervisor.""" + self.virtapi = ComputeVirtAPI(self) + self.network_api = network.API() + self.volume_api = volume.API() + self._last_host_check = 0 + self._last_bw_usage_poll = 0 + self._bw_usage_supported = True + self._last_bw_usage_cell_update = 0 + self.compute_api = compute.API() + self.compute_rpcapi = compute_rpcapi.ComputeAPI() + self.conductor_api = conductor.API() + self.compute_task_api = conductor.ComputeTaskAPI() + self.is_neutron_security_groups = ( + openstack_driver.is_neutron_security_groups()) + self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI() + self.cells_rpcapi = cells_rpcapi.CellsAPI() + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self._resource_tracker_dict = {} + self.instance_events = InstanceEvents() + + super(ComputeManager, self).__init__(service_name="compute", + *args, **kwargs) + + # NOTE(russellb) Load the driver last. It may call back into the + # compute manager via the virtapi, so we want it to be fully + # initialized before that happens. 
+        self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
+        self.use_legacy_block_device_info = \
+            self.driver.need_legacy_block_device_info
+        self._last_info_instance_state_heal = 0
+        self._change_since_time = None
+
+    def _get_resource_tracker(self, nodename):
+        rt = self._resource_tracker_dict.get(nodename)
+        if not rt:
+            if not self.driver.node_is_available(nodename):
+                raise exception.NovaException(
+                    _("%s is not a valid node managed by this "
+                      "compute host.") % nodename)
+
+            rt = resource_tracker.ResourceTracker(self.host,
+                                                  self.driver,
+                                                  nodename)
+            self._resource_tracker_dict[nodename] = rt
+        return rt
+
+    def _instance_update(self, context, instance_uuid, **kwargs):
+        """Update an instance in the database using kwargs as value."""
+
+        instance_ref = self.conductor_api.instance_update(context,
+                                                          instance_uuid,
+                                                          **kwargs)
+        if (instance_ref['host'] == self.host and
+                self.driver.node_is_available(instance_ref['node'])):
+            rt = self._get_resource_tracker(instance_ref.get('node'))
+            rt.update_usage(context, instance_ref)
+
+        return instance_ref
+
+    @periodic_task.periodic_task(spacing=CONF.sync_instance_state_interval,
+                                 run_immediately=True)
+    def _heal_instance_state(self, context):
+        heal_interval = CONF.sync_instance_state_interval
+        if not heal_interval:
+            return
+        curr_time = time.time()
+        if self._last_info_instance_state_heal != 0:
+            if self._last_info_instance_state_heal + heal_interval > curr_time:
+                return
+        self._last_info_instance_state_heal = curr_time
+
+        kwargs = {
+            'username': cfg.CONF.nova_admin_username,
+            'password': cfg.CONF.nova_admin_password,
+            'tenant': cfg.CONF.nova_admin_tenant_name,
+            'auth_url': cfg.CONF.keystone_auth_url,
+            'region_name': cfg.CONF.proxy_region_name
+        }
+        reqCon = compute_context.RequestContext(**kwargs)
+        openStackClients = clients.OpenStackClients(reqCon)
+        cascadedNovaCli = openStackClients.nova()
+        try:
+            if self._change_since_time is None:
+                search_opts_args = {'all_tenants': True}
+                servers = cascadedNovaCli.servers.list(
+                    search_opts=search_opts_args)
+            else:
+                search_opts_args = {
+                    'changes-since': self._change_since_time,
+                    'all_tenants': True
+                }
+                servers = cascadedNovaCli.servers.list(
+                    search_opts=search_opts_args)
+            self._change_since_time = timeutils.isotime()
+            if len(servers) > 0:
+                LOG.debug(_('Updated the servers %s'), servers)
+
+            for server in servers:
+                self._instance_update(
+                    context,
+                    server._info['metadata']['mapping_uuid'],
+                    vm_state=server._info['OS-EXT-STS:vm_state'],
+                    task_state=server._info['OS-EXT-STS:task_state'],
+                    power_state=server._info['OS-EXT-STS:power_state'],
+                    launched_at=server._info['OS-SRV-USG:launched_at']
+                )
+                LOG.debug(_('Updated the server %s from nova-proxy'),
+                          server.id)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_('Failed to sync server status to db.'))
+
+    @periodic_task.periodic_task
+    def update_available_resource(self, context):
+        """See driver.get_available_resource()
+
+        Periodic process that keeps the compute host's understanding of
+        resource availability and usage in sync with the underlying hypervisor.
+ + :param context: security context + """ + new_resource_tracker_dict = {} + nodenames = set(self.driver.get_available_nodes()) + for nodename in nodenames: + rt = self._get_resource_tracker(nodename) + rt.update_available_resource(context) + new_resource_tracker_dict[nodename] = rt + + self._resource_tracker_dict = new_resource_tracker_dict + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def run_instance(self, context, instance, request_spec, + filter_properties, requested_networks, + injected_files, admin_password, + is_first_time, node, legacy_bdm_in_spec): + + if filter_properties is None: + filter_properties = {} + + @utils.synchronized(instance['uuid']) + def do_run_instance(): + self._run_instance( + context, + request_spec, + filter_properties, + requested_networks, + injected_files, + admin_password, + is_first_time, + node, + instance, + legacy_bdm_in_spec) + do_run_instance() + + def _run_instance(self, context, request_spec, + filter_properties, requested_networks, injected_files, + admin_password, is_first_time, node, instance, + legacy_bdm_in_spec): + """Launch a new instance with specified options.""" + + extra_usage_info = {} + + def notify(status, msg="", fault=None, **kwargs): + """Send a create.{start,error,end} notification.""" + type_ = "create.%(status)s" % dict(status=status) + info = extra_usage_info.copy() + info['message'] = unicode(msg) + self._notify_about_instance_usage( + context, + instance, + type_, + extra_usage_info=info, + fault=fault, + **kwargs) + + try: + self._prebuild_instance(context, instance) + + if request_spec and request_spec.get('image'): + image_meta = request_spec['image'] + else: + image_meta = {} + + extra_usage_info = {"image_name": image_meta.get('name', '')} + + notify("start") # notify that build is starting + + instance, network_info = self._build_instance(context, + request_spec, filter_properties, requested_networks, + injected_files, admin_password, is_first_time, node, + instance, image_meta, legacy_bdm_in_spec) + notify("end", msg=_("Success"), network_info=network_info) + + except exception.RescheduledException as e: + # Instance build encountered an error, and has been rescheduled. 
+ notify("error", fault=e) + + except exception.BuildAbortException as e: + # Instance build aborted due to a non-failure + LOG.info(e) + notify("end", msg=unicode(e)) # notify that build is done + + except Exception as e: + # Instance build encountered a non-recoverable error: + with excutils.save_and_reraise_exception(): + self._set_instance_error_state(context, instance['uuid']) + notify("error", fault=e) # notify that build failed + + def _set_instance_error_state(self, context, instance_uuid): + try: + self._instance_update(context, instance_uuid, + vm_state=vm_states.ERROR) + except exception.InstanceNotFound: + LOG.debug(_('Instance has been destroyed from under us while ' + 'trying to set it to ERROR'), + instance_uuid=instance_uuid) + + def _notify_about_instance_usage(self, context, instance, event_suffix, + network_info=None, system_metadata=None, + extra_usage_info=None, fault=None): + compute_utils.notify_about_instance_usage( + self.notifier, context, instance, event_suffix, + network_info=network_info, + system_metadata=system_metadata, + extra_usage_info=extra_usage_info, fault=fault) + + def _prebuild_instance(self, context, instance): + try: + self._start_building(context, instance) + except (exception.InstanceNotFound, + exception.UnexpectedDeletingTaskStateError): + msg = _("Instance disappeared before we could start it") + # Quickly bail out of here + raise exception.BuildAbortException(instance_uuid=instance['uuid'], + reason=msg) + + def _start_building(self, context, instance): + """Save the host and launched_on fields and log appropriately.""" + LOG.audit(_('Starting instance...'), context=context, + instance=instance) + self._instance_update(context, instance['uuid'], + vm_state=vm_states.BUILDING, + task_state=None, + expected_task_state=(task_states.SCHEDULING, + None)) + + def _build_instance( + self, + context, + request_spec, + filter_properties, + requested_networks, + injected_files, + admin_password, + is_first_time, + node, + instance, + image_meta, + legacy_bdm_in_spec): + context = context.elevated() + + # If neutron security groups pass requested security + # groups to allocate_for_instance() + if request_spec and self.is_neutron_security_groups: + security_groups = request_spec.get('security_group') + else: + security_groups = [] + + if node is None: + node = self.driver.get_available_nodes(refresh=True)[0] + LOG.debug(_("No node specified, defaulting to %s"), node) + + network_info = None + bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid( + context, instance['uuid']) + + # b64 decode the files to inject: + injected_files_orig = injected_files + injected_files = self._decode_files(injected_files) + + rt = self._get_resource_tracker(node) + try: + limits = filter_properties.get('limits', {}) + with rt.instance_claim(context, instance, limits): + # NOTE(russellb) It's important that this validation be done + # *after* the resource tracker instance claim, as that is where + # the host is set on the instance. 
+ self._validate_instance_group_policy(context, instance, + filter_properties) + macs = self.driver.macs_for_instance(instance) + dhcp_options = self.driver.dhcp_options_for_instance(instance) + + network_info = self._allocate_network( + context, + instance, + requested_networks, + macs, + security_groups, + dhcp_options) + + self._instance_update( + context, instance['uuid'], + vm_state=vm_states.BUILDING, + task_state=task_states.BLOCK_DEVICE_MAPPING) + + cascaded_ports = [] + self._heal_proxy_networks(context, instance, network_info) + cascaded_ports = self._heal_proxy_ports(context, instance, + network_info) + self._proxy_run_instance(context, + instance, + request_spec, + filter_properties, + requested_networks, + injected_files, + admin_password, + is_first_time, + node, + legacy_bdm_in_spec, + cascaded_ports) + except (exception.InstanceNotFound, + exception.UnexpectedDeletingTaskStateError): + # the instance got deleted during the spawn + # Make sure the async call finishes + msg = _("Instance disappeared during build") + if network_info is not None: + network_info.wait(do_raise=False) + try: + self._deallocate_network(context, instance) + except Exception: + msg = _('Failed to dealloc network ' + 'for deleted instance') + LOG.exception(msg, instance=instance) + raise exception.BuildAbortException( + instance_uuid=instance['uuid'], + reason=msg) + except (exception.UnexpectedTaskStateError, + exception.VirtualInterfaceCreateException) as e: + # Don't try to reschedule, just log and reraise. + with excutils.save_and_reraise_exception(): + LOG.debug(e.format_message(), instance=instance) + # Make sure the async call finishes + if network_info is not None: + network_info.wait(do_raise=False) + except exception.InvalidBDM: + with excutils.save_and_reraise_exception(): + if network_info is not None: + network_info.wait(do_raise=False) + try: + self._deallocate_network(context, instance) + except Exception: + msg = _('Failed to dealloc network ' + 'for failed instance') + LOG.exception(msg, instance=instance) + except Exception: + exc_info = sys.exc_info() + # try to re-schedule instance: + # Make sure the async call finishes + if network_info is not None: + network_info.wait(do_raise=False) + self._reschedule_or_error( + context, + instance, + exc_info, + requested_networks, + admin_password, + injected_files_orig, + is_first_time, + request_spec, + filter_properties, + bdms, + legacy_bdm_in_spec) + raise exc_info[0], exc_info[1], exc_info[2] + + # spawn success + return instance, network_info + + def _heal_proxy_ports(self, context, network_info): + physical_ports = [] + for netObj in network_info: + net_id = netObj['network']['id'] + physical_net_id = None + ovs_interface_mac = netObj['address'] + fixed_ips = [] + physicalNetIdExiFlag = False + if net_id in self.cascading_info_mapping['networks']: + physical_net_id = \ + self.cascading_info_mapping['networks'][net_id] + physicalNetIdExiFlag = True + LOG.debug(_('Physical network has been created in physical' + ' leval,logicalNetId:%s, physicalNetId: %s '), + net_id, physical_net_id) + if not physicalNetIdExiFlag: + raise exception.NetworkNotFound(network_id=net_id) + fixed_ips.append( + {'ip_address': + netObj['network']['subnets'] + [0]['ips'][0]['address']} + ) + reqbody = {'port': + { + 'tenant_id': instance['project_id'], + 'admin_state_up': True, + 'network_id': physical_net_id, + 'mac_address': ovs_interface_mac, + 'fixed_ips': fixed_ips, + 'binding:profile': + {"cascading_port_id": netObj['ovs_interfaceid']} + } + } + 
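+            # Illustrative sketch of the request body built above (all
+            # values are placeholders): the proxy asks the cascaded Neutron
+            # to create a port that mirrors the cascading-level port, e.g.
+            #   {'port': {'tenant_id': 'demo-tenant-id',
+            #             'admin_state_up': True,
+            #             'network_id': '<cascaded-net-uuid>',
+            #             'mac_address': 'fa:16:3e:00:00:01',
+            #             'fixed_ips': [{'ip_address': '10.0.0.5'}],
+            #             'binding:profile': {
+            #                 'cascading_port_id': '<cascading-port-uuid>'}}}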
+            neutronClient = self._get_neutron_pythonClient(
+                context,
+                cfg.CONF.proxy_region_name,
+                cfg.CONF.cascaded_neutron_url)
+            try:
+                bodyReps = neutronClient.create_port(reqbody)
+                physical_ports.append(bodyReps)
+                LOG.debug(_('Finished creating physical port, bodyReps %s'),
+                          bodyReps)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    LOG.error(_('Failed to create physical port, reqbody %s.'),
+                              reqbody)
+
+        return physical_ports
+
+    def _heal_proxy_networks(self, context, instance, network_info):
+        cascaded_network_list = []
+        self.cascading_info_mapping = {}
+        self.cascading_info_mapping['networks'] = {}
+        cascading_info_mapping_file = os.path.join(
+            CONF.instances_path,
+            'cascading_info_mapping.json')
+        if os.path.isfile(cascading_info_mapping_file):
+            cascading_info_mapping_file_context = libvirt_utils.load_file(
+                cascading_info_mapping_file)
+            mapping_networks = jsonutils.loads(
+                cascading_info_mapping_file_context)['networks']
+            self.cascading_info_mapping['networks'] = mapping_networks
+        for netObj in network_info:
+            net_id = netObj['network']['id']
+            physicalNetIdExiFlag = False
+            if net_id in self.cascading_info_mapping['networks']:
+                physicalNetIdExiFlag = True
+                physicalNetId = self.cascading_info_mapping['networks'][net_id]
+                cascaded_network_list.append(physicalNetId)
+                LOG.debug(_('Physical network already exists, no need to '
+                            'create it, logicalNetId: %s, '
+                            'physicalNetId: %s'), net_id, physicalNetId)
+            if not physicalNetIdExiFlag:
+                try:
+                    LOG.debug(_('Physical network does not exist, '
+                                'need to create it, logicalNetId: %s'),
+                              net_id)
+                    kwargs = {
+                        'username': cfg.CONF.neutron_admin_username,
+                        'password': cfg.CONF.neutron_admin_password,
+                        'tenant': cfg.CONF.neutron_admin_tenant_name,
+                        'auth_url': cfg.CONF.neutron_admin_auth_url,
+                        'region_name': cfg.CONF.os_region_name
+                    }
+                    reqCon = compute_context.RequestContext(**kwargs)
+                    neutron = neutronv2.get_client(reqCon, True)
+                    logicalnets = self.network_api._get_available_networks(
+                        reqCon,
+                        instance['project_id'],
+                        [net_id],
+                        neutron)
+                    neutronClient = self._get_neutron_pythonClient(
+                        context,
+                        cfg.CONF.proxy_region_name,
+                        cfg.CONF.cascaded_neutron_url)
+
+                    if logicalnets[0]['provider:network_type'] == 'vxlan':
+                        reqNetwork = {
+                            'network': {
+                                'provider:network_type': logicalnets[0]['provider:network_type'],
+                                'provider:segmentation_id': logicalnets[0]['provider:segmentation_id'],
+                                'tenant_id': instance['project_id'],
+                                'admin_state_up': True}}
+                    elif logicalnets[0]['provider:network_type'] == 'flat':
+                        reqNetwork = {
+                            'network': {
+                                'provider:network_type': logicalnets[0]['provider:network_type'],
+                                'provider:physical_network': logicalnets[0]['provider:physical_network'],
+                                'tenant_id': instance['project_id'],
+                                'admin_state_up': True}}
+                    else:
+                        reqNetwork = {
+                            'network': {
+                                'provider:network_type': logicalnets[0]['provider:network_type'],
+                                'provider:physical_network': logicalnets[0]['provider:physical_network'],
+                                'provider:segmentation_id': logicalnets[0]['provider:segmentation_id'],
+                                'tenant_id': instance['project_id'],
+                                'admin_state_up': True}}
+                    repsNetwork = neutronClient.create_network(reqNetwork)
+                    self.cascading_info_mapping['networks'][net_id] = \
+                        repsNetwork['network']['id']
+                    cascaded_network_list.append(repsNetwork['network']['id'])
+                    LOG.debug(_('Finished creating physical network, '
+                                'repsNetwork %s'), repsNetwork)
+                    reqSubnet = {
+                        'subnet': {
+                            'network_id': repsNetwork['network']['id'],
+                            'cidr': netObj['network']['subnets'][0]['cidr'],
+                            'ip_version':
netObj['network']['subnets'][0]['version'], + 'tenant_id': instance['project_id']}} + neutronClient.create_subnet(reqSubnet) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Fail to synchronizate physical network')) + + cascading_info_mapping_path = os.path.join( + CONF.instances_path, + 'cascading_info_mapping.json') + libvirt_utils.write_to_file( + cascading_info_mapping_path, + jsonutils.dumps( + self.cascading_info_mapping)) + + return cascaded_network_list + + def _log_original_error(self, exc_info, instance_uuid): + LOG.error(_('Error: %s') % exc_info[1], instance_uuid=instance_uuid, + exc_info=exc_info) + + def _cleanup_volumes(self, context, instance_uuid, bdms): + for bdm in bdms: + LOG.debug(_("terminating bdm %s") % bdm, + instance_uuid=instance_uuid) + if bdm.volume_id and bdm.delete_on_termination: + self.volume_api.delete(context, bdm.volume_id) + # NOTE(vish): bdms will be deleted on instance destroy + + def _reschedule_or_error( + self, + context, + instance, + exc_info, + requested_networks, + admin_password, + injected_files, + is_first_time, + request_spec, + filter_properties, + bdms=None, + legacy_bdm_in_spec=True): + instance_uuid = instance['uuid'] + rescheduled = False + + compute_utils.add_instance_fault_from_exc( + context, + self.conductor_api, + instance, + exc_info[1], + exc_info=exc_info) + self._notify_about_instance_usage( + context, + instance, + 'instance.create.error', + fault=exc_info[1]) + + try: + LOG.debug(_("Clean up resource before rescheduling."), + instance=instance) + if bdms is None: + bdms = (block_device_obj.BlockDeviceMappingList. + get_by_instance_uuid(context, instance.uuid)) + + self._shutdown_instance(context, instance, + bdms, requested_networks) + self._cleanup_volumes(context, instance['uuid'], bdms) + except Exception: + # do not attempt retry if clean up failed: + with excutils.save_and_reraise_exception(): + self._log_original_error(exc_info, instance_uuid) + + return rescheduled + + def _quota_rollback(self, context, reservations, project_id=None, + user_id=None): + if reservations: + self.conductor_api.quota_rollback(context, reservations, + project_id=project_id, + user_id=user_id) + + def _complete_deletion(self, context, instance, bdms, + quotas, system_meta): + if quotas: + quotas.commit() + + # ensure block device mappings are not leaked + for bdm in bdms: + bdm.destroy() + + self._notify_about_instance_usage(context, instance, "delete.end", + system_metadata=system_meta) + + if CONF.vnc_enabled or CONF.spice.enabled: + if CONF.cells.enable: + self.cells_rpcapi.consoleauth_delete_tokens(context, + instance.uuid) + else: + self.consoleauth_rpcapi.delete_tokens_for_instance( + context, + instance.uuid) + + @hooks.add_hook("delete_instance") + def _delete_instance(self, context, instance, bdms, + reservations=None): + """Delete an instance on this host. Commit or rollback quotas + as necessary. + """ + instance_uuid = instance['uuid'] + + project_id, user_id = quotas_obj.ids_from_instance(context, instance) + + was_soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED + if was_soft_deleted: + # Instances in SOFT_DELETED vm_state have already had quotas + # decremented. 
+ try: + self._quota_rollback(context, reservations, + project_id=project_id, + user_id=user_id) + except Exception: + pass + reservations = None + + try: + events = self.instance_events.clear_events_for_instance(instance) + if events: + LOG.debug(_('Events pending at deletion: %(events)s'), + {'events': ','.join(events.keys())}, + instance=instance) + db_inst = obj_base.obj_to_primitive(instance) + instance.info_cache.delete() + self._notify_about_instance_usage(context, instance, + "delete.start") + self._shutdown_instance(context, db_inst, bdms) + # NOTE(vish): We have already deleted the instance, so we have + # to ignore problems cleaning up the volumes. It + # would be nice to let the user know somehow that + # the volume deletion failed, but it is not + # acceptable to have an instance that can not be + # deleted. Perhaps this could be reworked in the + # future to set an instance fault the first time + # and to only ignore the failure if the instance + # is already in ERROR. + try: + self._cleanup_volumes(context, instance_uuid, bdms) + except Exception as exc: + err_str = _("Ignoring volume cleanup failure due to %s") + LOG.warn(err_str % exc, instance=instance) + # if a delete task succeed, always update vm state and task + # state without expecting task state to be DELETING + instance.vm_state = vm_states.DELETED + instance.task_state = None + instance.terminated_at = timeutils.utcnow() + instance.save() + system_meta = utils.instance_sys_meta(instance) + db_inst = self.conductor_api.instance_destroy( + context, obj_base.obj_to_primitive(instance)) + instance = instance_obj.Instance._from_db_object(context, + instance, + db_inst) + except Exception: + with excutils.save_and_reraise_exception(): + self._quota_rollback(context, reservations, + project_id=project_id, + user_id=user_id) + + quotas = quotas_obj.Quotas.from_reservations(context, + reservations, + instance=instance) + self._complete_deletion(context, + instance, + bdms, + quotas, + system_meta) + + @wrap_exception() + @wrap_instance_event + @wrap_instance_fault + def terminate_instance(self, context, instance, bdms, reservations): + """Terminate an instance on this host.""" + # NOTE (ndipanov): If we get non-object BDMs, just get them from the + # db again, as this means they are sent in the old format and we want + # to avoid converting them back when we can just get them. + # Remove this when we bump the RPC major version to 4.0 + if (bdms and + any(not isinstance(bdm, block_device_obj.BlockDeviceMapping) + for bdm in bdms)): + bdms = (block_device_obj.BlockDeviceMappingList. + get_by_instance_uuid(context, instance.uuid)) + + @utils.synchronized(instance['uuid']) + def do_terminate_instance(instance, bdms): + try: + self._delete_instance(context, instance, bdms, + reservations=reservations) + except exception.InstanceNotFound: + LOG.info(_("Instance disappeared during terminate"), + instance=instance) + except Exception: + # As we're trying to delete always go to Error if something + # goes wrong that _delete_instance can't handle. + with excutils.save_and_reraise_exception(): + LOG.exception(_('Setting instance vm_state to ERROR'), + instance=instance) + self._set_instance_error_state(context, instance['uuid']) + + do_terminate_instance(instance, bdms) + + def _heal_syn_server_metadata(self, context, + cascadingInsId, cascadedInsId): + """ + when only reboots the server scenario, + needs to synchronize server metadata between + logical and physical openstack. 
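+
+        Illustrative example (values are placeholders): if the cascading
+        (logical) server has metadata {'role': 'web'} and the cascaded
+        (physical) server has {'role': 'web', 'stale': 'x',
+        'mapping_uuid': '<uuid>'}, then 'stale' is deleted from the
+        cascaded server, 'mapping_uuid' is kept, and the cascading
+        metadata is re-applied to the cascaded server.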
+ """ + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + cascadedSerInf = cascadedNovaCli.servers.get(cascadedInsId) + cascadedSerMedInf = cascadedSerInf.metadata + + cascadingNovCli = self._get_nova_pythonClient( + context, + cfg.CONF.os_region_name, + cfg.CONF.cascading_nova_url) + cascadingSerInf = cascadingNovCli.servers.get(cascadingInsId) + cascadingSerMedInf = cascadingSerInf.metadata + + tmpCascadedSerMedInf = dict(cascadedSerMedInf) + del tmpCascadedSerMedInf['mapping_uuid'] + + if tmpCascadedSerMedInf == cascadingSerMedInf: + LOG.debug(_("Don't need to synchronize server metadata between" + "logical and physical openstack.")) + return + else: + LOG.debug(_('synchronize server metadata between logical and' + 'physical openstack,cascadingSerMedInf %s,cascadedSerMedInf %s'), + cascadingSerMedInf, + cascadedSerMedInf) + delKeys = [] + for key in cascadedSerMedInf: + if key != 'mapping_uuid' and key not in cascadingSerMedInf: + delKeys.append(key) + if len(delKeys) > 0: + cascadedNovaCli.servers.delete_meta(cascadedInsId, delKeys) + cascadingSerMedInf['mapping_uuid'] = \ + cascadedSerMedInf['mapping_uuid'] + cascadedNovaCli.servers.set_meta(cascadedInsId, cascadingSerMedInf) + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def pause_instance(self, context, instance): + """Pause an instance on this host.""" + context = context.elevated() + LOG.audit(_('Pausing'), context=context, instance=instance) + self._notify_about_instance_usage(context, instance, 'pause.start') + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('start vm failed,can not find server' + 'in cascaded layer.'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + cascadedNovaCli.servers.pause(cascaded_instance_id) + self._notify_about_instance_usage(context, instance, 'pause.end') + + @wrap_exception() + @reverts_task_state + @wrap_instance_fault + @delete_image_on_error + def snapshot_instance(self, context, image_id, instance): + """Snapshot an instance on this host. 
+ + :param context: security context + :param instance: a nova.objects.instance.Instance object + :param image_id: glance.db.sqlalchemy.models.Image.Id + """ + # NOTE(dave-mcnally) the task state will already be set by the api + # but if the compute manager has crashed/been restarted prior to the + # request getting here the task state may have been cleared so we set + # it again and things continue normally + glanceClient = glance.GlanceClientWrapper() + image = glanceClient.call(context, 2, 'get', image_id) + + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('can not snapshot instance server %s.'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + resp_image_id = cascadedNovaCli.servers.create_image( + cascaded_instance_id, + image['name']) + # update image's location + url = '%s/v2/images/%s' % (CONF.cascaded_glance_url, resp_image_id) + locations = [{ + 'url': url, + 'metadata': { + 'image_id': str(resp_image_id), + 'image_from': 'snapshot' + } + }] + glanceClient.call(context, 2, 'update', image_id, + remove_props=None, locations=locations) + LOG.debug(_('Finish update image %s locations %s'), + image_id, locations) + + def pre_start_hook(self): + """After the service is initialized, but before we fully bring + the service up by listening on RPC queues, make sure to update + our available resources (and indirectly our available nodes). + """ + self.update_available_resource(nova.context.get_admin_context()) + + @contextlib.contextmanager + def _error_out_instance_on_exception(self, context, instance_uuid, + reservations=None, + instance_state=vm_states.ACTIVE): + try: + yield + except NotImplementedError as error: + with excutils.save_and_reraise_exception(): + self._quota_rollback(context, reservations) + LOG.info(_("Setting instance back to %(state)s after: " + "%(error)s") % + {'state': instance_state, 'error': error}, + instance_uuid=instance_uuid) + self._instance_update(context, instance_uuid, + vm_state=instance_state, + task_state=None) + except exception.InstanceFaultRollback as error: + self._quota_rollback(context, reservations) + LOG.info(_("Setting instance back to ACTIVE after: %s"), + error, instance_uuid=instance_uuid) + self._instance_update(context, instance_uuid, + vm_state=vm_states.ACTIVE, + task_state=None) + raise error.inner_exception + except Exception as error: + LOG.exception(_('Setting instance vm_state to ERROR'), + instance_uuid=instance_uuid) + with excutils.save_and_reraise_exception(): + self._quota_rollback(context, reservations) + self._set_instance_error_state(context, instance_uuid) + + def _get_volume_bdms(self, bdms, legacy=True): + """Return only bdms that have a volume_id.""" + if legacy: + return [bdm for bdm in bdms if bdm['volume_id']] + else: + return [bdm for bdm in bdms + if bdm['destination_type'] == 'volume'] + + @object_compat + @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported) + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def rebuild_instance(self, context, instance, orig_image_ref, image_ref, + injected_files, new_pass, orig_sys_metadata, + bdms, recreate, on_shared_storage, + preserve_ephemeral=False): + """Destroy and re-make this instance. + + A 'rebuild' effectively purges all existing data from the system and + remakes the VM with given 'metadata' and 'personalities'. 
+ + :param context: `nova.RequestContext` object + :param instance: Instance object + :param orig_image_ref: Original image_ref before rebuild + :param image_ref: New image_ref for rebuild + :param injected_files: Files to inject + :param new_pass: password to set on rebuilt instance + :param orig_sys_metadata: instance system metadata from pre-rebuild + :param bdms: block-device-mappings to use for rebuild + :param recreate: True if the instance is being recreated (e.g. the + hypervisor it was on failed) - cleanup of old state will be + skipped. + :param on_shared_storage: True if instance files on shared storage + :param preserve_ephemeral: True if the default ephemeral storage + partition must be preserved on rebuild + """ + context = context.elevated() + with self._error_out_instance_on_exception(context, instance['uuid']): + LOG.audit(_("Rebuilding instance"), context=context, + instance=instance) + if bdms is None: + bdms = self.conductor_api.\ + block_device_mapping_get_all_by_instance( + context, instance) + # NOTE(sirp): this detach is necessary b/c we will reattach the + # volumes in _prep_block_devices below. + for bdm in self._get_volume_bdms(bdms): + self.volume_api.detach(context, bdm['volume_id']) + + kwargs = {} + disk_config = None + if len(injected_files) > 0: + kwargs['personality'] = injected_files + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('Rebuild failed,can not find server %s '), + instance['uuid']) + return + if cfg.CONF.cascaded_glance_flag: + image_uuid = self._get_cascaded_image_uuid(context, + image_ref) + else: + image_uuid = image_ref + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + cascadedNovaCli.servers.rebuild(cascaded_instance_id, image_uuid, + new_pass, disk_config, **kwargs) + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def suspend_instance(self, context, instance): + """Suspend the given instance.""" + context = context.elevated() + + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('start vm failed,can not find server ' + 'in cascaded layer.'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + cascadedNovaCli.servers.suspend(cascaded_instance_id) + self._notify_about_instance_usage(context, instance, 'suspend') + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def resume_instance(self, context, instance): + """Resume the given suspended instance.""" + context = context.elevated() + LOG.audit(_('Resuming'), context=context, instance=instance) + + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('resume server,but can not find server'), + instance['uuid']) + return + + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + try: + cascadedNovaCli.servers.resume(cascaded_instance_id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to resume server %s .'), + cascaded_instance_id) + self._notify_about_instance_usage(context, instance, 'resume') + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def unpause_instance(self, context, instance): + """Unpause a paused instance on this host.""" + context = 
context.elevated() + LOG.audit(_('Unpausing'), context=context, instance=instance) + self._notify_about_instance_usage(context, instance, 'unpause.start') + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('start vm failed,can not find server' + ' in cascaded layer.'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + cascadedNovaCli.servers.unpause(cascaded_instance_id) + self._notify_about_instance_usage(context, instance, 'unpause.end') + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def start_instance(self, context, instance): + """Starting an instance on this host.""" + self._notify_about_instance_usage(context, instance, "power_on.start") + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('start vm failed,can not find server' + ' in cascaded layer.'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + cascadedNovaCli.servers.start(cascaded_instance_id) + self._notify_about_instance_usage(context, instance, "power_on.end") + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def stop_instance(self, context, instance): + """Stopping an instance on this host.""" + self._notify_about_instance_usage(context, + instance, "power_off.start") + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('stop vm failed,can not find server' + ' in cascaded layer.'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + cascadedNovaCli.servers.stop(cascaded_instance_id) + self._notify_about_instance_usage(context, instance, "power_off.end") + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def reboot_instance(self, context, instance, block_device_info, + reboot_type): + """Reboot an instance on this host.""" + self._notify_about_instance_usage(context, instance, "reboot.start") + context = context.elevated() + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.error(_('Reboot can not find server %s.'), instance) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + try: + self._heal_syn_server_metadata(context, instance['uuid'], + cascaded_instance_id) + cascadedNovaCli.servers.reboot(cascaded_instance_id, reboot_type) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to reboot server %s .'), + cascaded_instance_id) + self._notify_about_instance_usage(context, instance, "reboot.end") + + def _delete_proxy_instance(self, context, instance): + proxy_instance_id = instance['mapping_uuid'] + if proxy_instance_id is None: + LOG.error(_('Delete server %s,but can not find this server'), + proxy_instance_id) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + try: + cascadedNovaCli.servers.delete(proxy_instance_id) + self._instance_update( + context, + instance['uuid'], + vm_state=vm_states.DELETED, + task_state=None) + LOG.debug(_('delete the server %s from nova-proxy'), + instance['uuid']) + except Exception: + if 
isinstance(sys.exc_info()[1], novaclient.exceptions.NotFound): + return + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to delete server %s'), proxy_instance_id) + + def _get_instance_nw_info(self, context, instance, use_slave=False): + """Get a list of dictionaries of network data of an instance.""" + if (not hasattr(instance, 'system_metadata') or + len(instance['system_metadata']) == 0): + # NOTE(danms): Several places in the code look up instances without + # pulling system_metadata for performance, and call this function. + # If we get an instance without it, re-fetch so that the call + # to network_api (which requires it for instance_type) will + # succeed. + instance = instance_obj.Instance.get_by_uuid(context, + instance['uuid'], + use_slave=use_slave) + + network_info = self.network_api.get_instance_nw_info(context, + instance) + return network_info + + def _get_instance_volume_block_device_info(self, context, instance, + refresh_conn_info=False, + bdms=None): + """Transform volumes to the driver block_device format.""" + + if not bdms: + bdms = (block_device_obj.BlockDeviceMappingList. + get_by_instance_uuid(context, instance['uuid'])) + block_device_mapping = ( + driver_block_device.convert_volumes(bdms) + + driver_block_device.convert_snapshots(bdms) + + driver_block_device.convert_images(bdms)) + + if not refresh_conn_info: + # if the block_device_mapping has no value in connection_info + # (returned as None), don't include in the mapping + block_device_mapping = [ + bdm for bdm in block_device_mapping + if bdm.get('connection_info')] + else: + block_device_mapping = driver_block_device.refresh_conn_infos( + block_device_mapping, context, instance, self.volume_api, + self.driver) + + if self.use_legacy_block_device_info: + block_device_mapping = driver_block_device.legacy_block_devices( + block_device_mapping) + return {'block_device_mapping': block_device_mapping} + + def _try_deallocate_network(self, context, instance, + requested_networks=None): + try: + # tear down allocated network structure + self._deallocate_network(context, instance, requested_networks) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to deallocate network for instance.'), + instance=instance) + self._set_instance_error_state(context, instance['uuid']) + + def _deallocate_network(self, context, instance, + requested_networks=None): + LOG.debug(_('Deallocating network for instance'), instance=instance) + self.network_api.deallocate_for_instance( + context, instance, requested_networks=requested_networks) + + def _shutdown_instance(self, context, instance, + bdms, requested_networks=None, notify=True): + """Shutdown an instance on this host.""" + context = context.elevated() + LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'}, + context=context, instance=instance) + + if notify: + self._notify_about_instance_usage(context, instance, + "shutdown.start") + + # get network info before tearing down + try: + self._get_instance_nw_info(context, instance) + except (exception.NetworkNotFound, exception.NoMoreFixedIps, + exception.InstanceInfoCacheNotFound): + network_model.NetworkInfo() + + # NOTE(vish) get bdms before destroying the instance + vol_bdms = [bdm for bdm in bdms if bdm.is_volume] +# block_device_info = self._get_instance_volume_block_device_info( +# context, instance, bdms=bdms) + + # NOTE(melwitt): attempt driver destroy before releasing ip, may + # want to keep ip allocated for certain failures + try: + 
self._delete_proxy_instance(context, instance) + except exception.InstancePowerOffFailure: + # if the instance can't power off, don't release the ip + with excutils.save_and_reraise_exception(): + pass + except Exception: + with excutils.save_and_reraise_exception(): + # deallocate ip and fail without proceeding to + # volume api calls, preserving current behavior + self._try_deallocate_network(context, instance, + requested_networks) + + self._try_deallocate_network(context, instance, requested_networks) + + for bdm in vol_bdms: + try: + # NOTE(vish): actual driver detach done in driver.destroy, so + # just tell cinder that we are done with it. + # connector = self.driver.get_volume_connector(instance) + # self.volume_api.terminate_connection(context, + # bdm.volume_id, + # connector) + self.volume_api.detach(context, bdm.volume_id) + except exception.DiskNotFound as exc: + LOG.warn(_('Ignoring DiskNotFound: %s') % exc, + instance=instance) + except exception.VolumeNotFound as exc: + LOG.warn(_('Ignoring VolumeNotFound: %s') % exc, + instance=instance) + + if notify: + self._notify_about_instance_usage(context, instance, + "shutdown.end") + + def _get_nova_pythonClient(self, context, regNam, nova_url): + try: + # kwargs = {'auth_token':None, + # 'username':context.values['user_name'], + # 'password':cfg.CONF.nova_admin_password, + # 'aws_creds':None,'tenant':None, + # 'tenant_id':context.values['tenant'], + # 'auth_url':cfg.CONF.keystone_auth_url, + # 'roles':context.values['roles'], + # 'is_admin':context.values['is_admin'], + # 'region_name':regNam + # } + kwargs = { + 'auth_token': context.auth_token, + 'username': context.user_name, + 'tenant_id': context.tenant, + 'auth_url': cfg.CONF.keystone_auth_url, + 'roles': context.roles, + 'is_admin': context.is_admin, + 'region_name': regNam, + 'nova_url': nova_url + } + reqCon = compute_context.RequestContext(**kwargs) + openStackClients = clients.OpenStackClients(reqCon) + novaClient = openStackClients.nova() + return novaClient + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to get nova python client.')) + + def _get_neutron_pythonClient(self, context, regNam, neutrol_url): + try: + kwargs = { + 'endpoint_url': neutrol_url, + 'timeout': CONF.neutron_url_timeout, + 'insecure': CONF.neutron_api_insecure, + 'ca_cert': CONF.neutron_ca_certificates_file, + 'username': CONF.neutron_admin_username, + 'password': CONF.neutron_admin_password, + 'tenant_name': CONF.neutron_admin_tenant_name, + 'auth_url': CONF.neutron_admin_auth_url, + 'auth_strategy': CONF.neutron_auth_strategy + } + neutronClient = clientv20.Client(**kwargs) + return neutronClient + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to get neutron python client.')) + + def _reschedule(self, context, request_spec, filter_properties, + instance_uuid, scheduler_method, method_args, task_state, + exc_info=None): + """Attempt to re-schedule a compute operation.""" + + retry = filter_properties.get('retry', None) + if not retry: + # no retry information, do not reschedule. 
+ LOG.debug(_("Retry info not present, will not reschedule"), + instance_uuid=instance_uuid) + return + + if not request_spec: + LOG.debug(_("No request spec, will not reschedule"), + instance_uuid=instance_uuid) + return + + request_spec['instance_uuids'] = [instance_uuid] + + LOG.debug(_("Re-scheduling %(method)s: attempt %(num)d") % + {'method': scheduler_method.func_name, + 'num': retry['num_attempts']}, instance_uuid=instance_uuid) + + # reset the task state: + self._instance_update(context, instance_uuid, task_state=task_state) + + if exc_info: + # stringify to avoid circular ref problem in json serialization: + retry['exc'] = traceback.format_exception(*exc_info) + + scheduler_method(context, *method_args) + return True + + def _reschedule_resize_or_reraise( + self, + context, + image, + instance, + exc_info, + instance_type, + reservations, + request_spec, + filter_properties): + """Try to re-schedule the resize or re-raise the original error to + error out the instance. + """ + if not request_spec: + request_spec = {} + if not filter_properties: + filter_properties = {} + + rescheduled = False + instance_uuid = instance['uuid'] + + try: + # NOTE(comstud): remove the scheduler RPCAPI method when + # this is adjusted to send to conductor... and then + # deprecate the scheduler manager method. + scheduler_method = self.scheduler_rpcapi.prep_resize + instance_p = obj_base.obj_to_primitive(instance) + method_args = (instance_p, instance_type, image, request_spec, + filter_properties, reservations) + task_state = task_states.RESIZE_PREP + + rescheduled = self._reschedule( + context, + request_spec, + filter_properties, + instance_uuid, + scheduler_method, + method_args, + task_state, + exc_info) + except Exception as error: + rescheduled = False + LOG.exception(_("Error trying to reschedule"), + instance_uuid=instance_uuid) + compute_utils.add_instance_fault_from_exc( + context, + self.conductor_api, + instance, + error, + exc_info=sys.exc_info()) + self._notify_about_instance_usage(context, instance, + 'resize.error', fault=error) + + if rescheduled: + self._log_original_error(exc_info, instance_uuid) + compute_utils.add_instance_fault_from_exc( + context, + self.conductor_api, + instance, + exc_info[1], + exc_info=exc_info) + self._notify_about_instance_usage( + context, + instance, + 'resize.error', + fault=exc_info[1]) + else: + # not re-scheduling + raise exc_info[0], exc_info[1], exc_info[2] + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def prep_resize(self, context, image, instance, instance_type, + reservations, request_spec, filter_properties, node): + """Initiates the process of moving a running instance to another host. + + Possibly changes the RAM and disk size in the process. 
+ + """ + if node is None: + node = self.driver.get_available_nodes(refresh=True)[0] + LOG.debug(_("No node specified, defaulting to %s"), node, + instance=instance) + + with self._error_out_instance_on_exception(context, instance['uuid'], + reservations): + self.conductor_api.notify_usage_exists( + context, instance, current_period=True) + self._notify_about_instance_usage( + context, instance, "resize.prep.start") + try: + self._prep_resize(context, image, instance, + instance_type, reservations, + request_spec, filter_properties, + node) + except Exception: + # try to re-schedule the resize elsewhere: + exc_info = sys.exc_info() + self._reschedule_resize_or_reraise( + context, + image, + instance, + exc_info, + instance_type, + reservations, + request_spec, + filter_properties) + finally: + extra_usage_info = dict( + new_instance_type=instance_type['name'], + new_instance_type_id=instance_type['id']) + + self._notify_about_instance_usage( + context, instance, "resize.prep.end", + extra_usage_info=extra_usage_info) + + def _prep_resize(self, context, image, instance, instance_type, + reservations, request_spec, filter_properties, node): + + if not filter_properties: + filter_properties = {} + + if not instance['host']: + self._set_instance_error_state(context, instance['uuid']) + msg = _('Instance has no source host') + raise exception.MigrationError(msg) + + same_host = instance['host'] == self.host + if same_host and not CONF.allow_resize_to_same_host: + self._set_instance_error_state(context, instance['uuid']) + msg = _('destination same as source!') + raise exception.MigrationError(msg) + + # NOTE(danms): Stash the new instance_type to avoid having to + # look it up in the database later + sys_meta = instance.system_metadata + flavors.save_flavor_info(sys_meta, instance_type, prefix='new_') + # NOTE(mriedem): Stash the old vm_state so we can set the + # resized/reverted instance back to the same state later. 
+ vm_state = instance['vm_state'] + LOG.debug(_('Stashing vm_state: %s'), vm_state, instance=instance) + sys_meta['old_vm_state'] = vm_state + instance.save() + + limits = filter_properties.get('limits', {}) + rt = self._get_resource_tracker(node) + with rt.resize_claim(context, instance, instance_type, + limits=limits) as claim: + LOG.audit(_('Migrating'), context=context, instance=instance) + self.compute_rpcapi.resize_instance( + context, + instance, + claim.migration, + image, + instance_type, + reservations) + + def _terminate_volume_connections(self, context, instance, bdms): + connector = self.driver.get_volume_connector(instance) + for bdm in bdms: + if bdm.is_volume: + self.volume_api.terminate_connection(context, bdm.volume_id, + connector) + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @errors_out_migration + @wrap_instance_fault + def resize_instance(self, context, instance, image, + reservations, migration, instance_type): + """Starts the migration of a running instance to another host.""" + with self._error_out_instance_on_exception(context, instance.uuid, + reservations): + if not instance_type: + instance_type = flavor_obj.Flavor.get_by_id( + context, migration['new_instance_type_id']) + + network_info = self._get_instance_nw_info(context, instance) + + migration.status = 'migrating' + migration.save(context.elevated()) + + instance.task_state = task_states.RESIZE_MIGRATING + instance.save(expected_task_state=task_states.RESIZE_PREP) + + self._notify_about_instance_usage( + context, instance, "resize.start", network_info=network_info) + + bdms = (block_device_obj.BlockDeviceMappingList. + get_by_instance_uuid(context, instance.uuid)) +# block_device_info = self._get_instance_volume_block_device_info( +# context, instance, bdms=bdms) + +# disk_info = self.driver.migrate_disk_and_power_off( +# context, instance, migration.dest_host, +# instance_type, network_info, +# block_device_info) + disk_info = None + + self._terminate_volume_connections(context, instance, bdms) + + migration_p = obj_base.obj_to_primitive(migration) + instance_p = obj_base.obj_to_primitive(instance) + self.conductor_api.network_migrate_instance_start(context, + instance_p, + migration_p) + + migration.status = 'post-migrating' + migration.save(context.elevated()) + + instance.host = migration.dest_compute + instance.node = migration.dest_node + instance.task_state = task_states.RESIZE_MIGRATED + instance.save(expected_task_state=task_states.RESIZE_MIGRATING) + + self.compute_rpcapi.finish_resize( + context, + instance, + migration, + image, + disk_info, + migration.dest_compute, + reservations=reservations) + + self._notify_about_instance_usage(context, instance, "resize.end", + network_info=network_info) + self.instance_events.clear_events_for_instance(instance) + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @errors_out_migration + @wrap_instance_fault + def finish_resize(self, context, disk_info, image, instance, + reservations, migration): + """Completes the migration process. + + Sets up the newly transferred disk and turns on the instance at its + new host machine. 
+ + """ + try: + self._finish_resize(context, instance, migration, + disk_info, image) + self._quota_commit(context, reservations) + except Exception: + LOG.exception(_('Setting instance vm_state to ERROR'), + instance=instance) + with excutils.save_and_reraise_exception(): + try: + self._quota_rollback(context, reservations) + except Exception as qr_error: + LOG.exception(_("Failed to rollback quota for failed " + "finish_resize: %s"), + qr_error, instance=instance) + self._set_instance_error_state(context, instance['uuid']) + + @object_compat + @wrap_exception() + @reverts_task_state + @wrap_instance_fault + def reserve_block_device_name(self, context, instance, device, + volume_id, disk_bus=None, device_type=None): + # NOTE(ndipanov): disk_bus and device_type will be set to None if not + # passed (by older clients) and defaulted by the virt driver. Remove + # default values on the next major RPC version bump. + + @utils.synchronized(instance['uuid']) + def do_reserve(): + bdms = ( + block_device_obj.BlockDeviceMappingList.get_by_instance_uuid( + context, instance.uuid)) + + device_name = compute_utils.get_device_name_for_instance( + context, instance, bdms, device) + + # NOTE(vish): create bdm here to avoid race condition + bdm = block_device_obj.BlockDeviceMapping( + source_type='volume', destination_type='volume', + instance_uuid=instance.uuid, + volume_id=volume_id or 'reserved', + device_name=device_name, + disk_bus=disk_bus, device_type=device_type) + bdm.create(context) + + return device_name + + return do_reserve() + + @object_compat + @wrap_exception() + @reverts_task_state + @wrap_instance_fault + def attach_volume(self, context, volume_id, mountpoint, + instance, bdm=None): + """Attach a volume to an instance.""" + if not bdm: + bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id( + context, volume_id) + driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm) + try: + return self._attach_volume(context, instance, driver_bdm) + except Exception: + with excutils.save_and_reraise_exception(): + bdm.destroy(context) + + def _attach_volume(self, context, instance, bdm): + context = context.elevated() + LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'), + {'volume_id': bdm.volume_id, + 'mountpoint': bdm['mount_device']}, + context=context, instance=instance) + try: + # bdm.attach(context, instance, self.volume_api, self.driver, + # do_check_attach=False, do_driver_attach=True) + self.volume_api.attach(context, bdm.volume_id, + instance['uuid'], bdm['mount_device']) + proxy_volume_id = None + try: + bodyReps = self.volume_api.get(context, bdm.volume_id) + proxy_volume_id = bodyReps['volume_metadata']['mapping_uuid'] + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to get physical volume id ,logical' + ' volume id %s,device %s'), + bdm.volume_id, bdm['mount_device']) + if proxy_volume_id is None: + LOG.error(_('attach_volume can not find physical volume id %s' + ' in physical opensack lay,logical volume id %s'), + instance['uuid'], bdm.volume_id) + return + + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + bodyReps = cascadedNovaCli.volumes.create_server_volume( + instance['mapping_uuid'], + proxy_volume_id, bdm['mount_device']) + except Exception: # pylint: disable=W0702 + with excutils.save_and_reraise_exception(): + LOG.exception(_("Failed to attach %(volume_id)s " + "at %(mountpoint)s"), + {'volume_id': bdm.volume_id, + 'mountpoint': 
bdm['mount_device']}, + context=context, instance=instance) + self.volume_api.unreserve_volume(context, bdm.volume_id) + + info = {'volume_id': bdm.volume_id} + self._notify_about_instance_usage( + context, instance, "volume.attach", extra_usage_info=info) + + @wrap_exception() + @reverts_task_state + @wrap_instance_fault + def detach_volume(self, context, volume_id, instance): + """Detach a volume from an instance.""" + bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id( + context, volume_id) + if CONF.volume_usage_poll_interval > 0: + vol_stats = [] + mp = bdm.device_name + # Handle bootable volumes which will not contain /dev/ + if '/dev/' in mp: + mp = mp[5:] + try: + vol_stats = self.driver.block_stats(instance['name'], mp) + except NotImplementedError: + pass + + if vol_stats: + LOG.debug(_("Updating volume usage cache with totals"), + instance=instance) + rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats + self.conductor_api.vol_usage_update(context, volume_id, + rd_req, rd_bytes, + wr_req, wr_bytes, + instance, + update_totals=True) + + self._detach_volume(context, instance, bdm) + self.volume_api.detach(context.elevated(), volume_id) + bdm.destroy() + info = dict(volume_id=volume_id) + self._notify_about_instance_usage( + context, instance, "volume.detach", extra_usage_info=info) + + def _detach_volume(self, context, instance, bdm): + """Do the actual driver detach using block device mapping.""" + mp = bdm.device_name + volume_id = bdm.volume_id + + LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'), + {'volume_id': volume_id, 'mp': mp}, + context=context, instance=instance) + try: + proxy_volume_id = None + try: + bodyReps = self.volume_api.get(context, volume_id) + proxy_volume_id = bodyReps['volume_metadata']['mapping_uuid'] + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to get physical volume id ,logical' + ' volume id %s,device %s'), + volume_id, mp) + if proxy_volume_id is None: + LOG.error(_('detach_volume can not find physical volume id %s ' + 'in physical opensack lay,logical volume id %s'), + instance['uuid'], volume_id) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + bodyReps = cascadedNovaCli.volumes.delete_server_volume( + instance['mapping_uuid'], proxy_volume_id) + except Exception: # pylint: disable=W0702 + with excutils.save_and_reraise_exception(): + LOG.exception(_('Failed to detach volume %(volume_id)s ' + 'from %(mp)s'), + {'volume_id': volume_id, 'mp': mp}, + context=context, instance=instance) + self.volume_api.roll_detaching(context, volume_id) + + @wrap_exception() + @wrap_instance_event + @wrap_instance_fault + def confirm_resize(self, context, instance, reservations, migration): + + @utils.synchronized(instance['uuid']) + def do_confirm_resize(context, instance, migration_id): + # NOTE(wangpan): Get the migration status from db, if it has been + # confirmed, we do nothing and return here + LOG.debug(_("Going to confirm migration %s") % migration_id, + context=context, instance=instance) + try: + # TODO(russellb) Why are we sending the migration object just + # to turn around and look it up from the db again? 
+ migration = migration_obj.Migration.get_by_id( + context.elevated(), migration_id) + except exception.MigrationNotFound: + LOG.error(_("Migration %s is not found during confirmation") % + migration_id, context=context, instance=instance) + return + + if migration.status == 'confirmed': + LOG.info(_("Migration %s is already confirmed") % + migration_id, context=context, instance=instance) + return + elif migration.status not in ('finished', 'confirming'): + LOG.warn(_("Unexpected confirmation status '%(status)s' of " + "migration %(id)s, exit confirmation process") % + {"status": migration.status, "id": migration_id}, + context=context, instance=instance) + return + + # NOTE(wangpan): Get the instance from db, if it has been + # deleted, we do nothing and return here + expected_attrs = ['metadata', 'system_metadata'] + try: + instance = instance_obj.Instance.get_by_uuid( + context, + instance.uuid, + expected_attrs=expected_attrs) + except exception.InstanceNotFound: + LOG.info(_("Instance is not found during confirmation"), + context=context, instance=instance) + return + + self._confirm_resize(context, instance, reservations=reservations, + migration=migration) + + do_confirm_resize(context, instance, migration.id) + + def _confirm_resize(self, context, instance, reservations=None, + migration=None): + """Destroys the source instance.""" + self._notify_about_instance_usage(context, instance, + "resize.confirm.start") + + with self._error_out_instance_on_exception(context, instance['uuid'], + reservations): + # NOTE(danms): delete stashed migration information + # sys_meta, instance_type = self._cleanup_stored_instance_types( + # migration, instance) + # sys_meta.pop('old_vm_state', None) + # + # instance.system_metadata = sys_meta + # instance.save() + + # NOTE(tr3buchet): tear down networks on source host + self.network_api.setup_networks_on_host( + context, + instance, + migration.source_compute, + teardown=True) + + network_info = self._get_instance_nw_info(context, instance) + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.debug(_('Confirm resize can not find server %s.'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + try: + cascadedNovaCli.servers.confirm_resize(cascaded_instance_id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to confirm resize server %s .'), + cascaded_instance_id) + + migration.status = 'confirmed' + migration.save(context.elevated()) + +# rt = self._get_resource_tracker(migration.source_node) +# rt.drop_resize_claim(instance, prefix='old_') + + # NOTE(mriedem): The old_vm_state could be STOPPED but the user + # might have manually powered up the instance to confirm the + # resize/migrate, so we need to check the current power state + # on the instance and set the vm_state appropriately. We default + # to ACTIVE because if the power state is not SHUTDOWN, we + # assume _sync_instance_power_state will clean it up. + p_state = instance.power_state + vm_state = None + if p_state == power_state.SHUTDOWN: + vm_state = vm_states.STOPPED + LOG.debug(_("Resized/migrated instance is powered off. 
" + "Setting vm_state to '%s'."), vm_state, + instance=instance) + else: + vm_state = vm_states.ACTIVE + + instance.vm_state = vm_state + instance.task_state = None + instance.save(expected_task_state=[None, task_states.DELETING]) + + self._notify_about_instance_usage( + context, instance, "resize.confirm.end", + network_info=network_info) + + self._quota_commit(context, reservations) + + @messaging.expected_exceptions(NotImplementedError) + @wrap_exception() + @wrap_instance_fault + def get_console_output(self, context, instance, tail_length): + """Send the console output for the given instance.""" + instance = instance_obj.Instance._from_db_object( + context, instance_obj.Instance(), instance) + context = context.elevated() + LOG.audit(_("Get console output"), context=context, + instance=instance) + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.debug(_('get_vnc_console can not find server %s in' + ' cascading_info_mapping %s .'), + instance['uuid'], self.cascading_info_mapping) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + + try: + output = cascadedNovaCli.servers.get_console_output( + cascaded_instance_id, tail_length) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to get_vnc_console server %s .'), + cascaded_instance_id) + return output + + @object_compat + @wrap_exception() + @wrap_instance_fault + def get_vnc_console(self, context, console_type, instance): + """Return connection information for a vnc console.""" + context = context.elevated() + LOG.debug(_("Getting vnc console"), instance=instance) + token = str(uuid.uuid4()) + + if not CONF.vnc_enabled: + raise exception.ConsoleTypeInvalid(console_type=console_type) + + try: + # access info token + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.debug(_('Get vnc_console can not find server %s .'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + try: + bodyReps = cascadedNovaCli.servers.get_vnc_console( + cascaded_instance_id, console_type) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to get_vnc_console server %s .'), + cascaded_instance_id) + if console_type != 'novnc' and console_type != 'xvpvnc': + # For essex, novncproxy_base_url must include the full path + # including the html file (like http://myhost/vnc_auto.html) + raise exception.ConsoleTypeInvalid(console_type=console_type) + connect_info = {} + connect_info['token'] = token + connect_info['access_url'] = bodyReps['console']['url'] + connect_info['host'] = CONF.vncserver_proxyclient_address + connect_info['port'] = CONF.novncproxy_port + connect_info['internal_access_path'] = None + except exception.InstanceNotFound: + if instance['vm_state'] != vm_states.BUILDING: + raise + raise exception.InstanceNotReady(instance_id=instance['uuid']) + + return connect_info + + def _cleanup_stored_instance_types(self, migration, instance, + restore_old=False): + """Clean up "old" and "new" instance_type information stored in + instance's system_metadata. Optionally update the "current" + instance_type to the saved old one first. + + Returns the updated system_metadata as a dict, as well as the + post-cleanup current instance type. 
+ """ + sys_meta = instance.system_metadata + if restore_old: + instance_type = flavors.extract_flavor(instance, 'old_') + sys_meta = flavors.save_flavor_info(sys_meta, instance_type) + else: + instance_type = flavors.extract_flavor(instance) + + flavors.delete_flavor_info(sys_meta, 'old_') + flavors.delete_flavor_info(sys_meta, 'new_') + + return sys_meta, instance_type + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def finish_revert_resize(self, context, instance, reservations, migration): + """Finishes the second half of reverting a resize. + + Bring the original source instance state back (active/shutoff) and + revert the resized attributes in the database. + + """ + with self._error_out_instance_on_exception(context, instance.uuid, + reservations): + self._get_instance_nw_info(context, instance) + + self._notify_about_instance_usage( + context, instance, "resize.revert.start") + + sys_meta, instance_type = self._cleanup_stored_instance_types( + migration, instance, True) + + # NOTE(mriedem): delete stashed old_vm_state information; we + # default to ACTIVE for backwards compatibility if old_vm_state + # is not set + old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE) + + instance.system_metadata = sys_meta + instance.memory_mb = instance_type['memory_mb'] + instance.vcpus = instance_type['vcpus'] + instance.root_gb = instance_type['root_gb'] + instance.ephemeral_gb = instance_type['ephemeral_gb'] + instance.instance_type_id = instance_type['id'] + instance.host = migration['source_compute'] + instance.node = migration['source_node'] + instance.save() + + self.network_api.setup_networks_on_host( + context, + instance, + migration['source_compute']) + +# block_device_info = self._get_instance_volume_block_device_info( +# context, instance, refresh_conn_info=True) + + power_on = old_vm_state != vm_states.STOPPED +# self.driver.finish_revert_migration(context, instance, +# network_info, +# block_device_info, power_on) + cascaded_instance_id = instance['mapping_uuid'] + if cascaded_instance_id is None: + LOG.debug(_('Revert resize can not find server %s.'), + instance['uuid']) + return + cascadedNovaCli = self._get_nova_pythonClient( + context, + cfg.CONF.proxy_region_name, + cfg.CONF.cascaded_nova_url) + try: + cascadedNovaCli.servers.revert_resize(cascaded_instance_id) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to resize server %s .'), + cascaded_instance_id) + + instance.launched_at = timeutils.utcnow() + instance.save(expected_task_state=task_states.RESIZE_REVERTING) + + instance_p = obj_base.obj_to_primitive(instance) + migration_p = obj_base.obj_to_primitive(migration) + self.conductor_api.network_migrate_instance_finish(context, + instance_p, + migration_p) + + # if the original vm state was STOPPED, set it back to STOPPED + LOG.info(_("Updating instance to original state: '%s'") % + old_vm_state) + if power_on: + instance.vm_state = vm_states.ACTIVE + instance.task_state = None + instance.save() + else: + instance.task_state = task_states.POWERING_OFF + instance.save() + self.stop_instance(context, instance=instance) + + self._notify_about_instance_usage( + context, instance, "resize.revert.end") + + self._quota_commit(context, reservations) + + @wrap_exception() + @reverts_task_state + @wrap_instance_event + @wrap_instance_fault + def revert_resize(self, context, instance, migration, reservations): + """Destroys the new instance on the destination machine. 
+ + Reverts the model changes, and powers on the old instance on the + source machine. + + """ + # NOTE(comstud): A revert_resize is essentially a resize back to + # the old size, so we need to send a usage event here. + self.conductor_api.notify_usage_exists( + context, instance, current_period=True) + + with self._error_out_instance_on_exception(context, instance['uuid'], + reservations): + # NOTE(tr3buchet): tear down networks on destination host + self.network_api.setup_networks_on_host(context, instance, + teardown=True) + + instance_p = obj_base.obj_to_primitive(instance) + migration_p = obj_base.obj_to_primitive(migration) + self.conductor_api.network_migrate_instance_start(context, + instance_p, + migration_p) + +# network_info = self._get_instance_nw_info(context, instance) + bdms = (block_device_obj.BlockDeviceMappingList. + get_by_instance_uuid(context, instance.uuid)) +# block_device_info = self._get_instance_volume_block_device_info( +# context, instance, bdms=bdms) + +# self.driver.destroy(context, instance, network_info, +# block_device_info) + + self._terminate_volume_connections(context, instance, bdms) + + migration.status = 'reverted' + migration.save(context.elevated()) + + rt = self._get_resource_tracker(instance.node) + rt.drop_resize_claim(instance) + + self.compute_rpcapi.finish_revert_resize( + context, + instance, + migration, + migration.source_compute, + reservations=reservations) + + def _finish_resize(self, context, instance, migration, disk_info, + image): + old_instance_type_id = migration['old_instance_type_id'] + new_instance_type_id = migration['new_instance_type_id'] + old_instance_type = flavors.extract_flavor(instance) + sys_meta = instance.system_metadata + # NOTE(mriedem): Get the old_vm_state so we know if we should + # power on the instance. 
If old_vm_state is not set we need to default
+        # to ACTIVE for backwards compatibility
+        sys_meta.get('old_vm_state', vm_states.ACTIVE)
+        flavors.save_flavor_info(sys_meta,
+                                 old_instance_type,
+                                 prefix='old_')
+
+        if old_instance_type_id != new_instance_type_id:
+            instance_type = flavors.extract_flavor(instance, prefix='new_')
+            flavors.save_flavor_info(sys_meta, instance_type)
+            instance.instance_type_id = instance_type['id']
+            instance.memory_mb = instance_type['memory_mb']
+            instance.vcpus = instance_type['vcpus']
+            instance.root_gb = instance_type['root_gb']
+            instance.ephemeral_gb = instance_type['ephemeral_gb']
+            instance.system_metadata = sys_meta
+            instance.save()
+
+        # NOTE(tr3buchet): setup networks on destination host
+        self.network_api.setup_networks_on_host(context, instance,
+                                                migration['dest_compute'])
+
+        instance_p = obj_base.obj_to_primitive(instance)
+        migration_p = obj_base.obj_to_primitive(migration)
+        self.conductor_api.network_migrate_instance_finish(context,
+                                                            instance_p,
+                                                            migration_p)
+
+        network_info = self._get_instance_nw_info(context, instance)
+
+        instance.task_state = task_states.RESIZE_FINISH
+        instance.system_metadata = sys_meta
+#        instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
+        instance.save()
+
+        self._notify_about_instance_usage(
+            context, instance, "finish_resize.start",
+            network_info=network_info)
+
+#        block_device_info = self._get_instance_volume_block_device_info(
+#            context, instance, refresh_conn_info=True)
+
+        # NOTE(mriedem): If the original vm_state was STOPPED, we don't
+        # automatically power on the instance after it's migrated
+#        power_on = old_vm_state != vm_states.STOPPED
+#        self.driver.finish_migration(context, migration, instance,
+#                                     disk_info,
+#                                     network_info,
+#                                     image, resize_instance,
+#                                     block_device_info, power_on)
+        cascaded_instance_id = instance['mapping_uuid']
+        if cascaded_instance_id is None:
+            LOG.error(_('Finish resize cannot find server %s.'),
+                      instance['uuid'])
+            return
+
+        cascadedNovaCli = self._get_nova_pythonClient(
+            context,
+            cfg.CONF.proxy_region_name,
+            cfg.CONF.cascaded_nova_url)
+        try:
+            cascadedNovaCli.servers.resize(
+                cascaded_instance_id,
+                instance.system_metadata['new_instance_type_flavorid'])
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_('Failed to resize server %s.'),
+                          cascaded_instance_id)
+
+        migration.status = 'finished'
+        migration.save(context.elevated())
+
+#        instance.vm_state = vm_states.RESIZED
+#        instance.task_state = None
+#        instance.launched_at = timeutils.utcnow()
+#        instance.save(expected_task_state=task_states.RESIZE_FINISH)
+
+        self._notify_about_instance_usage(
+            context, instance, "finish_resize.end",
+            network_info=network_info)
+
+    def _quota_commit(self, context, reservations, project_id=None,
+                      user_id=None):
+        if reservations:
+            self.conductor_api.quota_commit(context, reservations,
+                                            project_id=project_id,
+                                            user_id=user_id)
+
+    def _heal_syn_flavor_info(self, context, instance_type):
+        cascadedNovaCli = self._get_nova_pythonClient(
+            context,
+            cfg.CONF.proxy_region_name,
+            cfg.CONF.cascaded_nova_url)
+        try:
+            flavors = cascadedNovaCli.flavors.get(instance_type['flavorid'])
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                flavors = cascadedNovaCli.flavors.create(
+                    name=instance_type['name'],
+                    ram=instance_type['memory_mb'],
+                    vcpus=instance_type['vcpus'],
+                    disk=instance_type['root_gb'],
+                    flavorid=instance_type['flavorid'],
+                    ephemeral=instance_type['ephemeral_gb'],
+                    swap=instance_type['swap'],
+                    rxtx_factor=instance_type['rxtx_factor']
+                )
+                LOG.debug(_('Created flavor %s.'), instance_type['flavorid'])
+
+    def _heal_syn_keypair_info(self, context, instance):
+        LOG.debug(_('Start to synchronize keypair %s to cascaded OpenStack'),
+                  instance['key_name'])
+        cascadedNovaCli = self._get_nova_pythonClient(
+            context,
+            cfg.CONF.proxy_region_name,
+            cfg.CONF.cascaded_nova_url)
+        keyPai = cascadedNovaCli.keypairs.list()
+        keyNam = instance['key_name']
+        keyDat = instance['key_data']
+        keyExiFlag = False
+        for key in keyPai:
+            if keyNam == key.name:
+                keyExiFlag = True
+                break
+        if keyExiFlag:
+            LOG.debug(_('Keypair %s already exists, no need to synchronize'),
+                      keyNam)
+            return
+        else:
+            cascadedNovaCli.keypairs.create(keyNam, keyDat)
+        LOG.debug(_('Finished synchronizing keypair %s to cascaded OpenStack'),
+                  instance['key_name'])
+
+    def _get_cascaded_image_uuid(self, context, image_uuid):
+        try:
+            glanceClient = glance.GlanceClientWrapper()
+            image = glanceClient.call(context, 2, 'get', image_uuid)
+            cascaded_image_uuid = None
+            for location in image['locations']:
+                if location['url'] and location['url'].startswith(
+                        cfg.CONF.cascaded_glance_url):
+                    cascaded_image_uuid = location['url'].split('/')[-1]
+                    return cascaded_image_uuid
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_("Error while trying to get the cascaded image"
+                            " for cascading image uuid %s")
+                          % image_uuid)
+
+    def _proxy_run_instance(
+            self,
+            context,
+            instance,
+            request_spec=None,
+            filter_properties=None,
+            requested_networks=None,
+            injected_files=None,
+            admin_password=None,
+            is_first_time=False,
+            node=None,
+            legacy_bdm_in_spec=True,
+            physical_ports=None):
+        cascadedNovaCli = self._get_nova_pythonClient(
+            context,
+            cfg.CONF.proxy_region_name,
+            cfg.CONF.cascaded_nova_url)
+        nicsList = []
+        for port in physical_ports:
+            nicsList.append({'port-id': port['port']['id']})
+#        for net in requested_networks:
+#            nicsList.append({'net-id':net[0]})
+        metadata = request_spec['instance_properties']['metadata']
+        metadata['mapping_uuid'] = instance['uuid']
+
+        try:
+            self._heal_syn_flavor_info(context, request_spec['instance_type'])
+        except Exception:
+            pass
+
+        if instance['key_name'] is not None:
+            self._heal_syn_keypair_info(context, instance)
+
+        availability_zone_info = \
+            request_spec['instance_properties']['availability_zone']
+        force_hosts = filter_properties.get('force_hosts')
+        if force_hosts and len(force_hosts) > 0:
+            availability_zone_info = availability_zone_info + \
+                ":" + force_hosts[0]
+
+        files = {}
+        if injected_files is not None:
+            for injected_file in injected_files:
+                file_path = injected_file[0]
+                file_contents = injected_file[1]
+                files[file_path] = file_contents
+
+        image_uuid = None
+        if 'id' in request_spec['image']:
+            if cfg.CONF.cascaded_glance_flag:
+                image_uuid = self._get_cascaded_image_uuid(
+                    context,
+                    request_spec['image']['id'])
+            else:
+                image_uuid = request_spec['image']['id']
+
+        try:
+            block_device_mapping_v2_lst = None
+            block_device_mapping = request_spec['block_device_mapping']
+            for block_device_mapping_value in block_device_mapping:
+                if block_device_mapping_value['source_type'] == 'volume':
+                    proxy_volume_id = None
+                    bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
+                        context, block_device_mapping_value['volume_id'])
+                    driver_bdm = \
+                        driver_block_device.DriverVolumeBlockDevice(bdm)
+                    try:
+                        if driver_bdm['mount_device'] is None:
+                            mount_point = '/dev/vda'
+                        else:
+                            mount_point = driver_bdm['mount_device']
+                        self.volume_api.attach(context, bdm.volume_id,
+                                               instance['uuid'], mount_point)
+                    except Exception:
+                        with excutils.save_and_reraise_exception():
+                            self.volume_api.detach(context.elevated(),
+                                                   bdm.volume_id)
+                            bdm.destroy(context)
+                    try:
+                        bodyReps = self.volume_api.get(
+                            context,
+                            block_device_mapping_value['volume_id'])
+                        proxy_volume_id = \
+                            bodyReps['volume_metadata']['mapping_uuid']
+                    except Exception:
+                        with excutils.save_and_reraise_exception():
+                            LOG.error(_('Failed to get physical volume id,'
+                                        ' logical volume id %s, device %s'),
+                                      block_device_mapping_value['volume_id'],
+                                      block_device_mapping_value['device_name'])
+                    if proxy_volume_id is None:
+                        LOG.error(_('Cannot find the physical volume in the'
+                                    ' cascaded OpenStack for instance %s,'
+                                    ' logical volume id %s'),
+                                  instance['uuid'],
+                                  block_device_mapping_value['volume_id'])
+                        return
+
+                    block_device_mapping_v2_value = {}
+                    block_device_mapping_v2_value['uuid'] = proxy_volume_id
+                    block_device_mapping_v2_value['boot_index'] = \
+                        block_device_mapping_value['boot_index']
+                    block_device_mapping_v2_value['volume_size'] = \
+                        block_device_mapping_value['volume_size']
+                    block_device_mapping_v2_value['source_type'] = \
+                        block_device_mapping_value['source_type']
+                    block_device_mapping_v2_value['destination_type'] = \
+                        block_device_mapping_value['destination_type']
+                    block_device_mapping_v2_value['delete_on_termination'] = \
+                        block_device_mapping_value['delete_on_termination']
+                    block_device_mapping_v2_value['device_name'] = \
+                        block_device_mapping_value['device_name']
+                    block_device_mapping_v2_lst = \
+                        [block_device_mapping_v2_value]
+                    LOG.info(_("block_device_mapping_v2_value is: %s")
+                             % block_device_mapping_v2_value)
+                    break
+
+            bodyResponse = cascadedNovaCli.servers.create(
+                name=request_spec['instance_properties']['display_name'],
+                image=image_uuid,
+                flavor=request_spec['instance_type']['flavorid'],
+                meta=metadata,
+                key_name=request_spec['instance_properties']['key_name'],
+                security_groups=request_spec['security_group'],
+                userdata=request_spec['instance_properties']['user_data'],
+                block_device_mapping_v2=block_device_mapping_v2_lst,
+                scheduler_hints=filter_properties['scheduler_hints'],
+                nics=nicsList,
+                files=files,
+                availability_zone=availability_zone_info)
+            self._instance_update(context, instance['uuid'],
+                                  vm_state=vm_states.BUILDING,
+                                  mapping_uuid=bodyResponse.id,
+                                  task_state=None)
+        except Exception:
+            # Avoid a race condition where the thread could be cancelled
+            # before the ID is stored
+            with excutils.save_and_reraise_exception():
+                LOG.error(_('Failed to create server for instance.'),
+                          instance=instance)
+                self._set_instance_error_state(context, instance['uuid'])
+
+    def _decode_files(self, injected_files):
+        """Base64 decode the list of files to inject."""
+        if not injected_files:
+            return []
+
+        def _decode(f):
+            path, contents = f
+            try:
+                decoded = base64.b64decode(contents)
+                return path, decoded
+            except TypeError:
+                raise exception.Base64Exception(path=path)
+
+        return [_decode(f) for f in injected_files]
+
+    def _validate_instance_group_policy(self, context, instance,
+                                        filter_properties):
+        # NOTE(russellb) Instance group policy is enforced by the scheduler.
+        # However, there is a race condition with the enforcement of
+        # anti-affinity. Since more than one instance may be scheduled at the
+        # same time, it's possible that more than one instance with an
+        # anti-affinity policy may end up here. This is a validation step to
+        # make sure that starting the instance here doesn't violate the policy.
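+        # The validation below runs under a per-group lock (the
+        # utils.synchronized decorator on _do_validation), so concurrent
+        # builds on this host check the anti-affinity policy one at a time.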
+ + scheduler_hints = filter_properties.get('scheduler_hints') or {} + group_uuid = scheduler_hints.get('group') + if not group_uuid: + return + + @utils.synchronized(group_uuid) + def _do_validation(context, instance, group_uuid): + group = instance_group_obj.InstanceGroup.get_by_uuid(context, + group_uuid) + if 'anti-affinity' not in group.policies: + return + + group_hosts = group.get_hosts(context, exclude=[instance['uuid']]) + if self.host in group_hosts: + msg = _("Anti-affinity instance group policy was violated.") + raise exception.RescheduledException( + instance_uuid=instance['uuid'], + reason=msg) + + _do_validation(context, instance, group_uuid) + + def _allocate_network_async(self, context, instance, requested_networks, + macs, security_groups, is_vpn, dhcp_options): + """Method used to allocate networks in the background. + + Broken out for testing. + """ + LOG.debug(_("Allocating IP information in the background."), + instance=instance) + retries = CONF.network_allocate_retries + if retries < 0: + LOG.warn(_("Treating negative config value (%(retries)s) for " + "'network_allocate_retries' as 0."), + {'retries': retries}) + attempts = retries > 1 and retries + 1 or 1 + retry_time = 1 + for attempt in range(1, attempts + 1): + try: + nwinfo = self.network_api.allocate_for_instance( + context, instance, vpn=is_vpn, + requested_networks=requested_networks, + macs=macs, + security_groups=security_groups, + dhcp_options=dhcp_options) + LOG.debug(_('Instance network_info: |%s|'), nwinfo, + instance=instance) + # NOTE(alaski): This can be done more cleanly once we're sure + # we'll receive an object. + sys_meta = utils.metadata_to_dict(instance['system_metadata']) + sys_meta['network_allocated'] = 'True' + self._instance_update(context, instance['uuid'], + system_metadata=sys_meta) + return nwinfo + except Exception: + exc_info = sys.exc_info() + log_info = {'attempt': attempt, + 'attempts': attempts} + if attempt == attempts: + LOG.exception(_('Instance failed network setup ' + 'after %(attempts)d attempt(s)'), + log_info) + raise exc_info[0], exc_info[1], exc_info[2] + LOG.warn(_('Instance failed network setup ' + '(attempt %(attempt)d of %(attempts)d)'), + log_info, instance=instance) + time.sleep(retry_time) + retry_time *= 2 + if retry_time > 30: + retry_time = 30 + + def _allocate_network(self, context, instance, requested_networks, macs, + security_groups, dhcp_options): + """Start network allocation asynchronously. Return an instance + of NetworkInfoAsyncWrapper that can be used to retrieve the + allocated networks when the operation has finished. + """ + # NOTE(comstud): Since we're allocating networks asynchronously, + # this task state has little meaning, as we won't be in this + # state for very long. + instance = self._instance_update(context, instance['uuid'], + vm_state=vm_states.BUILDING, + task_state=task_states.NETWORKING, + expected_task_state=[None]) + is_vpn = pipelib.is_vpn_image(instance['image_ref']) + return network_model.NetworkInfoAsyncWrapper( + self._allocate_network_async, context, instance, + requested_networks, macs, security_groups, is_vpn, + dhcp_options)
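+    # NOTE: the NetworkInfoAsyncWrapper returned by _allocate_network behaves
+    # like ordinary network_info for callers; reading it waits for
+    # _allocate_network_async to finish (or re-raises its failure), so the
+    # rest of the build continues while allocation runs in the background.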