From f9874c05ae6e861c0575b03ac55666f58524fb43 Mon Sep 17 00:00:00 2001 From: zhiyuan_cai Date: Thu, 18 Jun 2015 11:58:12 +0800 Subject: [PATCH] Submit new code base Change-Id: I233a1e0c8ecd9d35a66e28be0a6328b5c7215829 --- cinderproxy/README.md | 196 - cinderproxy/cinder/volume/cinder_proxy.py | 1324 ------- cinderproxy/installation/install.sh | 130 - .../nova/compute => compute}/clients.py | 4 +- .../compute => compute}/compute_context.py | 6 +- .../compute_keystoneclient.py | 6 +- .../nova/compute => compute}/manager_proxy.py | 135 +- envrc | 3 - glancesync/README.md | 139 - glancesync/etc/glance-sync | 10 - glancesync/etc/glance/glance-sync-paste.ini | 35 - glancesync/etc/glance/glance-sync.conf | 60 - glancesync/etc/glance/glance_store.yaml | 29 - glancesync/glance/cmd/sync.py | 65 - glancesync/glance/sync/__init__.py | 257 -- glancesync/glance/sync/api/__init__.py | 22 - glancesync/glance/sync/api/v1/__init__.py | 59 - glancesync/glance/sync/api/v1/images.py | 95 - glancesync/glance/sync/base.py | 738 ---- glancesync/glance/sync/client/__init__.py | 46 - glancesync/glance/sync/client/v1/api.py | 124 - glancesync/glance/sync/client/v1/client.py | 106 - glancesync/glance/sync/clients.py | 89 - glancesync/glance/sync/pool.py | 33 - .../glance/sync/store/_drivers/__init__.py | 0 .../glance/sync/store/_drivers/filesystem.py | 171 - glancesync/glance/sync/store/driver.py | 63 - glancesync/glance/sync/store/glance_store.py | 111 - glancesync/glance/sync/store/location.py | 95 - glancesync/glance/sync/task/__init__.py | 356 -- glancesync/glance/sync/utils.py | 226 -- glancesync/installation/install.sh | 160 - {novaproxy/nova/image => image}/cascading.py | 25 +- {novaproxy/nova/image => image}/exception.py | 0 .../nova/image => image}/sync/__init__.py | 0 .../image => image}/sync/drivers/__init__.py | 0 .../sync/drivers/filesystem.py | 0 .../cinder/timestamp-query-patch/README.md | 54 - .../cinder/db/sqlalchemy/api.py | 3153 ----------------- .../installation/install.sh | 87 - .../glance/glance_location_patch/README.md | 22 - .../glance-egg-info/entry_points.txt | 21 - .../glance/api/v2/images.py | 856 ----- .../glance/common/config.py | 286 -- .../glance/common/exception.py | 422 --- .../glance/common/utils.py | 657 ---- .../glance_location_patch/glance/gateway.py | 214 -- .../glance_location_patch/glance/location.py | 459 --- .../installation/install.sh | 130 - .../glance_store/glance_store_patch/README.md | 5 - .../glance_store/_drivers/http.py | 230 -- .../glance_store/backend.py | 400 --- .../installation/install.sh | 103 - .../installation/install.sh | 97 - .../plugins/ml2/drivers/l2pop/config.py | 28 - .../neutron/plugins/ml2/drivers/l2pop/db.py | 136 - .../plugins/ml2/drivers/l2pop/mech_driver.py | 383 -- .../neutron_cascaded_l3_patch/README.md | 83 - .../installation/install.sh | 127 - .../installation/uninstall.sh | 86 - .../neutron/agent/l3_agent.py | 2127 ----------- .../neutron/agent/linux/ip_lib.py | 625 ---- .../neutron/common/config.py | 196 - .../neutron/db/extraroute_db.py | 220 -- .../installation/install.sh | 97 - .../neutron/plugins/ml2/drivers/l2pop/db.py | 123 - .../plugins/ml2/drivers/l2pop/mech_driver.py | 304 -- .../etc/neutron/neutron.conf | 39 - .../etc/neutron/plugins/ml2/ml2_conf.ini | 107 - .../installation/install.sh | 159 - .../neutron/api/rpc/handlers/l3_rpc.py | 275 -- .../neutron/common/config.py | 196 - .../neutron/common/exceptions.py | 341 -- .../neutron/db/cascade_db.py | 162 - .../versions/2026156eab2f_l2_dvr_models.py | 84 - 
.../neutron/plugins/ml2/plugin.py | 1214 ------- .../README.md | 68 - .../installation/install.sh | 103 - .../neutron/db/common_db_mixin.py | 203 -- .../alembic_migrations/core_init_ops.py | 132 - ...38cf36dab26_add_port_timestamp_revision.py | 44 - .../neutron/db/models_v2.py | 209 -- .../nova/conductor/manager.py | 769 ---- .../plugins/l2_proxy => l2_proxy}/README | 0 .../sync/client/v1 => l2_proxy}/__init__.py | 0 .../sync/store => l2_proxy/agent}/__init__.py | 0 .../l2_proxy => l2_proxy}/agent/clients.py | 4 +- .../l2_proxy => l2_proxy}/agent/l2_proxy.py | 421 +-- .../agent/neutron_keystoneclient.py | 8 +- .../agent/neutron_proxy_context.py | 19 +- .../agent/ovs_dvr_neutron_agent.py | 0 .../l2_proxy => l2_proxy}/agent/xenapi/README | 0 .../agent/xenapi/contrib/build-rpm.sh | 0 .../SPECS/openstack-quantum-xen-plugins.spec | 0 .../agent/xenapi/etc/xapi.d/plugins/netwrap | 0 .../l2_proxy => l2_proxy}/common/__init__.py | 0 .../l2_proxy => l2_proxy}/common/config.py | 0 .../l2_proxy => l2_proxy}/common/constants.py | 0 .../l2_proxy => l2_proxy}/ovs_models_v2.py | 0 .../etc/neutron/plugins/ml2/ml2_conf.ini | 107 - neutronproxy/l2proxy/installation/install.sh | 130 - .../neutron/plugins/l2_proxy/__init__.py | 0 .../plugins/l2_proxy/agent/__init__.py | 0 neutronproxy/l3proxy/etc/neutron/l3_proxy.ini | 20 - neutronproxy/l3proxy/installation/install.sh | 132 - .../l3proxy/neutron/agent/l3_proxy.py | 2432 ------------- novaproxy/README.md | 165 - novaproxy/installation/install.sh | 115 - novaproxy/installation/uninstall.sh | 19 - script/README.md | 44 - script/__init__.py | 1 - script/config.py | 113 - script/exec.sh | 3 - script/tricircle.cfg | 124 - 114 files changed, 233 insertions(+), 24378 deletions(-) delete mode 100644 cinderproxy/README.md delete mode 100644 cinderproxy/cinder/volume/cinder_proxy.py delete mode 100644 cinderproxy/installation/install.sh rename {novaproxy/nova/compute => compute}/clients.py (98%) rename {novaproxy/nova/compute => compute}/compute_context.py (98%) rename {novaproxy/nova/compute => compute}/compute_keystoneclient.py (98%) rename {novaproxy/nova/compute => compute}/manager_proxy.py (98%) delete mode 100755 envrc delete mode 100644 glancesync/README.md delete mode 100644 glancesync/etc/glance-sync delete mode 100644 glancesync/etc/glance/glance-sync-paste.ini delete mode 100644 glancesync/etc/glance/glance-sync.conf delete mode 100644 glancesync/etc/glance/glance_store.yaml delete mode 100644 glancesync/glance/cmd/sync.py delete mode 100644 glancesync/glance/sync/__init__.py delete mode 100644 glancesync/glance/sync/api/__init__.py delete mode 100644 glancesync/glance/sync/api/v1/__init__.py delete mode 100644 glancesync/glance/sync/api/v1/images.py delete mode 100644 glancesync/glance/sync/base.py delete mode 100644 glancesync/glance/sync/client/__init__.py delete mode 100644 glancesync/glance/sync/client/v1/api.py delete mode 100644 glancesync/glance/sync/client/v1/client.py delete mode 100644 glancesync/glance/sync/clients.py delete mode 100644 glancesync/glance/sync/pool.py delete mode 100644 glancesync/glance/sync/store/_drivers/__init__.py delete mode 100644 glancesync/glance/sync/store/_drivers/filesystem.py delete mode 100644 glancesync/glance/sync/store/driver.py delete mode 100644 glancesync/glance/sync/store/glance_store.py delete mode 100644 glancesync/glance/sync/store/location.py delete mode 100644 glancesync/glance/sync/task/__init__.py delete mode 100644 glancesync/glance/sync/utils.py delete mode 100644 glancesync/installation/install.sh 
rename {novaproxy/nova/image => image}/cascading.py (89%) rename {novaproxy/nova/image => image}/exception.py (100%) mode change 100755 => 100644 rename {novaproxy/nova/image => image}/sync/__init__.py (100%) rename {novaproxy/nova/image => image}/sync/drivers/__init__.py (100%) rename {novaproxy/nova/image => image}/sync/drivers/filesystem.py (100%) delete mode 100644 juno-patches/cinder/timestamp-query-patch/README.md delete mode 100644 juno-patches/cinder/timestamp-query-patch/cinder/db/sqlalchemy/api.py delete mode 100644 juno-patches/cinder/timestamp-query-patch/installation/install.sh delete mode 100644 juno-patches/glance/glance_location_patch/README.md delete mode 100644 juno-patches/glance/glance_location_patch/glance-egg-info/entry_points.txt delete mode 100644 juno-patches/glance/glance_location_patch/glance/api/v2/images.py delete mode 100644 juno-patches/glance/glance_location_patch/glance/common/config.py delete mode 100644 juno-patches/glance/glance_location_patch/glance/common/exception.py delete mode 100644 juno-patches/glance/glance_location_patch/glance/common/utils.py delete mode 100644 juno-patches/glance/glance_location_patch/glance/gateway.py delete mode 100644 juno-patches/glance/glance_location_patch/glance/location.py delete mode 100644 juno-patches/glance/glance_location_patch/installation/install.sh delete mode 100644 juno-patches/glance_store/glance_store_patch/README.md delete mode 100644 juno-patches/glance_store/glance_store_patch/glance_store/_drivers/http.py delete mode 100644 juno-patches/glance_store/glance_store_patch/glance_store/backend.py delete mode 100644 juno-patches/glance_store/glance_store_patch/installation/install.sh delete mode 100644 juno-patches/neutron/neutron_cascaded_big2layer_patch/installation/install.sh delete mode 100644 juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/config.py delete mode 100644 juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/db.py delete mode 100644 juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py delete mode 100644 juno-patches/neutron/neutron_cascaded_l3_patch/README.md delete mode 100644 juno-patches/neutron/neutron_cascaded_l3_patch/installation/install.sh delete mode 100644 juno-patches/neutron/neutron_cascaded_l3_patch/installation/uninstall.sh delete mode 100644 juno-patches/neutron/neutron_cascaded_l3_patch/neutron/agent/l3_agent.py delete mode 100644 juno-patches/neutron/neutron_cascaded_l3_patch/neutron/agent/linux/ip_lib.py delete mode 100644 juno-patches/neutron/neutron_cascaded_l3_patch/neutron/common/config.py delete mode 100644 juno-patches/neutron/neutron_cascaded_l3_patch/neutron/db/extraroute_db.py delete mode 100644 juno-patches/neutron/neutron_cascading_big2layer_patch/installation/install.sh delete mode 100644 juno-patches/neutron/neutron_cascading_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/db.py delete mode 100644 juno-patches/neutron/neutron_cascading_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py delete mode 100644 juno-patches/neutron/neutron_cascading_l3_patch/etc/neutron/neutron.conf delete mode 100644 juno-patches/neutron/neutron_cascading_l3_patch/etc/neutron/plugins/ml2/ml2_conf.ini delete mode 100644 juno-patches/neutron/neutron_cascading_l3_patch/installation/install.sh delete mode 100644 juno-patches/neutron/neutron_cascading_l3_patch/neutron/api/rpc/handlers/l3_rpc.py delete mode 100644 
juno-patches/neutron/neutron_cascading_l3_patch/neutron/common/config.py delete mode 100644 juno-patches/neutron/neutron_cascading_l3_patch/neutron/common/exceptions.py delete mode 100644 juno-patches/neutron/neutron_cascading_l3_patch/neutron/db/cascade_db.py delete mode 100644 juno-patches/neutron/neutron_cascading_l3_patch/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py delete mode 100644 juno-patches/neutron/neutron_cascading_l3_patch/neutron/plugins/ml2/plugin.py delete mode 100644 juno-patches/neutron/neutron_timestamp_cascaded_patch/README.md delete mode 100644 juno-patches/neutron/neutron_timestamp_cascaded_patch/installation/install.sh delete mode 100644 juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/common_db_mixin.py delete mode 100644 juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/migration/alembic_migrations/core_init_ops.py delete mode 100644 juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/migration/alembic_migrations/versions/238cf36dab26_add_port_timestamp_revision.py delete mode 100644 juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/models_v2.py delete mode 100755 juno-patches/nova/nova_scheduling_patch/nova/conductor/manager.py rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/README (100%) rename {glancesync/glance/sync/client/v1 => l2_proxy}/__init__.py (100%) rename {glancesync/glance/sync/store => l2_proxy/agent}/__init__.py (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/clients.py (98%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/l2_proxy.py (79%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/neutron_keystoneclient.py (98%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/neutron_proxy_context.py (93%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/ovs_dvr_neutron_agent.py (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/xenapi/README (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/xenapi/contrib/build-rpm.sh (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/agent/xenapi/etc/xapi.d/plugins/netwrap (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/common/__init__.py (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/common/config.py (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/common/constants.py (100%) rename {neutronproxy/l2proxy/neutron/plugins/l2_proxy => l2_proxy}/ovs_models_v2.py (100%) delete mode 100644 neutronproxy/l2proxy/etc/neutron/plugins/ml2/ml2_conf.ini delete mode 100644 neutronproxy/l2proxy/installation/install.sh delete mode 100644 neutronproxy/l2proxy/neutron/plugins/l2_proxy/__init__.py delete mode 100644 neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/__init__.py delete mode 100644 neutronproxy/l3proxy/etc/neutron/l3_proxy.ini delete mode 100644 neutronproxy/l3proxy/installation/install.sh delete mode 100644 neutronproxy/l3proxy/neutron/agent/l3_proxy.py delete mode 100644 novaproxy/README.md delete mode 100644 novaproxy/installation/install.sh delete mode 100644 novaproxy/installation/uninstall.sh delete mode 100755 script/README.md delete mode 100755 script/__init__.py delete mode 100755 
script/config.py delete mode 100755 script/exec.sh delete mode 100755 script/tricircle.cfg diff --git a/cinderproxy/README.md b/cinderproxy/README.md deleted file mode 100644 index 72884b29..00000000 --- a/cinderproxy/README.md +++ /dev/null @@ -1,196 +0,0 @@ -Openstack Cinder Proxy -=============================== - - Cinder-Proxy acts as the same role of Cinder-Volume in cascading OpenStack. - Cinder-Proxy treats cascaded Cinder as its cinder volume, convert the internal request message from the message bus to restful API calling to cascaded Cinder. - - -Key modules ------------ - -* The new Cinder-Proxy module cinder_proxy,which treats cascaded Cinder as its cinder volume, convert the internal request message from the message bus to restful API calling to cascaded Cinder: - - cinder/volume/cinder_proxy.py - -Requirements ------------- -* openstack-cinder-volume-juno has been installed - -Installation ------------- - -We provide two ways to install the Cinder-Proxy code. In this section, we will guide you through installing the Cinder-Proxy with the minimum configuration. - -* **Note:** - - - Make sure you have an existing installation of **Openstack Juno**. - - We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified: - $CINDER_CONFIG_PARENT_DIR/cinder.conf - (replace the $... with actual directory names.) - -* **Manual Installation** - - - Make sure you have performed backups properly. - - - Navigate to the local repository and copy the contents in 'cinder' sub-directory to the corresponding places in existing cinder, e.g. - ```cp -r $LOCAL_REPOSITORY_DIR/cinder $CINDER_PARENT_DIR``` - (replace the $... with actual directory name.) - - - Update the cinder configuration file (e.g. /etc/cinder/cinder.conf) with the minimum option below. If the option already exists, modify its value, otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide. - ``` - [DEFAULT] - ... - ###configuration for Cinder cascading ### - volume_manager=cinder.volume.cinder_proxy.CinderProxy - volume_sync_interval=5 - voltype_sync_interval=3600 - pagination_limit=50 - volume_sync_timestamp_flag=True - cinder_tenant_name=$CASCADED_ADMIN_TENANT - cinder_tenant_id=$CASCADED_ADMIN_ID - cinder_username=$CASCADED_ADMIN_NAME - cinder_password=$CASCADED_ADMIN_PASSWORD - keystone_auth_url=http://$GLOBAL_KEYSTONE_IP:5000/v2.0/ - glance_cascading_flag=True - cascading_glance_url=$CASCADING_GLANCE - cascaded_glance_url=http://$CASCADED_GLANCE - cascaded_available_zone=$CASCADED_AVAILABLE_ZONE - cascaded_region_name=$CASCADED_REGION_NAME - ``` - - - Restart the Cinder-Proxy. - ```service openstack-cinder-volume restart``` - - - Done. The Cinder-Proxy should be working with a demo configuration. - -* **Automatic Installation** - - - Make sure you have performed backups properly. - - - Navigate to the installation directory and run installation script. - ``` - cd $LOCAL_REPOSITORY_DIR/installation - sudo bash ./install.sh - ``` - (replace the $... with actual directory name.) - - - Done. The installation code should setup the Cinder-Proxy with the minimum configuration below. Check the "Configurations" section for a full configuration guide. - ``` - [DEFAULT] - ... - ###cascade info ### - ... 
- ###configuration for Cinder cascading ### - volume_manager=cinder.volume.cinder_proxy.CinderProxy - volume_sync_interval=5 - voltype_sync_interval=3600 - pagination_limit=50 - volume_sync_timestamp_flag=True - cinder_tenant_name=$CASCADED_ADMIN_TENANT - cinder_tenant_id=$CASCADED_ADMIN_ID - cinder_username=$CASCADED_ADMIN_NAME - cinder_password=$CASCADED_ADMIN_PASSWORD - keystone_auth_url=http://$GLOBAL_KEYSTONE_IP:5000/v2.0/ - glance_cascading_flag=True - cascading_glance_url=$CASCADING_GLANCE - cascaded_glance_url=http://$CASCADED_GLANCE - cascaded_available_zone=$CASCADED_AVAILABLE_ZONE - cascaded_region_name=$CASCADED_REGION_NAME - ``` - -* **Troubleshooting** - - In case the automatic installation process is not complete, please check the followings: - - - Make sure your OpenStack version is Juno. - - - Check the variables in the beginning of the install.sh scripts. Your installation directories may be different from the default values we provide. - - - The installation code will automatically add the related codes to $CINDER_PARENT_DIR/cinder and modify the related configuration. - - - In case the automatic installation does not work, try to install manually. - -Configurations --------------- - -* This is a (default) configuration sample for the Cinder-Proxy. Please add/modify these options in /etc/cinder/cinder.conf. -* Note: - - Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name. - - Please refer to the 'Configuration Details' section below for proper configuration and usage of costs and constraints. - -``` -[DEFAULT] - -... - -# -#Options defined in cinder.volume.manager -# - -# Default driver to use for the Cinder-Proxy (string value) -volume_manager=cinder.volume.cinder_proxy.CinderProxy - -#The period time used by Cinder-Proxy to determine how often volume status -#is synchronized between cascading and cascaded cinder (integer value, default 5) -volume_sync_interval=5 - -#The period time used by Cinder-Proxy to control how often volume types -#is synchronized between cascading and cascaded cinder (integer value, default 3600) -voltype_sync_interval=3600 - -#The length of volume list used by Cinder-Proxy to control each pagination volume query -#for Cinder-Proxy between cascading and cascaded cinder (integer value, default 50) -pagination_limit=50 - -#The switch flag used by Cinder-Proxy to determine whether to use time-stamp when synchronize -#volume status.( boolean value, default true) -volume_sync_timestamp_flag=True - -#The cascaded level tenant name, which will be set as a parameter when cascaded cinder -#client is constructed by Cinder-Proxy -cinder_tenant_name=$CASCADED_ADMIN_TENANT - -#The cascaded level tenant id, which will be set as a parameter when cascaded cinder -#client is constructed by Cinder-Proxy -cinder_tenant_id=$CASCADED_ADMIN_ID - -#The cascaded level user name, which will be set as a parameter when cascaded cinder -#client is constructed by Cinder-Proxy -cinder_username=$CASCADED_ADMIN_NAME - -#The cascaded level user password, which will be set as a parameter when cascaded cinder -#client is constructed by Cinder-Proxy -cinder_password=$CASCADED_ADMIN_PASSWORD - -#The cascading level keystone component service url, by which the Cinder-Proxy -#can access to cascading level keystone service -keystone_auth_url=$keystone_auth_url - -#The switch flag used by Cinder-Proxy to determine glance is used OpenStack-cascading solution. 
-#(boolean value, default true) -glance_cascading_flag=True - -#The cascading level glance component service url, by which the Cinder-Proxy -#can access to cascading level glance service -cascading_glance_url=$CASCADING_GLANCE - -#The cascaded level glance component service url, by which the Cinder-Proxy -#can judge whether the cascading glance image has a location for this cascaded glance -cascaded_glance_url=http://$CASCADED_GLANCE - -#The cascaded level region name, which will be set as a parameter when -#the cascaded level component services register endpoint to keystone -cascaded_region_name=$CASCADED_REGION_NAME - -#The cascaded level available zone name, which will be set as a parameter when -#forward request to cascaded level cinder. Please pay attention to that value of -#cascaded_available_zone of Cinder-Proxy must be the same as storage_availability_zone in -#the cascaded level node. And Cinder-Proxy should be configured to the same storage_availability_zone. -#this configuration could be removed in the future to just use the Cinder-Proxy storage_availability_zone -#configuration item. but it is up to the admin to make sure the storage_availability_zone in Cinder-Proxy -#and cascaded cinder keep the same value. -cascaded_available_zone=$CASCADED_AVAILABLE_ZONE - - - - diff --git a/cinderproxy/cinder/volume/cinder_proxy.py b/cinderproxy/cinder/volume/cinder_proxy.py deleted file mode 100644 index 1b597573..00000000 --- a/cinderproxy/cinder/volume/cinder_proxy.py +++ /dev/null @@ -1,1324 +0,0 @@ -# Copyright 2014 Huawei Technologies Co., LTD -# All Rights Reserved. -# -# @author: z00209472, Huawei Technologies Co., LTD -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -cinder-proxy manages creating, attaching, detaching, and persistent storage. -cinder-proxy acts as the same role of cinder-volume in cascading OpenStack. -cinder-proxy treats cascaded cinder as its cinder volume,convert the internal -request message from the message bus to restful API calling to cascaded cinder. - -Persistent storage volumes keep their state independent of instances. You can -attach to an instance, terminate the instance, spawn a new instance (even -one from a different image) and re-attach the volume with the same data -intact. - -**Related Flags** - -:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`). -:volume_manager: The module name of a class derived from - :class:`manager.Manager` (default: - :class:`cinder.volume.cinder_proxy.CinderProxy`). 
-:volume_group: Name of the group that will contain exported volumes (default: - `cinder-volumes`) -:num_shell_tries: Number of times to attempt to run commands (default: 3) - -""" -import time - -from oslo.config import cfg -from oslo import messaging - -from cinder import context -from cinder import exception -from cinder import manager -from cinder import quota -from cinder import utils -from cinder import volume - -from cinder.i18n import _ -from cinder.image import glance -from cinder.openstack.common import excutils -from cinder.openstack.common import log as logging -from cinder.openstack.common import periodic_task -from cinder.openstack.common import timeutils -from cinder.openstack.common import uuidutils -from cinder.volume.configuration import Configuration -from cinder.volume import utils as volume_utils -from cinderclient.v2 import client as cinder_client -from cinderclient import exceptions as cinder_exception - -from eventlet.greenpool import GreenPool -from keystoneclient.v2_0 import client as kc -from keystoneclient import exceptions as keystone_exception - - -LOG = logging.getLogger(__name__) - -QUOTAS = quota.QUOTAS -CGQUOTAS = quota.CGQUOTAS - -volume_manager_opts = [ - cfg.IntOpt('migration_create_volume_timeout_secs', - default=300, - help='Timeout for creating the volume to migrate to ' - 'when performing volume migration (seconds)'), - cfg.ListOpt('enabled_volume_types', - default=None, - help='A list of volume types to use'), - cfg.IntOpt('volume_sync_interval', - default=5, - help='seconds between cascading and cascaded cinders' - 'when synchronizing volume data'), - cfg.IntOpt('pagination_limit', - default=50, - help='pagination limit query for volumes between' - 'cascading and cascaded OpenStack'), - cfg.IntOpt('voltype_sync_interval', - default=3600, - help='seconds between cascading and cascaded cinders' - 'when synchronizing volume type and qos data'), - cfg.BoolOpt('volume_sync_timestamp_flag', - default=True, - help='whether to sync volume status based on timestamp'), - cfg.BoolOpt('clean_extra_cascaded_vol_flag', - default=False, - help='whether to clean extra cascaded volumes while sync' - 'volumes between cascading and cascaded OpenStack' - 'please with caution when set to True'), - cfg.BoolOpt('volume_service_inithost_offload', - default=False, - help='Offload pending volume delete during ' - 'volume service startup'), - cfg.StrOpt('cinder_username', - default='cinder_username', - help='username for connecting to cinder in admin context'), - cfg.StrOpt('cinder_password', - default='cinder_password', - help='password for connecting to cinder in admin context', - secret=True), - cfg.StrOpt('cinder_tenant_id', - default='cinder_tenant_id', - help='tenant id for connecting to cinder in admin context'), - cfg.StrOpt('cascaded_available_zone', - default='nova', - help='available zone for cascaded OpenStack'), - cfg.StrOpt('keystone_auth_url', - default='http://127.0.0.1:5000/v2.0/', - help='value of keystone url'), - cfg.StrOpt('cascaded_cinder_url', - default='http://127.0.0.1:8776/v2/%(project_id)s', - help='value of cascaded cinder url'), - cfg.StrOpt('cascading_cinder_url', - default='http://127.0.0.1:8776/v2/%(project_id)s', - help='value of cascading cinder url'), - cfg.BoolOpt('glance_cascading_flag', - default=False, - help='Whether to use glance cescaded'), - cfg.StrOpt('cascading_glance_url', - default='127.0.0.1:9292', - help='value of cascading glance url'), - cfg.StrOpt('cascaded_glance_url', - default='http://127.0.0.1:9292', - help='value of 
cascaded glance url'), - cfg.StrOpt('cascaded_region_name', - default='RegionOne', - help='Region name of this node'), -] -CONF = cfg.CONF -CONF.register_opts(volume_manager_opts) - - -def locked_volume_operation(f): - """Lock decorator for volume operations. - - Takes a named lock prior to executing the operation. The lock is named with - the operation executed and the id of the volume. This lock can then be used - by other operations to avoid operation conflicts on shared volumes. - - Example use: - - If a volume operation uses this decorator, it will block until the named - lock is free. This is used to protect concurrent operations on the same - volume e.g. delete VolA while create volume VolB from VolA is in progress. - """ - def lvo_inner1(inst, context, volume_id, **kwargs): - @utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True) - def lvo_inner2(*_args, **_kwargs): - return f(*_args, **_kwargs) - return lvo_inner2(inst, context, volume_id, **kwargs) - return lvo_inner1 - - -def locked_snapshot_operation(f): - """Lock decorator for snapshot operations. - - Takes a named lock prior to executing the operation. The lock is named with - the operation executed and the id of the snapshot. This lock can then be - used by other operations to avoid operation conflicts on shared snapshots. - - Example use: - - If a snapshot operation uses this decorator, it will block until the named - lock is free. This is used to protect concurrent operations on the same - snapshot e.g. delete SnapA while create volume VolA from SnapA is in - progress. - """ - def lso_inner1(inst, context, snapshot_id, **kwargs): - @utils.synchronized("%s-%s" % (snapshot_id, f.__name__), external=True) - def lso_inner2(*_args, **_kwargs): - return f(*_args, **_kwargs) - return lso_inner2(inst, context, snapshot_id, **kwargs) - return lso_inner1 - - -class CinderProxy(manager.SchedulerDependentManager): - - """Manages attachable block storage devices.""" - - RPC_API_VERSION = '1.18' - target = messaging.Target(version=RPC_API_VERSION) - - VOLUME_NAME_MAX_LEN = 255 - VOLUME_UUID_MAX_LEN = 36 - SNAPSHOT_NAME_MAX_LEN = 255 - SNAPSHOT_UUID_MAX_LEN = 36 - - def __init__(self, service_name=None, *args, **kwargs): - """Load the specified in args, or flags.""" - # update_service_capabilities needs service_name to be volume - super(CinderProxy, self).__init__(service_name='volume', - *args, **kwargs) - self.configuration = Configuration(volume_manager_opts, - config_group=service_name) - self._tp = GreenPool() - self.volume_api = volume.API() - self._last_info_volume_state_heal = 0 - self._change_since_time = None - self.volumes_mapping_cache = {'volumes': {}, 'snapshots': {}} - self.image_service = glance.get_default_image_service() - self.adminCinderClient = self._get_cinder_cascaded_admin_client() - self._init_volume_mapping_cache() - - def _init_volume_mapping_cache(self): - try: - volumes = \ - self._query_vol_cascaded_pagination(change_since_time=None) - for vol in volumes: - ccding_volume_id = self._get_ccding_volume_id(vol) - if ccding_volume_id == '': - continue - self.volumes_mapping_cache['volumes'][ccding_volume_id] = \ - vol._info['id'] - - snapshots = self._query_snapshot_cascaded_all_tenant() - for snapshot in snapshots: - ccding__snapshot_id = self._get_ccding_snapsot_id(snapshot) - if ccding__snapshot_id == '': - continue - self.volumes_mapping_cache['snapshots'][ccding__snapshot_id] = \ - snapshot._info['id'] - - LOG.info(_("cascade info: init volume mapping cache is %s"), - 
self.volumes_mapping_cache) - except Exception as ex: - LOG.error(_("Failed init volumes mapping cache")) - LOG.exception(ex) - - def _get_ccding_volume_id(self, volume): - csd_name = volume._info.get("name", None) - if csd_name is None: - LOG.error(_("Cascade info: csd_name is None!!!. %s"), - volume._info) - return '' - - uuid_len = self.VOLUME_UUID_MAX_LEN - if len(csd_name) > (uuid_len+1) and csd_name[-(uuid_len+1)] == '@': - return csd_name[-uuid_len:] - try: - return volume._info['metadata']['logicalVolumeId'] - except KeyError: - return '' - - def _get_ccding_snapsot_id(self, snapshot): - csd_name = snapshot._info["name"] - uuid_len = self.SNAPSHOT_UUID_MAX_LEN - if len(csd_name) > (uuid_len+1) and csd_name[-(uuid_len+1)] == '@': - return csd_name[-uuid_len:] - try: - return snapshot._info['metadata']['logicalVolumeId'] - except KeyError: - return '' - - def _gen_ccding_volume_name(self, volume_name, volume_id): - max_len = self.VOLUME_NAME_MAX_LEN - self.VOLUME_UUID_MAX_LEN - 1 - if (len(volume_name) <= max_len): - return volume_name + "@" + volume_id - else: - return volume_name[0:max_len] + "@" + volume_id - - def _gen_ccding_snapshot_name(self, snapshot_name, snapshot_id): - max_len = self.SNAPSHOT_NAME_MAX_LEN - self.SNAPSHOT_UUID_MAX_LEN - 1 - if (len(snapshot_name) <= max_len): - return snapshot_name + "@" + snapshot_id - else: - return snapshot_name[0:max_len] + "@" + snapshot_id - - def _get_cinder_cascaded_admin_client(self): - - try: - kwargs = {'username': cfg.CONF.cinder_username, - 'password': cfg.CONF.cinder_password, - 'tenant_id': cfg.CONF.cinder_tenant_id, - 'auth_url': cfg.CONF.keystone_auth_url - } - - keystoneclient = kc.Client(**kwargs) - cinderclient = cinder_client.Client( - username=cfg.CONF.cinder_username, - auth_url=cfg.CONF.keystone_auth_url, - insecure=True) - cinderclient.client.auth_token = keystoneclient.auth_ref.auth_token - diction = {'project_id': cfg.CONF.cinder_tenant_id} - cinderclient.client.management_url = \ - cfg.CONF.cascaded_cinder_url % diction - - return cinderclient - except keystone_exception.Unauthorized: - with excutils.save_and_reraise_exception(): - LOG.error(_('Token unauthorized failed for keystoneclient ' - 'constructed when get cascaded admin client')) - except cinder_exception.Unauthorized: - with excutils.save_and_reraise_exception(): - LOG.error(_('Token unauthorized failed for cascaded ' - 'cinderClient constructed')) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_('Failed to get cinder python client.')) - - def _get_cinder_cascaded_user_client(self, context): - - try: - ctx_dict = context.to_dict() - cinderclient = cinder_client.Client( - username=ctx_dict.get('user_id'), - auth_url=cfg.CONF.keystone_auth_url, - insecure=True) - cinderclient.client.auth_token = ctx_dict.get('auth_token') - cinderclient.client.management_url = \ - cfg.CONF.cascaded_cinder_url % ctx_dict - return cinderclient - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_('Failed to get cinder python client.')) - - def _get_image_cascaded(self, context, image_id, cascaded_glance_url): - - try: - # direct_url is returned by v2 api - netloc = cfg.CONF.cascading_glance_url - header = 'http://' - if header in cfg.CONF.cascading_glance_url: - netloc = netloc[len(header):] - - client = glance.GlanceClientWrapper( - context, - netloc=netloc, - use_ssl=False, - version="2") - image_meta = client.call(context, 'get', image_id) - - except Exception: - 
glance._reraise_translated_image_exception(image_id) - - if not self.image_service._is_image_available(context, image_meta): - raise exception.ImageNotFound(image_id=image_id) - - LOG.debug(_("cascade ino: image glance get_image_cascaded," - "cascaded_glance_url:%s"), cascaded_glance_url) - - locations = getattr(image_meta, 'locations', None) - LOG.debug(_("cascade ino: image glance get_image_cascaded," - "locations:%s"), locations) - cascaded_image_id = None - for loc in locations: - image_url = loc.get('url') - LOG.debug(_("cascade ino: image glance get_image_cascaded," - "image_url:%s"), image_url) - if cascaded_glance_url in image_url: - (cascaded_image_id, glance_netloc, use_ssl) = \ - glance._parse_image_ref(image_url) - LOG.debug(_("cascade ino : result :image glance " - "get_image_cascaded,%s") % cascaded_image_id) - break - - if cascaded_image_id is None: - raise exception.CinderException( - _("cascade exception: cascaded image for image %s not exist.") - % image_id) - - return cascaded_image_id - - def _add_to_threadpool(self, func, *args, **kwargs): - self._tp.spawn_n(func, *args, **kwargs) - - def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ - - ctxt = context.get_admin_context() - - volumes = self.db.volume_get_all_by_host(ctxt, self.host) - LOG.debug(_("Re-exporting %s volumes"), len(volumes)) - - LOG.debug(_('Resuming any in progress delete operations')) - for volume in volumes: - if volume['status'] == 'deleting': - LOG.info(_('Resuming delete on volume: %s') % volume['id']) - if CONF.volume_service_inithost_offload: - # Offload all the pending volume delete operations to the - # threadpool to prevent the main volume service thread - # from being blocked. - self._add_to_threadpool(self.delete_volume(ctxt, - volume['id'])) - else: - # By default, delete volumes sequentially - self.delete_volume(ctxt, volume['id']) - - # collect and publish service capabilities - self.publish_service_capabilities(ctxt) - - def create_volume(self, context, volume_id, request_spec=None, - filter_properties=None, allow_reschedule=True, - snapshot_id=None, image_id=None, source_volid=None, - source_replicaid=None, consistencygroup_id=None): - """Creates and exports the volume.""" - - ctx_dict = context.to_dict() - try: - volume_properties = request_spec.get('volume_properties') - size = volume_properties.get('size') - volume_name = volume_properties.get('display_name') - display_name = self._gen_ccding_volume_name(volume_name, volume_id) - display_description = volume_properties.get('display_description') - volume_type_id = volume_properties.get('volume_type_id') - user_id = ctx_dict.get('user_id') - project_id = ctx_dict.get('project_id') - - cascaded_snapshot_id = None - if snapshot_id is not None: - cascaded_snapshot_id = \ - self.volumes_mapping_cache['snapshots'].get(snapshot_id, - None) - LOG.info(_('cascade ino: create volume from snapshot, ' - 'cascade id:%s'), cascaded_snapshot_id) - - cascaded_source_volid = None - if source_volid is not None: - cascaded_source_volid = \ - self.volumes_mapping_cache['volumes'].get(source_volid, - None) - LOG.info(_('cascade ino: create volume from source volume, ' - 'cascade id:%s'), cascaded_source_volid) - - cascaded_volume_type = None - if volume_type_id is not None: - volume_type_ref = \ - self.db.volume_type_get(context, volume_type_id) - cascaded_volume_type = volume_type_ref['name'] - LOG.info(_('cascade ino: create volume use volume type, ' - 'cascade name:%s'), cascaded_volume_type) - 
- cascaded_image_id = None - if image_id is not None: - if cfg.CONF.glance_cascading_flag: - cascaded_image_id = self._get_image_cascaded( - context, - image_id, - cfg.CONF.cascaded_glance_url) - else: - cascaded_image_id = image_id - LOG.info(_("cascade ino: create volume use image, " - "cascaded image id is %s:"), cascaded_image_id) - - availability_zone = cfg.CONF.cascaded_available_zone - LOG.info(_('cascade ino: create volume with available zone:%s'), - availability_zone) - - metadata = volume_properties.get('metadata', {}) - metadata['logicalVolumeId'] = volume_id - - cinderClient = self._get_cinder_cascaded_user_client(context) - - bodyResponse = cinderClient.volumes.create( - size=size, - snapshot_id=cascaded_snapshot_id, - source_volid=cascaded_source_volid, - name=display_name, - description=display_description, - volume_type=cascaded_volume_type, - user_id=user_id, - project_id=project_id, - availability_zone=availability_zone, - metadata=metadata, - imageRef=cascaded_image_id) - - if bodyResponse._info['status'] == 'creating': - self.volumes_mapping_cache['volumes'][volume_id] = \ - bodyResponse._info['id'] - if 'logicalVolumeId' in metadata: - metadata.pop('logicalVolumeId') - metadata['mapping_uuid'] = bodyResponse._info['id'] - self.db.volume_metadata_update(context, volume_id, - metadata, True) - return volume_id - - except Exception: - with excutils.save_and_reraise_exception(): - self.db.volume_update(context, - volume_id, - {'status': 'error'}) - - def _query_vol_cascaded_pagination(self, change_since_time=None): - - if not CONF.volume_sync_timestamp_flag: - change_since_time = None - - try: - page_limit = CONF.pagination_limit - marker = None - volumes = [] - while True: - sopt = {'all_tenants': True, - 'changes-since': change_since_time, - 'sort_key': 'updated_at', - 'sort_dir': 'desc', - 'marker': marker, - 'limit': page_limit, - } - vols = \ - self.adminCinderClient.volumes.list(search_opts=sopt) - - LOG.debug(_('cascade ino: volume pagination query. marker: %s,' - ' pagination_limit: %s, change_since: %s, vols: %s' - ), marker, page_limit, change_since_time, vols) - - if (vols): - volumes.extend(vols) - marker = vols[-1]._info['id'] - continue - else: - break - - LOG.debug(_('cascade ino: ready to update volume status from ' - 'pagination query. volumes: %s'), volumes) - return volumes - except cinder_exception.Unauthorized: - self.adminCinderClient = self._get_cinder_cascaded_admin_client() - return self._query_vol_cascaded_pagination(change_since_time) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_('Failed to query volumes by pagination.')) - - def _query_snapshot_cascaded_all_tenant(self): - """ cinder snapshots pagination query API has not been supported until - native OpenStack Juno version yet. - """ - try: - opts = {'all_tenants': True} - snapshots = \ - self.adminCinderClient.volume_snapshots.list(search_opts=opts) - LOG.debug(_('cascade ino: snapshots query.' 
- 'snapshots: %s'), snapshots) - return snapshots - except cinder_exception.Unauthorized: - self.adminCinderClient = self._get_cinder_cascaded_admin_client() - return self._query_snapshot_cascaded_all_tenant() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_('Failed to query snapshots by all tenant.')) - - def _check_update_volume(self, context, refresh_vol): - '''check refresh volumes before update''' - - volume_id = refresh_vol._info['metadata'].get('logicalVolumeId', None) - if volume_id is None: - LOG.error(_("cascade info: logicalVolumeId for %s is None !"), - volume_id) - return False - - volume = self.db.volume_get(context, volume_id) - volume_metadata = dict((item['key'], item['value']) - for item in volume['volume_metadata']) - mapping_uuid = volume_metadata.get('mapping_uuid', None) - - ccded_id = self.volumes_mapping_cache['volumes'].get(volume_id, None) - if ccded_id is None: - LOG.error(_("cascade info:cascaded volume for %s in volume mapping" - "cache is None"), volume_id) - return False - - if mapping_uuid != ccded_id: - msg = _("cascade info: cascaded vol for %(volume_id)s in volume" - " mapping cache is %(ccded_id)s ,not equal mapping_uuid" - "%(mapping_uuid)s") - LOG.error(msg % {"volume_id": volume_id, - "ccded_id": ccded_id, - "mapping_uuid": mapping_uuid}) - return False - - if ccded_id != refresh_vol._info['id']: - rtn_id = refresh_vol._info['id'] - msg = _("cascade info: cascaded vol id %(ccded_id)s not equal" - " return volume id:%(rtn_id)s") - LOG.error(msg % {"ccded_id": ccded_id, - "rtn_id": rtn_id}) - return False - - return True - - def _update_volumes(self, context, volumes): - for volume in volumes: - LOG.debug(_("cascade ino: update volume:%s"), str(volume._info)) - try: - ret = self._check_update_volume(context, volume) - if not ret: - if CONF.clean_extra_cascaded_vol_flag: - ccded_vol = volume._info['id'] - self.adminCinderClient.volumes.delete(volume=ccded_vol) - LOG.info(_("Cascade info:cascaded volume %s deleted!"), - ccded_vol) - continue - - volume_id = volume._info['metadata']['logicalVolumeId'] - volume_status = volume._info['status'] - if volume_status == "available": - if volume._info['bootable'].lower() == 'false': - bootable_vl = '0' - else: - bootable_vl = '1' - self.db.volume_update(context, volume_id, - {'status': volume._info['status'], - 'attach_status': 'detached', - 'instance_uuid': None, - 'attached_host': None, - 'mountpoint': None, - 'attach_time': None, - 'bootable': bootable_vl - }) - metadata = volume._info['metadata'] - self._update_volume_metada(context, volume_id, metadata) - elif volume_status == "in-use": - self.db.volume_update(context, volume_id, - {'status': volume._info['status'], - 'attach_status': 'attached', - 'attach_time': timeutils.strtime() - }) - else: - self.db.volume_update(context, volume_id, - {'status': volume._info['status']}) - LOG.info(_('cascade ino: updated the volume %s status from' - 'cinder-proxy'), volume_id) - except exception.VolumeNotFound: - LOG.error(_("cascade ino: cascading volume for %s not found!"), - volume._info['id']) - continue - - def _update_volume_metada(self, context, volume_id, ccded_volume_metadata): - ccding_vol_metadata = self.db.volume_metadata_get(context, volume_id) - ccded_vol_metadata_keys = ccded_volume_metadata.keys() - unsync_metada_keys_list = ['logicalVolumeId', 'urn', 'uri'] - for temp_unsync_key in unsync_metada_keys_list: - if temp_unsync_key in ccded_vol_metadata_keys: - ccded_vol_metadata_keys.remove(temp_unsync_key) - - for temp_key in 
ccded_vol_metadata_keys: - ccding_vol_metadata[temp_key] =\ - ccded_volume_metadata.get(temp_key, None) - - self.db.volume_metadata_update(context, volume_id, - ccding_vol_metadata, False) - - def _update_volume_types(self, context, volumetypes): - vol_types = self.db.volume_type_get_all(context, inactive=False) - LOG.debug(_("cascade ino:, vol_types cascading :%s"), vol_types) - for volumetype in volumetypes: - LOG.debug(_("cascade ino: vol types cascaded :%s"), volumetype) - volume_type_name = volumetype._info['name'] - if volume_type_name not in vol_types.keys(): - extraspec = volumetype._info['extra_specs'] - self.db.volume_type_create( - context, - dict(name=volume_type_name, extra_specs=extraspec)) - LOG.debug(_("cascade ino: update volume types finished")) - - def _update_volume_qos(self, context, qosSpecs): - qos_specs = self.db.qos_specs_get_all(context, inactive=False) - - qosname_list_cascading = [] - for qos_cascading in qos_specs: - qosname_list_cascading.append(qos_cascading['name']) - - for qos_cascaded in qosSpecs: - qos_name_cascaded = qos_cascaded._info['name'] - - """update qos from cascaded cinder - """ - if qos_name_cascaded not in qosname_list_cascading: - qos_create_val = {} - qos_create_val['name'] = qos_name_cascaded - qos_spec_value = qos_cascaded._info['specs'] - qos_spec_value['consumer'] = \ - qos_cascaded._info['consumer'] - qos_create_val['qos_specs'] = qos_spec_value - LOG.info(_('cascade ino: create qos_spec %sin db'), - qos_name_cascaded) - self.db.qos_specs_create(context, qos_create_val) - LOG.info(_('cascade ino: qos_spec finished %sin db'), - qos_create_val) - - """update qos specs association with vol types from cascaded - """ - qos_specs_id = qos_cascading['id'] - assoc_ccd = \ - self.db.volume_type_qos_associations_get(context, - qos_specs_id) - qos = qos_cascaded._info['id'] - association = \ - self.adminCinderClient.qos_specs.get_associations(qos) - - for assoc in association: - assoc_name = assoc._info['name'] - LOG.debug(_("cascade ino: assoc name %s"), assoc_name) - if assoc_ccd is None or assoc_name not in assoc_ccd: - voltype = \ - self.db.volume_type_get_by_name(context, - assoc_name) - LOG.debug(_("cascade ino: voltypes %s"), voltype) - self.db.qos_specs_associate(context, - qos_cascading['id'], - voltype['id'],) - LOG.debug(_("cascade ino: update qos from cascaded finished")) - - @periodic_task.periodic_task(spacing=CONF.volume_sync_interval, - run_immediately=True) - def _heal_volume_status(self, context): - - # TIME_SHIFT_TOLERANCE = 3 - - heal_interval = CONF.volume_sync_interval - - if not heal_interval: - return - - curr_time = time.time() - if self._last_info_volume_state_heal + heal_interval > curr_time: - return - self._last_info_volume_state_heal = curr_time - - try: - LOG.debug(_('cascade ino: current change since time:' - '%s'), self._change_since_time) - volumes = \ - self._query_vol_cascaded_pagination(self._change_since_time) - if volumes: - self._update_volumes(context, volumes) - - self._change_since_time = timeutils.isotime() - - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_('Failed to sys volume status to db.')) - - @periodic_task.periodic_task(spacing=CONF.voltype_sync_interval, - run_immediately=True) - def _heal_volumetypes_and_qos(self, context): - - try: - - volumetypes = self.adminCinderClient.volume_types.list() - if volumetypes: - self._update_volume_types(context, volumetypes) - - qosSpecs = self.adminCinderClient.qos_specs.list() - if qosSpecs: - self._update_volume_qos(context, 
qosSpecs) - except cinder_exception.Unauthorized: - self.adminCinderClient = self._get_cinder_cascaded_admin_client() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_('Failed to sys volume type to db.')) - - @locked_volume_operation - def delete_volume(self, context, volume_id, unmanage_only=False): - """Deletes and unexports volume.""" - context = context.elevated() - - volume_ref = self.db.volume_get(context, volume_id) - - if context.project_id != volume_ref['project_id']: - project_id = volume_ref['project_id'] - else: - project_id = context.project_id - - LOG.info(_("volume %s: deleting"), volume_ref['id']) - if volume_ref['attach_status'] == "attached": - # Volume is still attached, need to detach first - raise exception.VolumeAttached(volume_id=volume_id) - - self._notify_about_volume_usage(context, volume_ref, "delete.start") - self._reset_stats() - - try: - if unmanage_only: - self._ummanage(context, volume_id) - else: - self._delete_cascaded_volume(context, volume_id) - except exception.VolumeIsBusy: - LOG.error(_("Cannot delete volume %s: volume is busy"), - volume_ref['id']) - self.db.volume_update(context, volume_ref['id'], - {'status': 'available'}) - return True - except Exception: - with excutils.save_and_reraise_exception(): - self.db.volume_update(context, - volume_ref['id'], - {'status': 'error_deleting'}) - - # If deleting the source volume in a migration, we want to skip quotas - # and other database updates. - if volume_ref['migration_status']: - return True - - # Get reservations - try: - reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']} - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume_ref.get('volume_type_id')) - reservations = QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - except Exception: - reservations = None - LOG.exception(_("Failed to update usages deleting volume")) - - # Delete glance metadata if it exists - try: - self.db.volume_glance_metadata_delete_by_volume(context, volume_id) - LOG.debug(_("volume %s: glance metadata deleted"), - volume_ref['id']) - except exception.GlanceMetadataNotFound: - LOG.debug(_("no glance metadata found for volume %s"), - volume_ref['id']) - - self.db.volume_destroy(context, volume_id) - LOG.info(_("volume %s: deleted successfully"), volume_ref['id']) - self._notify_about_volume_usage(context, volume_ref, "delete.end") - - # Commit the reservations - if reservations: - QUOTAS.commit(context, reservations, project_id=project_id) - - self.publish_service_capabilities(context) - - return True - - def _delete_cascaded_volume(self, context, volume_id): - - try: - # vol_ref = self.db.volume_get(context, volume_id) - # caecaded_volume_id = vol_ref['mapping_uuid'] - cascaded_volume_id = \ - self.volumes_mapping_cache['volumes'].get(volume_id, None) - if cascaded_volume_id is None: - LOG.error(_("cascade info: physical volume for vol %s " - "not found !"), volume_id) - return - LOG.info(_('cascade ino: prepare to delete cascaded volume %s.'), - cascaded_volume_id) - - cinderClient = self._get_cinder_cascaded_user_client(context) - cinderClient.volumes.get(cascaded_volume_id) - cinderClient.volumes.delete(volume=cascaded_volume_id) - self.volumes_mapping_cache['volumes'].pop(volume_id, '') - LOG.info(_('cascade ino: finished to delete cascade volume %s'), - cascaded_volume_id) - return - # self._heal_volume_mapping_cache(volume_id,casecade_volume_id,s'remove') - except cinder_exception.NotFound: - self.volumes_mapping_cache['volumes'].pop(volume_id, '') 
- - LOG.info(_('cascade ino: finished to delete cascade volume %s'), - cascaded_volume_id) - return - except Exception: - with excutils.save_and_reraise_exception(): - self.db.volume_update(context, - volume_id, - {'status': 'error_deleting'}) - LOG.error(_('cascade ino: failed to delete cascaded' - 'volume %s'), cascaded_volume_id) - - def create_snapshot(self, context, volume_id, snapshot_id): - """Creates and exports the snapshot.""" - - context = context.elevated() - snapshot_ref = self.db.snapshot_get(context, snapshot_id) - snap_name = snapshot_ref['display_name'] - display_name = self._gen_ccding_snapshot_name(snap_name, snapshot_id) - display_description = snapshot_ref['display_description'] - LOG.info(_("snapshot %s: creating"), snapshot_ref['id']) - - self._notify_about_snapshot_usage( - context, snapshot_ref, "create.start") - - vol_ref = self.db.volume_get(context, volume_id) - - try: - cascaded_volume_id = \ - self.volumes_mapping_cache['volumes'].get(volume_id, '') - LOG.debug(_('cascade ino: create snapshot, cascaded volume' - 'id is : %s '), cascaded_volume_id) - cinderClient = self._get_cinder_cascaded_user_client(context) - bodyResponse = cinderClient.volume_snapshots.create( - volume_id=cascaded_volume_id, - force=False, - name=display_name, - description=display_description) - - LOG.info(_("cascade ino: create snapshot while response is:%s"), - bodyResponse._info) - if bodyResponse._info['status'] == 'creating': - self.volumes_mapping_cache['snapshots'][snapshot_id] = \ - bodyResponse._info['id'] - - while True: - time.sleep(CONF.volume_sync_interval) - queryResponse = \ - cinderClient.volume_snapshots.get(bodyResponse._info['id']) - query_status = queryResponse._info['status'] - if query_status != 'creating': - self.db.snapshot_update(context, snapshot_ref['id'], - {'status': query_status, - 'progress': '100%' - }) - break - else: - continue - # self.db.snapshot_update( - # context, - # snapshot_ref['id'], - # {'mapping_uuid': bodyResponse._info['id']}) - - except Exception: - with excutils.save_and_reraise_exception(): - self.db.snapshot_update(context, - snapshot_ref['id'], - {'status': 'error'}) - return - # vol_ref = self.db.volume_get(context, volume_id) - - if vol_ref.bootable: - try: - self.db.volume_glance_metadata_copy_to_snapshot( - context, snapshot_ref['id'], volume_id) - except exception.CinderException as ex: - LOG.exception(_("Failed updating %(snapshot_id)s" - " metadata using the provided volumes" - " %(volume_id)s metadata") % - {'volume_id': volume_id, - 'snapshot_id': snapshot_id}) - raise exception.MetadataCopyFailure(reason=ex) - - LOG.info(_("cascade ino: snapshot %s, created successfully"), - snapshot_ref['id']) - self._notify_about_snapshot_usage(context, snapshot_ref, "create.end") - - return snapshot_id - - @locked_snapshot_operation - def delete_snapshot(self, context, snapshot_id): - """Deletes and unexports snapshot.""" - caller_context = context - context = context.elevated() - snapshot_ref = self.db.snapshot_get(context, snapshot_id) - project_id = snapshot_ref['project_id'] - - LOG.info(_("snapshot %s: deleting"), snapshot_ref['id']) - self._notify_about_snapshot_usage( - context, snapshot_ref, "delete.start") - - try: - LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id']) - - # Pass context so that drivers that want to use it, can, - # but it is not a requirement for all drivers. 
- snapshot_ref['context'] = caller_context - - self._delete_snapshot_cascaded(context, snapshot_id) - except exception.SnapshotIsBusy: - LOG.error(_("Cannot delete snapshot %s: snapshot is busy"), - snapshot_ref['id']) - self.db.snapshot_update(context, - snapshot_ref['id'], - {'status': 'available'}) - return True - except Exception: - with excutils.save_and_reraise_exception(): - self.db.snapshot_update(context, - snapshot_ref['id'], - {'status': 'error_deleting'}) - - # Get reservations - try: - if CONF.no_snapshot_gb_quota: - reserve_opts = {'snapshots': -1} - else: - reserve_opts = { - 'snapshots': -1, - 'gigabytes': -snapshot_ref['volume_size'], - } - volume_ref = self.db.volume_get(context, snapshot_ref['volume_id']) - QUOTAS.add_volume_type_opts(context, - reserve_opts, - volume_ref.get('volume_type_id')) - reservations = QUOTAS.reserve(context, - project_id=project_id, - **reserve_opts) - except Exception: - reservations = None - LOG.exception(_("Failed to update usages deleting snapshot")) - self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) - self.db.snapshot_destroy(context, snapshot_id) - LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id']) - self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end") - - # Commit the reservations - if reservations: - QUOTAS.commit(context, reservations, project_id=project_id) - return True - - def _delete_snapshot_cascaded(self, context, snapshot_id): - - try: - # snapshot_ref = self.db.snapshot_get(context, snapshot_id) - # cascaded_snapshot_id = snapshot_ref['mapping_uuid'] - cascaded_snapshot_id = \ - self.volumes_mapping_cache['snapshots'].get(snapshot_id, '') - LOG.info(_("cascade ino: delete cascaded snapshot:%s"), - cascaded_snapshot_id) - - cinderClient = self._get_cinder_cascaded_user_client(context) - cinderClient.volume_snapshots.get(cascaded_snapshot_id) - resp = cinderClient.volume_snapshots.delete(cascaded_snapshot_id) - self.volumes_mapping_cache['snapshots'].pop(snapshot_id, '') - LOG.info(_("delete cascaded snapshot %s successfully. 
resp :%s"), - cascaded_snapshot_id, resp) - return - except cinder_exception.NotFound: - self.volumes_mapping_cache['snapshots'].pop(snapshot_id, '') - LOG.info(_("delete cascaded snapshot %s successfully."), - cascaded_snapshot_id) - return - except Exception: - with excutils.save_and_reraise_exception(): - self.db.snapshot_update(context, - snapshot_id, - {'status': 'error_deleting'}) - LOG.error(_("failed to delete cascaded snapshot %s"), - cascaded_snapshot_id) - - def attach_volume(self, context, volume_id, instance_uuid, host_name, - mountpoint, mode): - """Updates db to show volume is attached - interface about attch_volume has been realized in nova-proxy - cinder-proxy just update cascading level data, other fields - about attaching is synced from timer (_heal_volume_status) - """ - @utils.synchronized(volume_id, external=True) - def do_attach(): - # check the volume status before attaching - volume = self.db.volume_get(context, volume_id) - volume_metadata = self.db.volume_admin_metadata_get( - context.elevated(), volume_id) - if volume['status'] == 'attaching': - if (volume['instance_uuid'] and volume['instance_uuid'] != - instance_uuid): - msg = _("being attached by another instance") - raise exception.InvalidVolume(reason=msg) - if (volume['attached_host'] and volume['attached_host'] != - host_name): - msg = _("being attached by another host") - raise exception.InvalidVolume(reason=msg) - if (volume_metadata.get('attached_mode') and - volume_metadata.get('attached_mode') != mode): - msg = _("being attached by different mode") - raise exception.InvalidVolume(reason=msg) - elif volume['status'] != "available": - msg = _("status must be available") - raise exception.InvalidVolume(reason=msg) - # TODO(jdg): attach_time column is currently varchar - # we should update this to a date-time object - # also consider adding detach_time? 
- self._notify_about_volume_usage(context, volume, - "attach.start") - host_name_sanitized = None - if instance_uuid is not None: - if uuidutils.is_uuid_like(instance_uuid): - self.db.volume_update(context, volume_id, - {"instance_uuid": instance_uuid, - "mountpoint": mountpoint - }) - LOG.debug(_('Cascade info: attach volume, db, vm_uuid %s,' - 'mountpoint:%s'), instance_uuid, mountpoint) - else: - self.db.volume_update(context, volume_id, - {'status': 'error_attaching'}) - raise exception.InvalidUUID(uuid=instance_uuid) - elif host_name is not None: - self.db.volume_update(context, volume_id, - {"attached_host": host_name, - "mountpoint": mountpoint, - }) - LOG.debug(_('Cascade info: attach volume, db, host_name %s,' - 'mountpoint:%s'), host_name, mountpoint) - host_name_sanitized = utils.sanitize_hostname(host_name) - self.db.volume_admin_metadata_update(context.elevated(), - volume_id, - {"attached_mode": mode}, - False) - volume = self.db.volume_attached(context.elevated(), - volume_id, - instance_uuid, - host_name_sanitized, - mountpoint) - if volume['migration_status']: - self.db.volume_update(context, volume_id, - {'migration_status': None}) - self._notify_about_volume_usage(context, volume, "attach.end") - return do_attach() - - @locked_volume_operation - def detach_volume(self, context, volume_id): - """Updates db to show volume is detached - interface about detach_volume has been realized in nova-proxy - cinder-proxy just update cascading level data, other fields - about detaching is synced from timer (_heal_volume_status) - """ - # TODO(vish): refactor this into a more general "unreserve" - # TODO(sleepsonthefloor): Is this 'elevated' appropriate? - # self.db.volume_detached(context.elevated(), volume_id) - self.db.volume_admin_metadata_delete(context.elevated(), volume_id, - 'attached_mode') - - def copy_volume_to_image(self, context, volume_id, image_meta): - """Uploads the specified volume to Glance. 
- - image_meta is a dictionary containing the following keys: - 'id', 'container_format', 'disk_format' - - """ - LOG.info(_("cascade ino: copy volume to image, image_meta is:%s"), - image_meta) - force = image_meta.get('force', False) - image_name = image_meta.get("name") - container_format = image_meta.get("container_format") - disk_format = image_meta.get("disk_format") - # vol_ref = self.db.volume_get(context, volume_id) - # casecaded_volume_id = vol_ref['mapping_uuid'] - cascaded_volume_id = \ - self.volumes_mapping_cache['volumes'].get(volume_id, '') - LOG.debug(_('cascade ino: cop vol to img, ccded vol id is %s'), - cascaded_volume_id) - cinderClient = self._get_cinder_cascaded_user_client(context) - - resp = cinderClient.volumes.upload_to_image( - volume=cascaded_volume_id, - force=force, - image_name=image_name, - container_format=container_format, - disk_format=disk_format) - - if cfg.CONF.glance_cascading_flag: - cascaded_image_id = resp[1]['os-volume_upload_image']['image_id'] - LOG.debug(_('cascade ino:upload volume to image,get cascaded ' - 'image id is %s'), cascaded_image_id) - url = '%s/v2/images/%s' % (cfg.CONF.cascaded_glance_url, - cascaded_image_id) - locations = [{ - 'url': url, - 'metadata': {'image_id': str(cascaded_image_id), - 'image_from': 'volume' - } - }] - - image_service, image_id = \ - glance.get_remote_image_service(context, image_meta['id']) - LOG.debug(_("cascade ino: image service:%s"), image_service) - - netloc = cfg.CONF.cascading_glance_url - header = 'http://' - if header in cfg.CONF.cascading_glance_url: - netloc = netloc[len(header):] - - glanceClient = glance.GlanceClientWrapper( - context, - netloc=netloc, - use_ssl=False, - version="2") - glanceClient.call(context, 'update', image_id, - remove_props=None, locations=locations) - LOG.debug(_('cascade ino:upload volume to image,finish update' - 'image %s locations %s.'), (image_id, locations)) - - volume = self.db.volume_get(context, volume_id) - if (volume['instance_uuid'] is None and - volume['attached_host'] is None): - self.db.volume_update(context, volume_id, - {'status': 'available'}) - else: - self.db.volume_update(context, volume_id, - {'status': 'in-use'}) - - def initialize_connection(self, context, volume_id, connector): - """Prepare volume for connection from host represented by connector. - volume in openstack cascading level is just a logical data, - initialize connection has losts its meaning, so the interface here - just return a None value - """ - return None - - def terminate_connection(self, context, volume_id, connector, force=False): - """Cleanup connection from host represented by connector. - volume in openstack cascading level is just a logical data, - terminate connection has losts its meaning, so the interface here - just return a None value - """ - return None - - @periodic_task.periodic_task - def _report_driver_status(self, context): - """cinder cascading driver has losts its meaning. 
- so driver-report info here is just a copy of simulation message - """ - LOG.info(_("report simulation volume driver")) - simu_location_info = 'LVMVolumeDriver:Huawei:cinder-volumes:default:0' - - volume_backend_list = ['LVM_ISCSI'] - - volume_stats = { - 'QoS_support': True, - 'free_capacity_gb': 10240.0, - 'location_info': simu_location_info, - 'total_capacity_gb': 10240.0, - 'reserved_percentage': 0, - 'driver_version': '2.0.0', - 'vendor_name': 'Huawei', - 'storage_protocol': 'iSCSI'} - - if CONF.enabled_volume_types: - for voltype_name in CONF.enabled_volume_types: - vol_type =\ - self.db.volume_type_get_by_name(context, voltype_name) - for key, value in vol_type['extra_specs'].iteritems(): - LOG.debug("key %s, value %s", key, value) - if key == 'volume_backend_name' and \ - value not in volume_backend_list: - volume_backend_list.append(value) - else: - continue - - LOG.info('cascade info: proxy support volume backends: %s !!!!', - volume_backend_list) - for volume_backend in volume_backend_list: - volume_stats['volume_backend_name'] = volume_backend - self.update_service_capabilities(volume_stats) - - def publish_service_capabilities(self, context): - """Collect driver status and then publish.""" - self._report_driver_status(context) - self._publish_service_capabilities(context) - - def _reset_stats(self): - LOG.info(_("Clear capabilities")) - self._last_volume_stats = [] - - def notification(self, context, event): - LOG.info(_("Notification {%s} received"), event) - self._reset_stats() - - def _notify_about_volume_usage(self, - context, - volume, - event_suffix, - extra_usage_info=None): - volume_utils.notify_about_volume_usage( - context, volume, event_suffix, - extra_usage_info=extra_usage_info, host=self.host) - - def _notify_about_snapshot_usage(self, - context, - snapshot, - event_suffix, - extra_usage_info=None): - volume_utils.notify_about_snapshot_usage( - context, snapshot, event_suffix, - extra_usage_info=extra_usage_info, host=self.host) - - def extend_volume(self, context, volume_id, new_size, reservations): - volume = self.db.volume_get(context, volume_id) - - self._notify_about_volume_usage(context, volume, "resize.start") - try: - LOG.info(_("volume %s: extending"), volume['id']) - - cinderClient = self._get_cinder_cascaded_user_client(context) - - # vol_ref = self.db.volume_get(context, volume_id) - # cascaded_volume_id = vol_ref['mapping_uuid'] - cascaded_volume_id = \ - self.volumes_mapping_cache['volumes'].get(volume_id, '') - LOG.info(_("cascade ino: extend volume cascaded volume id is:%s"), - cascaded_volume_id) - cinderClient.volumes.extend(cascaded_volume_id, new_size) - LOG.info(_("cascade ino: volume %s: extended successfully"), - volume['id']) - - except Exception: - LOG.exception(_("volume %s: Error trying to extend volume"), - volume_id) - try: - self.db.volume_update(context, volume['id'], - {'status': 'error_extending'}) - finally: - QUOTAS.rollback(context, reservations) - return - - QUOTAS.commit(context, reservations) - self.db.volume_update(context, volume['id'], {'size': int(new_size), - 'status': 'extending'}) - self._notify_about_volume_usage( - context, volume, "resize.end", - extra_usage_info={'size': int(new_size)}) - - def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False): - """Migrate the volume to the specified host (called on source host). 
- the interface is being realized - """ - return diff --git a/cinderproxy/installation/install.sh b/cinderproxy/installation/install.sh deleted file mode 100644 index 014a34ea..00000000 --- a/cinderproxy/installation/install.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -_CINDER_CONF_DIR="/etc/cinder" -_CINDER_CONF_FILE="cinder.conf" -_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder" -_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log" - -# please set the option list set in cinder configure file -_CINDER_CONF_OPTION=("volume_manager=cinder.volume.cinder_proxy.CinderProxy volume_sync_interval=5 voltype_sync_interval=3600 periodic_interval=5 volume_sync_timestamp_flag=True cinder_tenant_name=admin cinder_tenant_id=1234 pagination_limit=50 cinder_username=admin cinder_password=1234 keystone_auth_url=http://10.67.148.210:5000/v2.0/ glance_cascading_flag=False cascading_glance_url=10.67.148.210:9292 cascaded_glance_url=http://10.67.148.201:9292 cascaded_cinder_url=http://10.67.148.201:8776/v2/%(project_id)s cascaded_region_name=Region_AZ1 cascaded_available_zone=AZ1") - -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="../cinder/" -_BACKUP_DIR="${_CINDER_DIR}/cinder-proxy-installation-backup" - - -function log() -{ - if [ ! -f "${_CINDER_INSTALL_LOG}" ] ; then - mkdir -p `dirname ${_CINDER_INSTALL_LOG}` - touch $_CINDER_INSTALL_LOG - chmod 777 $_CINDER_INSTALL_LOG - fi - echo "$@" - echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_CINDER_INSTALL_LOG -} - -if [[ ${EUID} -ne 0 ]]; then - log "Please run as root." - exit 1 -fi - - -cd `dirname $0` - - -log "checking installation directories..." -if [ ! -d "${_CINDER_DIR}" ] ; then - log "Could not find the cinder installation. Please check the variables in the beginning of the script." - log "aborted." - exit 1 -fi -if [ ! -f "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" ] ; then - log "Could not find cinder config file. Please check the variables in the beginning of the script." - log "aborted." - exit 1 -fi - -log "checking previous installation..." -if [ -d "${_BACKUP_DIR}/cinder" ] ; then - log "It seems cinder-proxy has already been installed!" - log "Please check README for solution if this is not true." - exit 1 -fi - -log "backing up current files that might be overwritten..." -mkdir -p "${_BACKUP_DIR}/cinder" -mkdir -p "${_BACKUP_DIR}/etc/cinder" -cp -r "${_CINDER_DIR}/volume" "${_BACKUP_DIR}/cinder/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/cinder" - log "Error in code backup, aborted." - exit 1 -fi -cp "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" "${_BACKUP_DIR}/etc/cinder/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/cinder" - rm -r "${_BACKUP_DIR}/etc" - log "Error in config backup, aborted." - exit 1 -fi - -log "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_CINDER_DIR}` -if [ $? -ne 0 ] ; then - log "Error in copying, aborted." 
- log "Recovering original files..." - cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder" - if [ $? -ne 0 ] ; then - log "Recovering failed! Please install manually." - fi - exit 1 -fi - -log "updating config file..." -sed -i.backup -e "/volume_manager *=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" -sed -i.backup -e "/periodic_interval *=/d" "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" -for option in $_CINDER_CONF_OPTION -do -sed -i -e "/\[DEFAULT\]/a \\"$option "${_CINDER_CONF_DIR}/${_CINDER_CONF_FILE}" -done - -if [ $? -ne 0 ] ; then - log "Error in updating, aborted." - log "Recovering original files..." - cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder" - if [ $? -ne 0 ] ; then - log "Recovering /cinder failed! Please install manually." - fi - cp "${_BACKUP_DIR}/etc/cinder/${_CINDER_CONF_FILE}" "${_CINDER_CONF_DIR}" && rm -r "${_BACKUP_DIR}/etc" - if [ $? -ne 0 ] ; then - log "Recovering config failed! Please install manually." - fi - exit 1 -fi - -log "restarting cinder proxy..." -service openstack-cinder-volume restart -if [ $? -ne 0 ] ; then - log "There was an error in restarting the service, please restart cinder proxy manually." - exit 1 -fi - -log "Cinder proxy Completed." -log "See README to get started." - -exit 0 diff --git a/novaproxy/nova/compute/clients.py b/compute/clients.py similarity index 98% rename from novaproxy/nova/compute/clients.py rename to compute/clients.py index 674411ad..c776ff6b 100644 --- a/novaproxy/nova/compute/clients.py +++ b/compute/clients.py @@ -15,8 +15,8 @@ from oslo.config import cfg -from nova.openstack.common import importutils -from nova.openstack.common import log as logging +from oslo.utils import importutils +from oslo_log import log as logging logger = logging.getLogger(__name__) diff --git a/novaproxy/nova/compute/compute_context.py b/compute/compute_context.py similarity index 98% rename from novaproxy/nova/compute/compute_context.py rename to compute/compute_context.py index 173469de..703b4204 100644 --- a/novaproxy/nova/compute/compute_context.py +++ b/compute/compute_context.py @@ -17,9 +17,9 @@ from oslo.config import cfg from nova.openstack.common import local from nova import exception from nova import wsgi -from nova.openstack.common import context -from nova.openstack.common import importutils -from nova.openstack.common import uuidutils +from oslo_context import context +from oslo.utils import importutils +from oslo.utils import uuidutils def generate_request_id(): diff --git a/novaproxy/nova/compute/compute_keystoneclient.py b/compute/compute_keystoneclient.py similarity index 98% rename from novaproxy/nova/compute/compute_keystoneclient.py rename to compute/compute_keystoneclient.py index f86e7ed9..42cab696 100644 --- a/novaproxy/nova/compute/compute_keystoneclient.py +++ b/compute/compute_keystoneclient.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-from nova.openstack.common import context +from oslo_context import context from nova import exception import eventlet @@ -21,8 +21,8 @@ import eventlet from keystoneclient.v2_0 import client as kc from keystoneclient.v3 import client as kc_v3 from oslo.config import cfg -from nova.openstack.common import importutils -from nova.openstack.common import log as logging +from oslo.utils import importutils +from oslo_log import log as logging logger = logging.getLogger('nova.compute.keystoneclient') diff --git a/novaproxy/nova/compute/manager_proxy.py b/compute/manager_proxy.py similarity index 98% rename from novaproxy/nova/compute/manager_proxy.py rename to compute/manager_proxy.py index c1c55b4e..d82a8131 100644 --- a/novaproxy/nova/compute/manager_proxy.py +++ b/compute/manager_proxy.py @@ -82,12 +82,12 @@ from nova.objects import quotas as quotas_obj from nova.objects import block_device as block_device_obj from nova.objects import compute_node as compute_node_obj from nova.objects import service as service_obj -from nova.openstack.common import excutils -from nova.openstack.common import jsonutils -from nova.openstack.common import log as logging +from oslo.utils import excutils +from oslo.serialization import jsonutils +from oslo_log import log as logging from nova.openstack.common import periodic_task -from nova.openstack.common import strutils -from nova.openstack.common import timeutils +from oslo.utils import strutils +from oslo.utils import timeutils from nova import paths from nova import rpc from nova.scheduler import rpcapi as scheduler_rpcapi @@ -176,50 +176,6 @@ compute_opts = [ ] interval_opts = [ - cfg.IntOpt('bandwidth_poll_interval', - default=600, - help='Interval to pull network bandwidth usage info. Not ' - 'supported on all hypervisors. Set to -1 to disable. ' - 'Setting this to 0 will disable, but this will change in ' - 'the K release to mean "run at the default rate".'), - # TODO(gilliard): Clean the above message after the K release - cfg.IntOpt('sync_power_state_interval', - default=600, - help='Interval to sync power states between the database and ' - 'the hypervisor. Set to -1 to disable. ' - 'Setting this to 0 will disable, but this will change in ' - 'Juno to mean "run at the default rate".'), - # TODO(gilliard): Clean the above message after the K release - cfg.IntOpt("heal_instance_info_cache_interval", - default=60, - help="Number of seconds between instance info_cache self " - "healing updates"), - cfg.IntOpt('reclaim_instance_interval', - default=0, - help='Interval in seconds for reclaiming deleted instances'), - cfg.IntOpt('volume_usage_poll_interval', - default=0, - help='Interval in seconds for gathering volume usages'), - cfg.IntOpt('shelved_poll_interval', - default=3600, - help='Interval in seconds for polling shelved instances to ' - 'offload. Set to -1 to disable.' - 'Setting this to 0 will disable, but this will change in ' - 'Juno to mean "run at the default rate".'), - # TODO(gilliard): Clean the above message after the K release - cfg.IntOpt('shelved_offload_time', - default=0, - help='Time in seconds before a shelved instance is eligible ' - 'for removing from a host. 
-1 never offload, 0 offload ' - 'when shelved'), - cfg.IntOpt('instance_delete_interval', - default=300, - help=('Interval in seconds for retrying failed instance file ' - 'deletes')), - cfg.IntOpt('block_device_allocate_retries_interval', - default=3, - help='Waiting time interval (seconds) between block' - ' device allocation retries on failures'), cfg.IntOpt('sync_instance_state_interval', default=5, help='interval to sync instance states between ' @@ -235,36 +191,9 @@ interval_opts = [ ] timeout_opts = [ - cfg.IntOpt("reboot_timeout", - default=0, - help="Automatically hard reboot an instance if it has been " - "stuck in a rebooting state longer than N seconds. " - "Set to 0 to disable."), - cfg.IntOpt("instance_build_timeout", - default=0, - help="Amount of time in seconds an instance can be in BUILD " - "before going into ERROR status." - "Set to 0 to disable."), - cfg.IntOpt("rescue_timeout", - default=0, - help="Automatically unrescue an instance after N seconds. " - "Set to 0 to disable."), - cfg.IntOpt("resize_confirm_window", - default=0, - help="Automatically confirm resizes after N seconds. " - "Set to 0 to disable."), - cfg.IntOpt("shutdown_timeout", - default=60, - help="Total amount of time to wait in seconds for an instance " - "to perform a clean shutdown."), ] running_deleted_opts = [ - cfg.StrOpt("running_deleted_instance_action", - default="reap", - help="Action to take if a running deleted instance is detected." - "Valid options are 'noop', 'log', 'shutdown', or 'reap'. " - "Set to 'noop' to take no action."), cfg.IntOpt("running_deleted_instance_poll_interval", default=1800, help="Number of seconds to wait between runs of the cleanup " @@ -701,7 +630,7 @@ class ComputeVirtAPI(virtapi.VirtAPI): class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" - target = messaging.Target(version='3.35') + target = messaging.Target(version='4.0') # How long to wait in seconds before re-issuing a shutdown # signal to a instance during power off. The overall @@ -749,6 +678,7 @@ class ComputeManager(manager.Manager): # NOTE(russellb) Load the driver last. It may call back into the # compute manager via the virtapi, so we want it to be fully # initialized before that happens. + self.driver = driver.load_compute_driver(self.virtapi, compute_driver) self.use_legacy_block_device_info = \ self.driver.need_legacy_block_device_info @@ -962,6 +892,23 @@ class ComputeManager(manager.Manager): self._update_resource_tracker(context, instance_ref) return instance_ref + # Vega: Since the patch for bug 1158684 is accepted, nova no longer + # automatically deletes pre-created ports. We need to handle ports + # deleting manually. 
+ def _delete_proxy_port(self, context, instance_uuid): + search_opts = {'device_id': instance_uuid} + csd_neutron_client = ComputeManager.get_neutron_client(CONF.proxy_region_name) + data = csd_neutron_client.list_ports(**search_opts) + ports = [port['id'] for port in data.get('ports', [])] + for port in ports: + try: + csd_neutron_client.delete_port(port) + except NeutronClientException as ne: + if ne.status_code == 404: + LOG.warning('Port %s does not exist', port) + else: + LOG.warning('Failed to delete port %s for server %s', port, instance_uuid) + def _delete_proxy_instance(self, context, instance): proxy_instance_id = self._get_csd_instance_uuid(instance) @@ -979,6 +926,7 @@ class ComputeManager(manager.Manager): task_state=None) LOG.debug(_('delete the server %s from nova-proxy'), instance['uuid']) + self._delete_proxy_port(context, proxy_instance_id) except Exception: if isinstance(sys.exc_info()[1], novaclient.exceptions.NotFound): return @@ -2410,7 +2358,7 @@ class ComputeManager(manager.Manager): def _start_building(self, context, instance): """Save the host and launched_on fields and log appropriately.""" - LOG.audit(_('Starting instance...'), context=context, + LOG.info(_('Starting instance...'), context=context, instance=instance) self._instance_update(context, instance.uuid, vm_state=vm_states.BUILDING, @@ -2784,7 +2732,7 @@ class ComputeManager(manager.Manager): node=None, limits=None): try: - LOG.audit(_('Starting instance...'), context=context, + LOG.info(_('Starting instance...'), context=context, instance=instance) instance.vm_state = vm_states.BUILDING instance.task_state = None @@ -3201,7 +3149,8 @@ class ComputeManager(manager.Manager): socket_dir = '/var/l2proxysock' if not os.path.exists(socket_dir): LOG.debug("socket file is not exist!") - raise + # Vega, temporarily comment out this exception + # raise else: retry = 5 cas_ports = [cas_port_id["port"]["id"] for cas_port_id in cascaded_ports] @@ -3339,7 +3288,7 @@ class ComputeManager(manager.Manager): trying to teardown networking """ context = context.elevated() - LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'}, + LOG.info(_('%(action_str)s instance') % {'action_str': 'Terminating'}, context=context, instance=instance) if notify: @@ -3726,7 +3675,7 @@ class ComputeManager(manager.Manager): #cascading patch context = context.elevated() with self._error_out_instance_on_exception(context, instance): - LOG.audit(_("Rebuilding instance"), context=context, + LOG.info(_("Rebuilding instance"), context=context, instance=instance) # if bdms is None: # bdms = self.conductor_api. 
\ @@ -3887,7 +3836,7 @@ class ComputeManager(manager.Manager): instance.power_state = current_power_state instance.save() - LOG.audit(_('instance snapshotting'), context=context, + LOG.info(_('instance snapshotting'), context=context, instance=instance) if instance.power_state != power_state.RUNNING: @@ -3968,7 +3917,7 @@ class ComputeManager(manager.Manager): try: self.driver.set_admin_password(instance, new_pass) - LOG.audit(_("Root password set"), instance=instance) + LOG.info(_("Root password set"), instance=instance) instance.task_state = None instance.save( expected_task_state=task_states.UPDATING_PASSWORD) @@ -4013,7 +3962,7 @@ class ComputeManager(manager.Manager): {'current_state': current_power_state, 'expected_state': expected_state}, instance=instance) - LOG.audit(_('injecting file to %s'), path, + LOG.info(_('injecting file to %s'), path, instance=instance) self.driver.inject_file(instance, path, file_contents) @@ -4379,7 +4328,7 @@ class ComputeManager(manager.Manager): rt = self._get_resource_tracker(node) with rt.resize_claim(context, instance, instance_type, image_meta=image, limits=limits) as claim: - LOG.audit(_('Migrating'), context=context, instance=instance) + LOG.info(_('Migrating'), context=context, instance=instance) self.compute_rpcapi.resize_instance( context, instance, claim.migration, image, instance_type, quotas.reservations) @@ -4750,7 +4699,7 @@ class ComputeManager(manager.Manager): def pause_instance(self, context, instance): """Pause an instance on this host.""" context = context.elevated() - LOG.audit(_('Pausing'), context=context, instance=instance) + LOG.info(_('Pausing'), context=context, instance=instance) self._notify_about_instance_usage(context, instance, 'pause.start') # self.driver.pause(instance) # current_power_state = self._get_power_state(context, instance) @@ -4774,7 +4723,7 @@ class ComputeManager(manager.Manager): def unpause_instance(self, context, instance): """Unpause a paused instance on this host.""" context = context.elevated() - LOG.audit(_('Unpausing'), context=context, instance=instance) + LOG.info(_('Unpausing'), context=context, instance=instance) self._notify_about_instance_usage(context, instance, 'unpause.start') cascaded_instance_id = self._get_csd_instance_uuid(instance) if cascaded_instance_id is None: @@ -4814,7 +4763,7 @@ class ComputeManager(manager.Manager): """Resume the given suspended instance.""" #cascading patch context = context.elevated() - LOG.audit(_('Resuming'), context=context, instance=instance) + LOG.info(_('Resuming'), context=context, instance=instance) cascaded_instance_id = self._get_csd_instance_uuid(instance) if cascaded_instance_id is None: @@ -4840,7 +4789,7 @@ class ComputeManager(manager.Manager): def get_console_output(self, context, instance, tail_length): """Send the console output for the given instance.""" context = context.elevated() - LOG.audit(_("Get console output"), context=context, + LOG.info(_("Get console output"), context=context, instance=instance) # output = self.driver.get_console_output(context, instance) @@ -5009,7 +4958,7 @@ class ComputeManager(manager.Manager): def _attach_volume(self, context, instance, bdm): context = context.elevated() - LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'), + LOG.info(_('Attaching volume %(volume_id)s to %(mountpoint)s'), {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, context=context, instance=instance) @@ -5055,7 +5004,7 @@ class ComputeManager(manager.Manager): mp = bdm.device_name volume_id = bdm.volume_id 
- LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'), + LOG.info(_('Detach volume %(volume_id)s from mountpoint %(mp)s'), {'volume_id': volume_id, 'mp': mp}, context=context, instance=instance) @@ -5489,7 +5438,7 @@ class ComputeManager(manager.Manager): resources['pci_stats'] = jsonutils.dumps([]) resources['stats'] = {} rt._update_usage_from_instances(context, resources, []) - rt._sync_compute_node(context, resources) + rt._init_compute_node(context, resources) @periodic_task.periodic_task def update_available_resource(self, context): diff --git a/envrc b/envrc deleted file mode 100755 index f4c673f2..00000000 --- a/envrc +++ /dev/null @@ -1,3 +0,0 @@ -#set up where the openstack is installed, before running the installation script, -#it's better to run 'source envrc' . -export OPENSTACK_INSTALL_DIR=/usr/lib/python2.7/dist-packages diff --git a/glancesync/README.md b/glancesync/README.md deleted file mode 100644 index 3bbab392..00000000 --- a/glancesync/README.md +++ /dev/null @@ -1,139 +0,0 @@ -Glance Sync Manager -=============================== - -This is a submodule of Tricircle Project, in which a sync function is added to support the glance images' sync between cascading and cascadeds. -When launching a instance, the nova will search the image which is in the same region with the instance to downland, this can speeded up the whole launching time of the instance. - -Key modules ------------ - -* Primarily, there is only new module in glance cascading: Sync, which is in the glance/sync package. - - glance/sync/__init__.py : Adds a ImageRepoProxy class, like store, policy .etc , to augment a sync mechanism layer on top of the api request handling chain. - glance/sync/base.py : Contains SyncManager object, execute the sync operations. - glance/sync/utils.py : Some help functions. - glance/sync/api/ : Support a Web Server of sync. - glance/sync/client/: Support a client to visit the Web Server , ImageRepoProxy use this client to call the sync requests. - glance/sync/task/: Each Sync operation is transformed into a task, we using queue to store the task an eventlet to handle the task simultaneously. - glance/sync/store/: We implements the independent-glance-store, separating the handles of image_data from image_metadata. - glance/cmd/sync.py: For the Sync Server starting launch (refer this in /usr/bin/glance-sync). - - - -* **Note:** - At present, the glance cascading only support v2 version of glance-api; - -Requirements ------------- - -* pexpect>=2.3 - -Installation ------------- -* **Note:** - - The Installation and configuration guidelines written below is just for the cascading layer of glance. For the cascaded layer, the glance is installed as normal. - -* **Prerequisites** - - Please install the python package: pexpect>=2.3 ( because we use pxssh for loginng and there is a bug in pxssh, see https://mail.python.org/pipermail/python-list/2008-February/510054.html, you should fix this before launch the service. ) - -* **Manual Installation** - - Please **make sure you have installed the glance patches in /juno-patches**. - - Make sure you have performed backups properly. -* **Manual Installation** - - 1. 
Under cascading Openstack, copy these files from glance-patch directory and glancesync directory to suitable place: - - | DIR | FROM | TO | - | ------------- |:-----------------|:-------------------------------------------| - | glancesync | glance/ | ${python_install_dir}/glance | - | glancesync | etc/glance/ | /etc/glance/ | - | glancesync | glance-sync | /usr/bin/ | - |${glance-patch}| glance/ | ${python_install_dir}/glance | - |${glance-patch}|glance.egg-info/entry_points.txt | ${glance_install_egg.info}/ | - - ${glance-patch} = `juno-patches/glance/glance_location_patch` ${python_install_dir} is where the openstack installed, e.g. `/usr/lib64/python2.6/site-packages` . - 2. Add/modify the config options - - | CONFIG_FILE | OPTION | ADD or MODIFY | - | ----------------|:---------------------------------------------------|:--------------:| - |glance-api.conf | show_multiple_locations=True | M | - |glance-api.conf | sync_server_host=${sync_mgr_host} | A | - |glance-api.conf | sync_server_port=9595 | A | - |glance-api.conf | sync_enabled=True | A | - |glance-sync.conf | cascading_endpoint_url=${glance_api_endpoint_url} | M | - |glance-sync.conf | sync_strategy=ALL | M | - |glance-sync.conf | auth_host=${keystone_host} | M | - 3. Re-launch services on cacading openstack, like: - - `service openstack-glance-api restart ` - `service openstack-glance-registry restart ` - `python /usr/bin/glance-sync --config-file=/etc/glance/glance-sync.conf & ` - -* **Automatic Installation** - 0. run `source envrc`. - 1. **make sure you have installed the glance patches in /juno-patches**: Enter the glance-patch installation dir: `cd ./tricircle/juno-patches/glance/glance_location_patch/installation` . - 2. Optional, modify the shell script variable: `_PYTHON_INSTALL_DIR` . - 3. Run the install script: `sh install.sh` - 4. Enter the glancesync installation dir: `cd ./tricircle/glancesync/installation` . - 5. Modify the cascading&cascaded glances' store scheme configuration, which is in the file: `./tricircle/glancesync/etc/glance/glance_store.yaml` . - 6. Run the install script: `sh install.sh` - -Configurations --------------- - -Besides glance-api.conf file, we add some new config files. They are described separately. - - - In glance-api.conf, three options added: - - [DEFAULT] - - # Indicate whether use the image sync, default value is False. - #If configuring on cascading layer, this value should be True. - sync_enabled = True - - #The sync server 's port number, default is 9595. - sync_server_port = 9595 - - #The sync server's host name (or ip address) - sync_server_host = 127.0.0.1 - - *Besides, the option show_multiple_locations value should be ture. - - In glance-sync.conf which newly increased, the options is similar with glance-registry.conf except: - - [DEFAULT] - - #How to sync the image, the value can be ["None", "ALL", "USER"] - #When "ALL" choosen, means to sync to all the cascaded glances; - #When "USER" choosen, means according to user's role, project, etc. - sync_strategy = ALL - - #What the cascading glance endpoint url is .(Note that this value should be consistent with what in keystone). - cascading_endpoint_url = http://127.0.0.1:9292/ - - #when snapshot sync, set the timeout time(second) of snapshot 's status - #changing into 'active'. - snapshot_timeout = 300 - - #when snapshot sync, set the polling interval time(second) to check the - #snapshot's status. - snapshot_sleep_interval = 10 - - #When sync task fails, set the retry times. 
- task_retry_times = 0 - - #When copy image data using 'scp' between filesystmes, set the timeout - #time of the copy. - scp_copy_timeout = 3600 - - #When snapshot, one can set the specific regions in which the snapshot - #will sync to. (e.g. physicalOpenstack001, physicalOpenstack002) - snapshot_region_names = - - - Last but not least, we add a yaml file for config the store backend's copy : glance_store.yaml in cascading glance. - these config correspond to various store scheme (at present, only filesystem is supported), the values - are based on your environment, so you have to config it before installation or restart the glance-sync - when modify it. - - - - diff --git a/glancesync/etc/glance-sync b/glancesync/etc/glance-sync deleted file mode 100644 index 6d35e4b6..00000000 --- a/glancesync/etc/glance-sync +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/python -# PBR Generated from 'console_scripts' - -import sys - -from glance.cmd.sync import main - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/glancesync/etc/glance/glance-sync-paste.ini b/glancesync/etc/glance/glance-sync-paste.ini deleted file mode 100644 index bef2656e..00000000 --- a/glancesync/etc/glance/glance-sync-paste.ini +++ /dev/null @@ -1,35 +0,0 @@ -# Use this pipeline for no auth - DEFAULT -[pipeline:glance-sync] -pipeline = versionnegotiation unauthenticated-context rootapp - -[filter:unauthenticated-context] -paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -# Use this pipeline for keystone auth -[pipeline:glance-sync-keystone] -pipeline = versionnegotiation authtoken context rootapp - -# Use this pipeline for authZ only. This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user. -[pipeline:glance-sync-trusted-auth] -pipeline = versionnegotiation context rootapp - -[composite:rootapp] -paste.composite_factory = glance.sync.api:root_app_factory -/v1: syncv1app - -[app:syncv1app] -paste.app_factory = glance.sync.api.v1:API.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:versionnegotiation] -paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory - -[filter:unauthenticated-context] -paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:authtoken] -paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory \ No newline at end of file diff --git a/glancesync/etc/glance/glance-sync.conf b/glancesync/etc/glance/glance-sync.conf deleted file mode 100644 index b8636d23..00000000 --- a/glancesync/etc/glance/glance-sync.conf +++ /dev/null @@ -1,60 +0,0 @@ -[DEFAULT] -# Show debugging output in logs (sets DEBUG log level output) -debug = True - -# Address to bind the API server -bind_host = 0.0.0.0 - -# Port the bind the API server to -bind_port = 9595 - -#worker number -workers = 3 - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -log_file = /var/log/glance/sync.log - -# Backlog requests when creating socket -backlog = 4096 - -#How to sync the image, the value can be ["None", "ALL", "USER"] -#When "ALL" choosen, means to sync to all the cascaded glances; -#When "USER" choosen, means according to user's role, project, etc. 
-sync_strategy = All - -#What the cascading glance endpoint is . -cascading_endpoint_url = http://127.0.0.1:9292/ - -#when snapshot sync, set the timeout time(second) of snapshot 's status -#changing into 'active'. -snapshot_timeout = 300 - -#when snapshot sync, set the polling interval time(second) to check the -#snapshot's status. -snapshot_sleep_interval = 10 - -#When sync task fails, set the retry times. -task_retry_times = 0 - -#When copy image data using 'scp' between filesystmes, set the timeout -#time of the copy. -scp_copy_timeout = 3600 - -#When snapshot, one can set the specific regions in which the snapshot -#will sync to. -snapshot_region_names = CascadedOne, CascadedTwo - -[keystone_authtoken] -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -admin_tenant_name = admin -admin_user = glance -admin_password = openstack -[paste_deploy] -config_file = /etc/glance/glance-sync-paste.ini -flavor=keystone diff --git a/glancesync/etc/glance/glance_store.yaml b/glancesync/etc/glance/glance_store.yaml deleted file mode 100644 index b11202e3..00000000 --- a/glancesync/etc/glance/glance_store.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -glances: - - name: master - service_ip: "127.0.0.1" - schemes: - - name: http - parameters: - netloc: '127.0.0.1:8800' - path: '/' - image_name: 'test.img' - - name: filesystem - parameters: - host: '127.0.0.1' - datadir: '/var/lib/glance/images/' - login_user: 'glance' - login_password: 'glance' - - name: slave1 - service_ip: "0.0.0.0" - schemes: - - name: http - parameters: - netloc: '0.0.0.0:8800' - path: '/' - - name: filesystem - parameters: - host: '0.0.0.0' - datadir: '/var/lib/glance/images/' - login_user: 'glance' - login_password: 'glance' diff --git a/glancesync/glance/cmd/sync.py b/glancesync/glance/cmd/sync.py deleted file mode 100644 index 3632d6d5..00000000 --- a/glancesync/glance/cmd/sync.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -""" -Reference implementation server for Glance Sync -""" - -import eventlet -import os -import sys - -from glance.common import utils -# Monkey patch socket and time -eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) - -# If ../glance/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): - sys.path.insert(0, possible_topdir) - -from glance.common import config -from glance.common import exception -from glance.common import wsgi -from glance.openstack.common import log -import glance.sync - - -def fail(returncode, e): - sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e)) - sys.exit(returncode) - - -def main(): - try: - config.parse_args(default_config_files='glance-sync.conf') - log.setup('glance') - - server = wsgi.Server() - server.start(config.load_paste_app('glance-sync'), default_port=9595) - server.wait() - except exception.WorkerCreationFailure as e: - fail(2, e) - except RuntimeError as e: - fail(1, e) - - -if __name__ == '__main__': - main() diff --git a/glancesync/glance/sync/__init__.py b/glancesync/glance/sync/__init__.py deleted file mode 100644 index 1c9713e9..00000000 --- a/glancesync/glance/sync/__init__.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -from oslo.config import cfg - -import glance.context -import glance.domain.proxy -import glance.openstack.common.log as logging -from glance.sync.clients import Clients as clients -from glance.sync import utils - - -LOG = logging.getLogger(__name__) - -_V2_IMAGE_CREATE_PROPERTIES = ['container_format', 'disk_format', 'min_disk', - 'min_ram', 'name', 'virtual_size', 'visibility', - 'protected'] - -_V2_IMAGE_UPDATE_PROPERTIES = ['container_format', 'disk_format', 'min_disk', - 'min_ram', 'name'] - - -def _check_trigger_sync(pre_image, image): - """ - check if it is the case that the cascaded glance has upload or first patch - location. - """ - return pre_image.status in ('saving', 'queued') and image.size and \ - [l for l in image.locations if not utils.is_glance_location(l['url'])] - - -def _from_snapshot_request(pre_image, image): - """ - when patch location, check if it's snapshot-sync case. 
- """ - if pre_image.status == 'queued' and len(image.locations) == 1: - loc_meta = image.locations[0]['metadata'] - return loc_meta and loc_meta.get('image_from', None) in ['snapshot', - 'volume'] - - -def get_adding_image_properties(image): - _tags = list(image.tags) or [] - kwargs = {} - kwargs['body'] = {} - for key in _V2_IMAGE_CREATE_PROPERTIES: - try: - value = getattr(image, key, None) - if value and value != 'None': - kwargs['body'][key] = value - except KeyError: - pass - _properties = getattr(image, 'extra_properties') or None - - if _properties: - extra_keys = _properties.keys() - for _key in extra_keys: - kwargs['body'][_key] = _properties[_key] - if _tags: - kwargs['body']['tags'] = _tags - return kwargs - - -def get_existing_image_locations(image): - return {'locations': image.locations} - - -class ImageRepoProxy(glance.domain.proxy.Repo): - - def __init__(self, image_repo, context, sync_api): - self.image_repo = image_repo - self.context = context - self.sync_client = sync_api.get_sync_client(context) - proxy_kwargs = {'context': context, 'sync_api': sync_api} - super(ImageRepoProxy, self).__init__(image_repo, - item_proxy_class=ImageProxy, - item_proxy_kwargs=proxy_kwargs) - - def _sync_saving_metadata(self, pre_image, image): - kwargs = {} - remove_keys = [] - changes = {} - """ - image base properties - """ - for key in _V2_IMAGE_UPDATE_PROPERTIES: - pre_value = getattr(pre_image, key, None) - my_value = getattr(image, key, None) - - if not my_value and not pre_value or my_value == pre_value: - continue - if not my_value and pre_value: - remove_keys.append(key) - else: - changes[key] = my_value - - """ - image extra_properties - """ - pre_props = pre_image.extra_properties or {} - _properties = image.extra_properties or {} - addset = set(_properties.keys()).difference(set(pre_props.keys())) - removeset = set(pre_props.keys()).difference(set(_properties.keys())) - mayrepset = set(pre_props.keys()).intersection(set(_properties.keys())) - - for key in addset: - changes[key] = _properties[key] - - for key in removeset: - remove_keys.append(key) - - for key in mayrepset: - if _properties[key] == pre_props[key]: - continue - changes[key] = _properties[key] - - """ - image tags - """ - tag_dict = {} - pre_tags = pre_image.tags - new_tags = image.tags - - added_tags = set(new_tags) - set(pre_tags) - removed_tags = set(pre_tags) - set(new_tags) - if added_tags: - tag_dict['add'] = added_tags - if removed_tags: - tag_dict['delete'] = removed_tags - if tag_dict: - kwargs['tags'] = tag_dict - - kwargs['changes'] = changes - kwargs['removes'] = remove_keys - if not changes and not remove_keys and not tag_dict: - return - LOG.debug(_('In image %s, some properties changed, sync...') - % (image.image_id)) - self.sync_client.update_image_matedata(image.image_id, **kwargs) - - def _try_sync_locations(self, pre_image, image): - image_id = image.image_id - """ - image locations - """ - locations_dict = {} - pre_locs = pre_image.locations - _locs = image.locations - - """ - if all locations of cascading removed, the image status become 'queued' - so the cascaded images should be 'queued' too. 
we replace all locations - with '[]' - """ - if pre_locs and not _locs: - LOG.debug(_('The image %s all locations removed, sync...') - % (image_id)) - self.sync_client.sync_locations(image_id, - action='CLEAR', - locs=pre_locs) - return - - added_locs = [] - removed_locs = [] - for _loc in pre_locs: - if _loc in _locs: - continue - removed_locs.append(_loc) - - for _loc in _locs: - if _loc in pre_locs: - continue - added_locs.append(_loc) - - if added_locs: - if _from_snapshot_request(pre_image, image): - add_kwargs = get_adding_image_properties(image) - else: - add_kwargs = {} - LOG.debug(_('The image %s add locations, sync...') % (image_id)) - self.sync_client.sync_locations(image_id, - action='INSERT', - locs=added_locs, - **add_kwargs) - elif removed_locs: - LOG.debug(_('The image %s remove some locations, sync...') - % (image_id)) - self.sync_client.sync_locations(image_id, - action='DELETE', - locs=removed_locs) - - def save(self, image): - pre_image = self.get(image.image_id) - result = super(ImageRepoProxy, self).save(image) - - image_id = image.image_id - if _check_trigger_sync(pre_image, image): - add_kwargs = get_adding_image_properties(image) - self.sync_client.sync_data(image_id, **add_kwargs) - LOG.debug(_('Sync data when image status changes ACTIVE, the ' - 'image id is %s.' % (image_id))) - else: - """ - In case of add/remove/replace locations property. - """ - self._try_sync_locations(pre_image, image) - """ - In case of sync the glance's properties - """ - if image.status == 'active': - self._sync_saving_metadata(pre_image, image) - - return result - - def remove(self, image): - result = super(ImageRepoProxy, self).remove(image) - LOG.debug(_('Image %s removed, sync...') % (image.image_id)) - delete_kwargs = get_existing_image_locations(image) - self.sync_client.remove_image(image.image_id, **delete_kwargs) - return result - - -class ImageFactoryProxy(glance.domain.proxy.ImageFactory): - - def __init__(self, factory, context, sync_api): - self.context = context - self.sync_api = sync_api - proxy_kwargs = {'context': context, 'sync_api': sync_api} - super(ImageFactoryProxy, self).__init__(factory, - proxy_class=ImageProxy, - proxy_kwargs=proxy_kwargs) - - def new_image(self, **kwargs): - return super(ImageFactoryProxy, self).new_image(**kwargs) - - -class ImageProxy(glance.domain.proxy.Image): - - def __init__(self, image, context, sync_api=None): - self.image = image - self.sync_api = sync_api - self.context = context - super(ImageProxy, self).__init__(image) diff --git a/glancesync/glance/sync/api/__init__.py b/glancesync/glance/sync/api/__init__.py deleted file mode 100644 index 52b5a85c..00000000 --- a/glancesync/glance/sync/api/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Jia Dong, HuaWei - -import paste.urlmap - - -def root_app_factory(loader, global_conf, **local_conf): - return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/glancesync/glance/sync/api/v1/__init__.py b/glancesync/glance/sync/api/v1/__init__.py deleted file mode 100644 index e37c9929..00000000 --- a/glancesync/glance/sync/api/v1/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -from glance.common import wsgi -from glance.sync.api.v1 import images - - -def init(mapper): - - images_resource = images.create_resource() - - mapper.connect("/cascaded-eps", - controller=images_resource, - action="endpoints", - conditions={'method': ['POST']}) - - mapper.connect("/images/{id}", - controller=images_resource, - action="update", - conditions={'method': ['PATCH']}) - - mapper.connect("/images/{id}", - controller=images_resource, - action="remove", - conditions={'method': ['DELETE']}) - - mapper.connect("/images/{id}", - controller=images_resource, - action="upload", - conditions={'method': ['PUT']}) - - mapper.connect("/images/{id}/location", - controller=images_resource, - action="sync_loc", - conditions={'method': ['PUT']}) - - -class API(wsgi.Router): - - """WSGI entry point for all Registry requests.""" - - def __init__(self, mapper): - mapper = mapper or wsgi.APIMapper() - init(mapper) - super(API, self).__init__(mapper) diff --git a/glancesync/glance/sync/api/v1/images.py b/glancesync/glance/sync/api/v1/images.py deleted file mode 100644 index 1239945a..00000000 --- a/glancesync/glance/sync/api/v1/images.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Jia Dong, HuaWei - -from oslo.config import cfg - -from glance.common import exception -from glance.common import wsgi -import glance.openstack.common.log as logging -from glance.sync.base import SyncManagerV2 as sync_manager -from glance.sync import utils as utils - - -LOG = logging.getLogger(__name__) - - -class Controller(object): - - def __init__(self): - self.sync_manager = sync_manager() - self.sync_manager.start() - - def test(self, req): - return {'body': 'for test'} - - def update(self, req, id, body): - LOG.debug(_('sync client start run UPDATE metadata operation for' - 'image_id: %s' % (id))) - self.sync_manager.sync_image_metadata(id, req.context.auth_tok, 'SAVE', - **body) - return dict({'body': id}) - - def remove(self, req, id, body): - LOG.debug(_('sync client start run DELETE operation for image_id: %s' - % (id))) - self.sync_manager.sync_image_metadata(id, req.context.auth_tok, - 'DELETE', **body) - return dict({'body': id}) - - def upload(self, req, id, body): - LOG.debug(_('sync client start run UPLOAD operation for image_id: %s' - % (id))) - self.sync_manager.sync_image_data(id, req.context.auth_tok, **body) - return dict({'body': id}) - - def sync_loc(self, req, id, body): - action = body['action'] - locs = body['locations'] - LOG.debug(_('sync client start run SYNC-LOC operation for image_id: %s' - % (id))) - if action == 'INSERT': - self.sync_manager.adding_locations(id, req.context.auth_tok, locs, - **body) - elif action == 'DELETE': - self.sync_manager.removing_locations(id, - req.context.auth_tok, - locs) - elif action == 'CLEAR': - self.sync_manager.clear_all_locations(id, - req.context.auth_tok, - locs) - - return dict({'body': id}) - - def endpoints(self, req, body): - regions = req.params.get('regions', []) - if not regions: - regions = body.pop('regions', []) - if not isinstance(regions, list): - regions = [regions] - LOG.debug(_('get cacaded endpoints of user/tenant: %s' - % (req.context.user or req.context.tenant or 'NONE'))) - return dict(eps=utils.get_endpoints(req.context.auth_tok, - req.context.tenant, - region_names=regions) or []) - - -def create_resource(): - """Images resource factory method.""" - deserializer = wsgi.JSONRequestDeserializer() - serializer = wsgi.JSONResponseSerializer() - return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/glancesync/glance/sync/base.py b/glancesync/glance/sync/base.py deleted file mode 100644 index 2eb2ccdd..00000000 --- a/glancesync/glance/sync/base.py +++ /dev/null @@ -1,738 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Jia Dong, HuaWei - -import copy -import httplib -import Queue -import threading -import time - -import eventlet -from oslo.config import cfg -import six.moves.urllib.parse as urlparse - -from glance.common import exception -from glance.openstack.common import jsonutils -from glance.openstack.common import timeutils -import glance.openstack.common.log as logging - -from glance.sync import utils as s_utils -from glance.sync.clients import Clients as clients -from glance.sync.store.driver import StoreFactory as s_factory -from glance.sync.store.location import LocationFactory as l_factory -import glance.sync.store.glance_store as glance_store -from glance.sync.task import TaskObject -from glance.sync.task import PeriodicTask - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF -CONF.import_opt('sync_strategy', 'glance.common.config', group='sync') -CONF.import_opt('task_retry_times', 'glance.common.config', group='sync') -CONF.import_opt('snapshot_timeout', 'glance.common.config', group='sync') -CONF.import_opt('snapshot_sleep_interval', 'glance.common.config', - group='sync') - - -_IMAGE_LOCS_MAP = {} - - -def get_copy_location_url(image): - """ - choose a best location of an image for sync. - """ - global _IMAGE_LOCS_MAP - image_id = image.id - locations = image.locations - if not locations: - return '' - #First time store in the cache - if image_id not in _IMAGE_LOCS_MAP.keys(): - _IMAGE_LOCS_MAP[image_id] = { - 'locations': - [{'url': locations[0]['url'], - 'count': 1, - 'is_using':1 - }] - } - return locations[0]['url'] - else: - recorded_locs = _IMAGE_LOCS_MAP[image_id]['locations'] - record_urls = [loc['url'] for loc in recorded_locs] - for location in locations: - #the new, not-used location, cache and just return it. - if location['url'] not in record_urls: - recorded_locs.append({ - 'url': location['url'], - 'count':1, - 'is_using':1 - }) - return location['url'] - #find ever used and at present not used. - not_used_locs = [loc for loc in recorded_locs - if not loc['is_using']] - if not_used_locs: - _loc = not_used_locs[0] - _loc['is_using'] = 1 - _loc['count'] += 1 - return _loc['url'] - #the last case, just choose one that has the least using count. - _my_loc = sorted(recorded_locs, key=lambda my_loc: my_loc['count'])[0] - _my_loc['count'] += 1 - return _my_loc['url'] - - -def remove_invalid_location(id, url): - """ - when sync fail with a location, remove it from the cache. - :param id: the image_id - :param url: the location's url - :return: - """ - global _IMAGE_LOCS_MAP - image_map = _IMAGE_LOCS_MAP[id] - if not image_map: - return - locs = image_map['locations'] or [] - if not locs: - return - del_locs = [loc for loc in locs if loc['url'] == url] - if not del_locs: - return - locs.remove(del_locs[0]) - - -def return_sync_location(id, url): - """ - when sync finish, modify the using count and state. - """ - global _IMAGE_LOCS_MAP - image_map = _IMAGE_LOCS_MAP[id] - if not image_map: - return - locs = image_map['locations'] or [] - if not locs: - return - selectd_locs = [loc for loc in locs if loc['url'] == url] - if not selectd_locs: - return - selectd_locs[0]['is_using'] = 0 - selectd_locs[0]['count'] -= 1 - - -def choose_a_location(sync_f): - """ - the wrapper for the method which need a location for sync. 
- :param sync_f: - :return: - """ - def wrapper(*args, **kwargs): - _id = args[1] - _auth_token = args[2] - _image = create_self_glance_client(_auth_token).images.get(_id) - _url = get_copy_location_url(_image) - kwargs['src_image_url'] = _url - _sync_ok = False - while not _sync_ok: - try: - sync_f(*args, **kwargs) - _sync_ok = True - except Exception: - remove_invalid_location(_id, _url) - _url = get_copy_location_url(_image) - if not _url: - break - kwargs['src_image_url'] = _url - return wrapper - - -def get_image_servcie(): - return ImageService - - -def create_glance_client(auth_token, url): - return clients(auth_token).glance(url=url) - - -def create_self_glance_client(auth_token): - return create_glance_client(auth_token, - s_utils.get_cascading_endpoint_url()) - - -def create_restful_client(auth_token, url): - pieces = urlparse.urlparse(url) - return _create_restful_client(auth_token, pieces.netloc) - - -def create_self_restful_client(auth_token): - return create_restful_client(auth_token, - s_utils.get_cascading_endpoint_url()) - - -def _create_restful_client(auth_token, url): - server, port = url.split(':') - try: - port = int(port) - except Exception: - port = 9292 - conn = httplib.HTTPConnection(server.encode(), port) - image_service = get_image_servcie() - glance_client = image_service(conn, auth_token) - return glance_client - - -def get_mappings_from_image(auth_token, image_id): - client = create_self_glance_client(auth_token) - image = client.images.get(image_id) - locations = image.locations - if not locations: - return {} - return get_mappings_from_locations(locations) - - -def get_mappings_from_locations(locations): - mappings = {} - for loc in locations: - if s_utils.is_glance_location(loc['url']): - id = loc['metadata'].get('image_id') - if not id: - continue - ep_url = s_utils.create_ep_by_loc(loc) - mappings[ep_url] = id -# endpoints.append(utils.create_ep_by_loc(loc)) - return mappings - - -class AuthenticationException(Exception): - pass - - -class ImageAlreadyPresentException(Exception): - pass - - -class ServerErrorException(Exception): - pass - - -class UploadException(Exception): - pass - - -class ImageService(object): - - def __init__(self, conn, auth_token): - """Initialize the ImageService. - - conn: a httplib.HTTPConnection to the glance server - auth_token: authentication token to pass in the x-auth-token header - """ - self.auth_token = auth_token - self.conn = conn - - def _http_request(self, method, url, headers, body, - ignore_result_body=False): - """Perform an HTTP request against the server. 
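The choose_a_location decorator above keeps retrying the wrapped sync call with an alternate source URL whenever the current one raises, evicting failed URLs from the per-image cache. A minimal standalone sketch of that retry-with-fallback idea, independent of glance and with purely illustrative names:

import functools

def with_fallback_urls(candidate_urls):
    """Call the wrapped function with each URL in turn until one succeeds."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            remaining = list(candidate_urls)
            while remaining:
                kwargs['src_image_url'] = remaining.pop(0)
                try:
                    return func(*args, **kwargs)
                except Exception:
                    # mirror remove_invalid_location(): drop the bad URL, retry
                    continue
            raise RuntimeError('no usable source location left')
        return wrapper
    return decorator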
- - method: the HTTP method to use - url: the URL to request (not including server portion) - headers: headers for the request - body: body to send with the request - ignore_result_body: the body of the result will be ignored - - Returns: a httplib response object - """ - if self.auth_token: - headers.setdefault('x-auth-token', self.auth_token) - - LOG.debug(_('Request: %(method)s http://%(server)s:%(port)s' - '%(url)s with headers %(headers)s') - % {'method': method, - 'server': self.conn.host, - 'port': self.conn.port, - 'url': url, - 'headers': repr(headers)}) - self.conn.request(method, url, body, headers) - - response = self.conn.getresponse() - headers = self._header_list_to_dict(response.getheaders()) - code = response.status - code_description = httplib.responses[code] - LOG.debug(_('Response: %(code)s %(status)s %(headers)s') - % {'code': code, - 'status': code_description, - 'headers': repr(headers)}) - - if code in [400, 500]: - raise ServerErrorException(response.read()) - - if code in [401, 403]: - raise AuthenticationException(response.read()) - - if code == 409: - raise ImageAlreadyPresentException(response.read()) - - if ignore_result_body: - # NOTE: because we are pipelining requests through a single HTTP - # connection, httplib requires that we read the response body - # before we can make another request. If the caller knows they - # don't care about the body, they can ask us to do that for them. - response.read() - return response - - @staticmethod - def _header_list_to_dict(headers): - """Expand a list of headers into a dictionary. - - headers: a list of [(key, value), (key, value), (key, value)] - - Returns: a dictionary representation of the list - """ - d = {} - for (header, value) in headers: - if header.startswith('x-image-meta-property-'): - prop = header.replace('x-image-meta-property-', '') - d.setdefault('properties', {}) - d['properties'][prop] = value - else: - d[header.replace('x-image-meta-', '')] = value - return d - - @staticmethod - def _dict_to_headers(d): - """Convert a dictionary into one suitable for a HTTP request. - - d: a dictionary - - Returns: the same dictionary, with x-image-meta added to every key - """ - h = {} - for key in d: - if key == 'properties': - for subkey in d[key]: - if d[key][subkey] is None: - h['x-image-meta-property-%s' % subkey] = '' - else: - h['x-image-meta-property-%s' % subkey] = d[key][subkey] - - else: - h['x-image-meta-%s' % key] = d[key] - return h - - def add_location(self, image_uuid, path_val, metadata=None): - """ - add an actual location - """ - LOG.debug(_('call restful api to add location: url is %s' % path_val)) - metadata = metadata or {} - url = '/v2/images/%s' % image_uuid - hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'} - body = [] - value = {'url': path_val, 'metadata': metadata} - body.append({'op': 'add', 'path': '/locations/-', 'value': value}) - return self._http_request('PATCH', url, hdrs, jsonutils.dumps(body)) - - def clear_locations(self, image_uuid): - """ - clear all the location infos, make the image status be 'queued'. 
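ImageService.add_location and clear_locations above speak the Images v2 JSON-patch dialect directly against a cascaded glance. Reduced to plain httplib calls (endpoint, token, ids and the file path are placeholders), the two requests look like this:

import httplib
import json

conn = httplib.HTTPConnection('10.0.0.5', 9292)     # a cascaded glance API
headers = {'Content-Type': 'application/openstack-images-v2.1-json-patch',
           'x-auth-token': 'ADMIN_TOKEN'}
# Append one location, exactly as ImageService.add_location builds it.
add_body = json.dumps([{'op': 'add', 'path': '/locations/-',
                        'value': {'url': 'file:///var/lib/glance/images/IMAGE_ID',
                                  'metadata': {'image_id': 'CASCADED_IMAGE_ID'}}}])
conn.request('PATCH', '/v2/images/CASCADED_IMAGE_ID', add_body, headers)
resp = conn.getresponse()
print(resp.status)
resp.read()          # drain the body before reusing the pipelined connection
# Replace the whole location list, which pushes the image back to 'queued'
# (ImageService.clear_locations).
clear_body = json.dumps([{'op': 'replace', 'path': '/locations', 'value': []}])
conn.request('PATCH', '/v2/images/CASCADED_IMAGE_ID', clear_body, headers)
print(conn.getresponse().status)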
- """ - LOG.debug(_('call restful api to clear image location: image id is %s' - % image_uuid)) - url = '/v2/images/%s' % image_uuid - hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'} - body = [] - body.append({'op': 'replace', 'path': '/locations', 'value': []}) - return self._http_request('PATCH', url, hdrs, jsonutils.dumps(body)) - - -class MetadataHelper(object): - - def execute(self, auth_token, endpoint, action_name='CREATE', - image_id=None, **kwargs): - - glance_client = create_glance_client(auth_token, endpoint) - if action_name.upper() == 'CREATE': - return self._do_create_action(glance_client, **kwargs) - if action_name.upper() == 'SAVE': - return self._do_save_action(glance_client, image_id, **kwargs) - if action_name.upper() == 'DELETE': - return self._do_delete_action(glance_client, image_id, **kwargs) - - return None - - @staticmethod - def _fetch_params(keys, **kwargs): - return tuple([kwargs.get(key, None) for key in keys]) - - def _do_create_action(self, glance_client, **kwargs): - body = kwargs['body'] - new_image = glance_client.images.create(**body) - return new_image.id - - def _do_save_action(self, glance_client, image_id, **kwargs): - keys = ['changes', 'removes', 'tags'] - changes, removes, tags = self._fetch_params(keys, **kwargs) - if changes or removes: - glance_client.images.update(image_id, - remove_props=removes, - **changes) - if tags: - if tags.get('add', None): - added = tags.get('add') - for tag in added: - glance_client.image_tags.update(image_id, tag) - elif tags.get('delete', None): - removed = tags.get('delete') - for tag in removed: - glance_client.image_tags.delete(image_id, tag) - return glance_client.images.get(image_id) - - def _do_delete_action(self, glance_client, image_id, **kwargs): - return glance_client.images.delete(image_id) - - -_task_queue = Queue.Queue(maxsize=150) - - -class SyncManagerV2(): - - MAX_TASK_RETRY_TIMES = 1 - - def __init__(self): - global _task_queue - self.mete_helper = MetadataHelper() - self.location_factory = l_factory() - self.store_factory = s_factory() - self.task_queue = _task_queue - self.task_handler = None - self.unhandle_task_list = [] - self.periodic_add_id_list = [] - self.periodic_add_done = True - self._load_glance_store_cfg() - self.ks_client = clients().keystone() - self.create_new_periodic_task = False - - def _load_glance_store_cfg(self): - glance_store.setup_glance_stores() - - def sync_image_metadata(self, image_id, auth_token, action, **kwargs): - if not action or CONF.sync.sync_strategy == 'None': - return - kwargs['image_id'] = image_id - if action == 'SAVE': - self.task_queue.put_nowait(TaskObject.get_instance('meta_update', - kwargs)) - elif action == 'DELETE': - self.task_queue.put_nowait(TaskObject.get_instance('meta_remove', - kwargs)) - - @choose_a_location - def sync_image_data(self, image_id, auth_token, eps=None, **kwargs): - if CONF.sync.sync_strategy in ['None', 'nova']: - return - - kwargs['image_id'] = image_id - cascading_ep = s_utils.get_cascading_endpoint_url() - kwargs['cascading_ep'] = cascading_ep - copy_url = kwargs.get('src_image_url', None) - if not copy_url: - LOG.warn(_('No copy url found, for image %s sync, Exit.'), - image_id) - return - LOG.info(_('choose the copy url %s for sync image %s'), - copy_url, image_id) - if s_utils.is_glance_location(copy_url): - kwargs['copy_ep'] = s_utils.create_ep_by_loc_url(copy_url) - kwargs['copy_id'] = s_utils.get_id_from_glance_loc_url(copy_url) - else: - kwargs['copy_ep'] = cascading_ep - kwargs['copy_id'] = 
image_id - - self.task_queue.put_nowait(TaskObject.get_instance('sync', kwargs)) - - def adding_locations(self, image_id, auth_token, locs, **kwargs): - if CONF.sync.sync_strategy == 'None': - return - for loc in locs: - if s_utils.is_glance_location(loc['url']): - if s_utils.is_snapshot_location(loc): - snapshot_ep = s_utils.create_ep_by_loc(loc) - snapshot_id = s_utils.get_id_from_glance_loc(loc) - snapshot_client = create_glance_client(auth_token, - snapshot_ep) - snapshot_image = snapshot_client.images.get(snapshot_id) - _pre_check_time = timeutils.utcnow() - _timout = CONF.sync.snapshot_timeout - while not timeutils.is_older_than(_pre_check_time, - _timout): - if snapshot_image.status == 'active': - break - LOG.debug(_('Check snapshot not active, wait for %i' - 'second.' - % CONF.sync.snapshot_sleep_interval)) - time.sleep(CONF.sync.snapshot_sleep_interval) - snapshot_image = snapshot_client.images.get( - snapshot_id) - - if snapshot_image.status != 'active': - LOG.error(_('Snapshot status to active Timeout')) - return - kwargs['image_id'] = image_id - kwargs['snapshot_ep'] = snapshot_ep - kwargs['snapshot_id'] = snapshot_id - snapshot_task = TaskObject.get_instance('snapshot', kwargs) - self.task_queue.put_nowait(snapshot_task) - else: - LOG.debug(_('patch a normal location %s to image %s' - % (loc['url'], image_id))) - input = {'image_id': image_id, 'location': loc} - self.task_queue.put_nowait(TaskObject.get_instance('patch', - input)) - - def removing_locations(self, image_id, auth_token, locs): - if CONF.sync.sync_strategy == 'None': - return - locs = filter(lambda loc: s_utils.is_glance_location(loc['url']), locs) - if not locs: - return - input = {'image_id': image_id, 'locations': locs} - remove_locs_task = TaskObject.get_instance('locs_remove', input) - self.task_queue.put_nowait(remove_locs_task) - - def clear_all_locations(self, image_id, auth_token, locs): - locs = filter(lambda loc: not s_utils.is_snapshot_location(loc), locs) - self.removing_locations(image_id, auth_token, locs) - - def create_new_cascaded_task(self, last_run_time=None): - LOG.debug(_('new_cascaded periodic task has been created.')) - glance_client = create_self_glance_client(self.ks_client.auth_token) - filters = {'status': 'active'} - image_list = glance_client.images.list(filters=filters) - input = {} - run_images = {} - cascading_ep = s_utils.get_cascading_endpoint_url() - input['cascading_ep'] = cascading_ep - input['image_id'] = 'ffffffff-ffff-ffff-ffff-ffffffffffff' - all_ep_urls = s_utils.get_endpoints() - for image in image_list: - glance_urls = [loc['url'] for loc in image.locations - if s_utils.is_glance_location(loc['url'])] - lack_ep_urls = s_utils.calculate_lack_endpoints(all_ep_urls, - glance_urls) - if lack_ep_urls: - image_core_props = s_utils.get_core_properties(image) - run_images[image.id] = {'body': image_core_props, - 'locations': lack_ep_urls} - if not run_images: - LOG.debug(_('No images need to sync to new cascaded glances.')) - input['images'] = run_images - return TaskObject.get_instance('periodic_add', input, - last_run_time=last_run_time) - - @staticmethod - def _fetch_params(keys, **kwargs): - return tuple([kwargs.get(key, None) for key in keys]) - - def _get_candidate_path(self, auth_token, from_ep, image_id, - scheme='file'): - g_client = create_glance_client(auth_token, from_ep) - image = g_client.images.get(image_id) - locs = image.locations or [] - for loc in locs: - if s_utils.is_glance_location(loc['url']): - continue - if loc['url'].startswith(scheme): - if scheme == 
'file': - return loc['url'][len('file://'):] - return loc['url'] - return None - - def _do_image_data_copy(self, s_ep, d_ep, from_image_id, to_image_id, - candidate_path=None): - from_scheme, to_scheme = glance_store.choose_best_store_schemes(s_ep, - d_ep) - store_driver = self.store_factory.get_instance(from_scheme['name'], - to_scheme['name']) - from_params = from_scheme['parameters'] - from_params['image_id'] = from_image_id - to_params = to_scheme['parameters'] - to_params['image_id'] = to_image_id - from_location = self.location_factory.get_instance(from_scheme['name'], - **from_params) - to_location = self.location_factory.get_instance(to_scheme['name'], - **to_params) - return store_driver.copy_to(from_location, to_location, - candidate_path=candidate_path) - - def _patch_cascaded_location(self, auth_token, image_id, - cascaded_ep, cascaded_id, action=None): - self_restful_client = create_self_restful_client(auth_token) - path = s_utils.generate_glance_location(cascaded_ep, cascaded_id) - # add the auth_token, so this url can be visited, otherwise 404 error - path += '?auth_token=1' - metadata = {'image_id': cascaded_id} - if action: - metadata['action'] = action - self_restful_client.add_location(image_id, path, metadata) - - def meta_update(self, auth_token, cascaded_ep, image_id, **kwargs): - - return self.mete_helper.execute(auth_token, cascaded_ep, 'SAVE', - image_id, **kwargs) - - def meta_delete(self, auth_token, cascaded_ep, image_id): - - return self.mete_helper.execute(auth_token, cascaded_ep, 'DELETE', - image_id) - - def sync_image(self, auth_token, copy_ep=None, to_ep=None, - copy_image_id=None, cascading_image_id=None, **kwargs): - # Firstly, crate an image object with cascading image's properties. - LOG.debug(_('create an image metadata in ep: %s'), to_ep) - cascaded_id = self.mete_helper.execute(auth_token, to_ep, - **kwargs) - try: - c_path = self._get_candidate_path(auth_token, copy_ep, - copy_image_id) - LOG.debug(_('Chose candidate path: %s from ep %s'), c_path, copy_ep) - # execute copy operation to copy the image data. - copy_image_loc = self._do_image_data_copy(copy_ep, - to_ep, - copy_image_id, - cascaded_id, - candidate_path=c_path) - LOG.debug(_('Sync image data, synced loc is %s'), copy_image_loc) - # patch the copied image_data to the image - glance_client = create_restful_client(auth_token, to_ep) - glance_client.add_location(cascaded_id, copy_image_loc) - # patch the glance location to cascading glance - - msg = _("patch glance location to cascading image, with cascaded " - "endpoint : %s, cascaded id: %s, cascading image id: %s." 
% - (to_ep, cascaded_id, cascading_image_id)) - LOG.debug(msg) - self._patch_cascaded_location(auth_token, - cascading_image_id, - to_ep, - cascaded_id, - action='upload') - return cascaded_id - except exception.SyncStoreCopyError as e: - LOG.error(_("Exception occurs when syncing store copy.")) - raise exception.SyncServiceOperationError(reason=e.msg) - - def do_snapshot(self, auth_token, snapshot_ep, cascaded_ep, - snapshot_image_id, cascading_image_id, **kwargs): - - return self.sync_image(auth_token, copy_ep=snapshot_ep, - to_ep=cascaded_ep, copy_image_id=snapshot_image_id, - cascading_image_id=cascading_image_id, **kwargs) - - def patch_location(self, image_id, cascaded_id, auth_token, cascaded_ep, - location): - g_client = create_glance_client(auth_token, cascaded_ep) - cascaded_image = g_client.images.get(cascaded_id) - glance_client = create_restful_client(auth_token, cascaded_ep) - try: - glance_client.add_location(cascaded_id, location['url']) - if cascaded_image.status == 'queued': - self._patch_cascaded_location(auth_token, - image_id, - cascaded_ep, - cascaded_id, - action='patch') - except: - pass - - def remove_loc(self, cascaded_id, auth_token, cascaded_ep): - glance_client = create_glance_client(auth_token, cascaded_ep) - glance_client.images.delete(cascaded_id) - - def start(self): - # lanuch a new thread to read the task_task to handle. - _thread = threading.Thread(target=self.tasks_handle) - _thread.setDaemon(True) - _thread.start() - - def tasks_handle(self): - while True: - _task = self.task_queue.get() - if not isinstance(_task, TaskObject): - LOG.error(_('task type valid.')) - continue - LOG.debug(_('Task start to runs, task id is %s' % _task.id)) - _task.start_time = timeutils.strtime() - self.unhandle_task_list.append(copy.deepcopy(_task)) - - eventlet.spawn(_task.execute, self, self.ks_client.auth_token) - - def handle_tasks(self, task_result): - t_image_id = task_result.get('image_id') - t_type = task_result.get('type') - t_start_time = task_result.get('start_time') - t_status = task_result.get('status') - - handling_tasks = filter(lambda t: t.image_id == t_image_id and - t.start_time == t_start_time, - self.unhandle_task_list) - if not handling_tasks or len(handling_tasks) > 1: - LOG.error(_('The task not exist or duplicate, can not go handle. ' - 'Info is image: %(id)s, op_type: %(type)s, run time: ' - '%(time)s' - % {'id': t_image_id, - 'type': t_type, - 'time': t_start_time} - )) - return - - task = handling_tasks[0] - self.unhandle_task_list.remove(task) - - if isinstance(task, PeriodicTask): - LOG.debug(_('The periodic task executed done, with op %(type)s ' - 'runs at time: %(start_time)s, the status is ' - '%(status)s.' % - {'type': t_type, - 'start_time': t_start_time, - 'status': t_status - })) - - else: - if t_status == 'terminal': - LOG.debug(_('The task executed successful for image:' - '%(image_id)s with op %(type)s, which runs ' - 'at time: %(start_time)s' % - {'image_id': t_image_id, - 'type': t_type, - 'start_time': t_start_time - })) - elif t_status == 'param_error': - LOG.error(_('The task executed failed for params error. Image:' - '%(image_id)s with op %(type)s, which runs ' - 'at time: %(start_time)s' % - {'image_id': t_image_id, - 'type': t_type, - 'start_time': t_start_time - })) - elif t_status == 'error': - LOG.error(_('The task failed to execute. 
Detail info is: ' - '%(image_id)s with op %(op_type)s run_time:' - '%(start_time)s' % - {'image_id': t_image_id, - 'op_type': t_type, - 'start_time': t_start_time - })) diff --git a/glancesync/glance/sync/client/__init__.py b/glancesync/glance/sync/client/__init__.py deleted file mode 100644 index 114dad93..00000000 --- a/glancesync/glance/sync/client/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -from oslo.config import cfg - -sync_client_opts = [ - cfg.StrOpt('sync_client_protocol', default='http', - help=_('The protocol to use for communication with the ' - 'sync server. Either http or https.')), - cfg.StrOpt('sync_client_key_file', - help=_('The path to the key file to use in SSL connections ' - 'to the sync server.')), - cfg.StrOpt('sync_client_cert_file', - help=_('The path to the cert file to use in SSL connections ' - 'to the sync server.')), - cfg.StrOpt('sync_client_ca_file', - help=_('The path to the certifying authority cert file to ' - 'use in SSL connections to the sync server.')), - cfg.BoolOpt('sync_client_insecure', default=False, - help=_('When using SSL in connections to the sync server, ' - 'do not require validation via a certifying ' - 'authority.')), - cfg.IntOpt('sync_client_timeout', default=600, - help=_('The period of time, in seconds, that the API server ' - 'will wait for a sync request to complete. A ' - 'value of 0 implies no timeout.')), -] - -sync_client_ctx_opts = [ - cfg.BoolOpt('sync_use_user_token', default=True, - help=_('Whether to pass through the user token when ' - 'making requests to the sync.')), - cfg.StrOpt('sync_admin_user', secret=True, - help=_('The administrators user name.')), - cfg.StrOpt('sync_admin_password', secret=True, - help=_('The administrators password.')), - cfg.StrOpt('sync_admin_tenant_name', secret=True, - help=_('The tenant name of the administrative user.')), - cfg.StrOpt('sync_auth_url', - help=_('The URL to the keystone service.')), - cfg.StrOpt('sync_auth_strategy', default='noauth', - help=_('The strategy to use for authentication.')), - cfg.StrOpt('sync_auth_region', - help=_('The region for the authentication service.')), -] - -CONF = cfg.CONF -CONF.register_opts(sync_client_opts) -CONF.register_opts(sync_client_ctx_opts) diff --git a/glancesync/glance/sync/client/v1/api.py b/glancesync/glance/sync/client/v1/api.py deleted file mode 100644 index 736df07b..00000000 --- a/glancesync/glance/sync/client/v1/api.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Jia Dong, HuaWei - -import os - -from oslo.config import cfg - -from glance.common import exception -from glance.openstack.common import jsonutils -import glance.openstack.common.log as logging -from glance.sync.client.v1 import client - -CONF = cfg.CONF -CONF.import_opt('sync_server_host', 'glance.common.config') -CONF.import_opt('sync_server_port', 'glance.common.config') - -sync_client_ctx_opts = [ - cfg.BoolOpt('sync_send_identity_headers', default=False, - help=_("Whether to pass through headers containing user " - "and tenant information when making requests to " - "the sync. This allows the sync to use the " - "context middleware without the keystoneclients' " - "auth_token middleware, removing calls to the keystone " - "auth service. It is recommended that when using this " - "option, secure communication between glance api and " - "glance sync is ensured by means other than " - "auth_token middleware.")), -] -CONF.register_opts(sync_client_ctx_opts) - -_sync_client = 'glance.sync.client' -CONF.import_opt('sync_client_protocol', _sync_client) -CONF.import_opt('sync_client_key_file', _sync_client) -CONF.import_opt('sync_client_cert_file', _sync_client) -CONF.import_opt('sync_client_ca_file', _sync_client) -CONF.import_opt('sync_client_insecure', _sync_client) -CONF.import_opt('sync_client_timeout', _sync_client) -CONF.import_opt('sync_use_user_token', _sync_client) -CONF.import_opt('sync_admin_user', _sync_client) -CONF.import_opt('sync_admin_password', _sync_client) -CONF.import_opt('sync_admin_tenant_name', _sync_client) -CONF.import_opt('sync_auth_url', _sync_client) -CONF.import_opt('sync_auth_strategy', _sync_client) -CONF.import_opt('sync_auth_region', _sync_client) -CONF.import_opt('metadata_encryption_key', 'glance.common.config') - -_CLIENT_CREDS = None -_CLIENT_HOST = None -_CLIENT_PORT = None -_CLIENT_KWARGS = {} - - -def get_sync_client(cxt): - global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT - kwargs = _CLIENT_KWARGS.copy() - if CONF.sync_use_user_token: - kwargs['auth_tok'] = cxt.auth_tok - if _CLIENT_CREDS: - kwargs['creds'] = _CLIENT_CREDS - - if CONF.sync_send_identity_headers: - identity_headers = { - 'X-User-Id': cxt.user, - 'X-Tenant-Id': cxt.tenant, - 'X-Roles': ','.join(cxt.roles), - 'X-Identity-Status': 'Confirmed', - 'X-Service-Catalog': jsonutils.dumps(cxt.service_catalog), - } - kwargs['identity_headers'] = identity_headers - return client.SyncClient(_CLIENT_HOST, _CLIENT_PORT, **kwargs) - - -def configure_sync_client(): - - global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT - host, port = CONF.sync_server_host, CONF.sync_server_port - - _CLIENT_HOST = host - _CLIENT_PORT = port - _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key - _CLIENT_KWARGS = { - 'use_ssl': CONF.sync_client_protocol.lower() == 'https', - 'key_file': CONF.sync_client_key_file, - 'cert_file': CONF.sync_client_cert_file, - 'ca_file': CONF.sync_client_ca_file, - 'insecure': CONF.sync_client_insecure, - 'timeout': CONF.sync_client_timeout, - } - - if not CONF.sync_use_user_token: - configure_sync_admin_creds() - - -def configure_sync_admin_creds(): - global _CLIENT_CREDS - - if CONF.sync_auth_url or os.getenv('OS_AUTH_URL'): - strategy = 'keystone' - else: - strategy = CONF.sync_auth_strategy - - _CLIENT_CREDS = { - 'user': CONF.sync_admin_user, - 'password': CONF.sync_admin_password, - 'username': CONF.sync_admin_user, - 'tenant': CONF.sync_admin_tenant_name, - 'auth_url': CONF.sync_auth_url, - 'strategy': strategy, - 'region': CONF.sync_auth_region, - 
} diff --git a/glancesync/glance/sync/client/v1/client.py b/glancesync/glance/sync/client/v1/client.py deleted file mode 100644 index 05ef0486..00000000 --- a/glancesync/glance/sync/client/v1/client.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -from glance.common.client import BaseClient -from glance.openstack.common import jsonutils -import glance.openstack.common.log as logging - - -LOG = logging.getLogger(__name__) - - -class SyncClient(BaseClient): - - DEFAULT_PORT = 9595 - - def __init__(self, host=None, port=DEFAULT_PORT, identity_headers=None, - **kwargs): - self.identity_headers = identity_headers - BaseClient.__init__(self, host, port, configure_via_auth=False, - **kwargs) - - def do_request(self, method, action, **kwargs): - try: - kwargs['headers'] = kwargs.get('headers', {}) - res = super(SyncClient, self).do_request(method, action, **kwargs) - status = res.status - request_id = res.getheader('x-openstack-request-id') - msg = (_("Sync request %(method)s %(action)s HTTP %(status)s" - " request id %(request_id)s") % - {'method': method, 'action': action, - 'status': status, 'request_id': request_id}) - LOG.debug(msg) - - except Exception as exc: - exc_name = exc.__class__.__name__ - LOG.info(_("Sync client request %(method)s %(action)s " - "raised %(exc_name)s"), - {'method': method, 'action': action, - 'exc_name': exc_name}) - raise - return res - - def _add_common_params(self, id, kwargs): - pass - - def update_image_matedata(self, image_id, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - body = jsonutils.dumps(kwargs) - res = self.do_request("PATCH", "/v1/images/%s" % (image_id), body=body, - headers=headers) - return res - - def remove_image(self, image_id, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - body = jsonutils.dumps(kwargs) - res = self.do_request("DELETE", "/v1/images/%s" % - (image_id), body=body, headers=headers) - return res - - def sync_data(self, image_id, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - body = jsonutils.dumps(kwargs) - res = self.do_request("PUT", "/v1/images/%s" % (image_id), body=body, - headers=headers) - return res - - def sync_locations(self, image_id, action=None, locs=None, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - kwargs['action'] = action - kwargs['locations'] = locs - body = jsonutils.dumps(kwargs) - res = self.do_request("PUT", "/v1/images/%s/location" % (image_id), - body=body, headers=headers) - return res - - def get_cascaded_endpoints(self, regions=[]): - headers = { - 'Content-Type': 'application/json', - } - - body = jsonutils.dumps({'regions': regions}) - res = self.do_request('POST', '/v1/cascaded-eps', body=body, - headers=headers) - return jsonutils.loads(res.read())['eps'] diff --git a/glancesync/glance/sync/clients.py b/glancesync/glance/sync/clients.py deleted file mode 100644 index 
cadc8f4a..00000000 --- a/glancesync/glance/sync/clients.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -from oslo.config import cfg - -from keystoneclient.v2_0 import client as ksclient -import glance.openstack.common.log as logging -from glanceclient.v2 import client as gclient2 - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class Clients(object): - - def __init__(self, auth_token=None, tenant_id=None): - self._keystone = None - self._glance = None - self._cxt_token = auth_token - self._tenant_id = tenant_id - self._ks_conf = cfg.CONF.keystone_authtoken - - @property - def auth_token(self, token=None): - return token or self.keystone().auth_token - - @property - def ks_url(self): - protocol = self._ks_conf.auth_protocol or 'http' - auth_host = self._ks_conf.auth_host or '127.0.0.1' - auth_port = self._ks_conf.auth_port or '35357' - return protocol + '://' + auth_host + ':' + str(auth_port) + '/v2.0/' - - def url_for(self, **kwargs): - return self.keystone().service_catalog.url_for(**kwargs) - - def get_urls(self, **kwargs): - return self.keystone().service_catalog.get_urls(**kwargs) - - def keystone(self): - if self._keystone: - return self._keystone - - if self._cxt_token and self._tenant_id: - creds = {'token': self._cxt_token, - 'auth_url': self.ks_url, - 'project_id': self._tenant_id - } - else: - creds = {'username': self._ks_conf.admin_user, - 'password': self._ks_conf.admin_password, - 'auth_url': self.ks_url, - 'project_name': self._ks_conf.admin_tenant_name} - try: - self._keystone = ksclient.Client(**creds) - except Exception as e: - LOG.error(_('create keystone client error: reason: %s') % (e)) - return None - - return self._keystone - - def glance(self, auth_token=None, url=None): - gclient = gclient2 - if gclient is None: - return None - if self._glance: - return self._glance - args = { - 'token': auth_token or self.auth_token, - 'endpoint': url or self.url_for(service_type='image') - } - self._glance = gclient.Client(**args) - - return self._glance diff --git a/glancesync/glance/sync/pool.py b/glancesync/glance/sync/pool.py deleted file mode 100644 index 1e8fb938..00000000 --- a/glancesync/glance/sync/pool.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
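The Clients wrapper in clients.py above builds keystone and glance clients either from a caller's token and tenant or from the admin credentials under [keystone_authtoken]. A hedged usage sketch; the token, tenant and endpoint are placeholders:

from glance.sync.clients import Clients

# Reuse a caller's token and tenant for keystone/glance calls.
user_clients = Clients(auth_token='USER_TOKEN', tenant_id='TENANT_ID')
glance = user_clients.glance(url='http://127.0.0.1:9292')

# With no token, Clients falls back to the admin credentials configured
# under [keystone_authtoken] and can resolve endpoints from the catalog.
admin_clients = Clients()
image_endpoint = admin_clients.url_for(service_type='image')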
-# -# @author: Jia Dong, HuaWei - -from concurrent.futures import ThreadPoolExecutor - -import glance.openstack.common.log as logging - - -LOG = logging.getLogger(__name__) - - -class ThreadPool(object): - - def __init__(self): - self.pool = ThreadPoolExecutor(128) - - def execute(self, func, *args, **kwargs): - LOG.info(_('execute %s in a thread pool') % (func.__name__)) - self.pool.submit(func, *args, **kwargs) diff --git a/glancesync/glance/sync/store/_drivers/__init__.py b/glancesync/glance/sync/store/_drivers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/glancesync/glance/sync/store/_drivers/filesystem.py b/glancesync/glance/sync/store/_drivers/filesystem.py deleted file mode 100644 index 0ef3d1ff..00000000 --- a/glancesync/glance/sync/store/_drivers/filesystem.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -""" -A simple filesystem-backed store -""" - -import logging -import os -import sys - -from oslo.config import cfg -import pxssh -import pexpect - -from glance.common import exception -import glance.sync.store.driver -import glance.sync.store.location -from glance.sync.store.location import Location -from glance.sync import utils as s_utils - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF -CONF.import_opt('scp_copy_timeout', 'glance.common.config', group='sync') - - -def _login_ssh(host, passwd): - child_ssh = pexpect.spawn('ssh -p 22 %s' % (host)) - child_ssh.logfile = sys.stdout - login_flag = True - while True: - ssh_index = child_ssh.expect(['.yes/no.', '.assword:.', - pexpect.TIMEOUT]) - if ssh_index == 0: - child_ssh.sendline('yes') - elif ssh_index == 1: - child_ssh.sendline(passwd) - break - else: - login_flag = False - break - if not login_flag: - return None - - return child_ssh - - -def _get_ssh(hostname, username, password): - s = pxssh.pxssh() - s.login(hostname, username, password, original_prompt='[#$>]') - s.logfile = sys.stdout - return s - - -class LocationCreator(glance.sync.store.location.LocationCreator): - - def __init__(self): - self.scheme = 'file' - - def create(self, **kwargs): - image_id = kwargs.get('image_id') - image_file_name = kwargs.get('image_name', None) or image_id - datadir = kwargs.get('datadir') - path = os.path.join(datadir, str(image_file_name)) - login_user = kwargs.get('login_user') - login_password = kwargs.get('login_password') - host = kwargs.get('host') - store_specs = {'scheme': self.scheme, 'path': path, 'host': host, - 'login_user': login_user, - 'login_password': login_password} - return Location(self.scheme, StoreLocation, image_id=image_id, - store_specs=store_specs) - - -class StoreLocation(glance.sync.store.location.StoreLocation): - - def process_specs(self): - self.scheme = self.specs.get('scheme', 'file') - self.path = self.specs.get('path') - self.host = self.specs.get('host') - self.login_user = self.specs.get('login_user') - self.login_password = 
self.specs.get('login_password') - - -class Store(glance.sync.store.driver.Store): - - def copy_to(self, from_location, to_location, candidate_path=None): - - from_store_loc = from_location.store_location - to_store_loc = to_location.store_location - - if from_store_loc.host == to_store_loc.host and \ - from_store_loc.path == to_store_loc.path: - - LOG.info(_('The from_loc is same to to_loc, no need to copy. the ' - 'host:path is %s:%s') % (from_store_loc.host, - from_store_loc.path)) - return 'file://%s' % to_store_loc.path - - from_host = r"""{username}@{host}""".format( - username=from_store_loc.login_user, - host=from_store_loc.host) - - to_host = r"""{username}@{host}""".format( - username=to_store_loc.login_user, - host=to_store_loc.host) - - to_path = r"""{to_host}:{path}""".format(to_host=to_host, - path=to_store_loc.path) - - copy_path = from_store_loc.path - - try: - from_ssh = _get_ssh(from_store_loc.host, - from_store_loc.login_user, - from_store_loc.login_password) - except Exception: - raise exception.SyncStoreCopyError(reason="ssh login failed.") - - from_ssh.sendline('ls %s' % copy_path) - from_ssh.prompt() - if 'cannot access' in from_ssh.before or \ - 'No such file' in from_ssh.before: - if candidate_path: - from_ssh.sendline('ls %s' % candidate_path) - from_ssh.prompt() - if 'cannot access' not in from_ssh.before and \ - 'No such file' not in from_ssh.before: - copy_path = candidate_path - else: - msg = _("the image path for copy to is not exists, file copy" - "failed: path is %s" % (copy_path)) - raise exception.SyncStoreCopyError(reason=msg) - - from_ssh.sendline('scp -P 22 %s %s' % (copy_path, to_path)) - while True: - scp_index = from_ssh.expect(['.yes/no.', '.assword:.', - pexpect.TIMEOUT]) - if scp_index == 0: - from_ssh.sendline('yes') - from_ssh.prompt() - elif scp_index == 1: - from_ssh.sendline(to_store_loc.login_password) - from_ssh.prompt(timeout=CONF.sync.scp_copy_timeout) - break - else: - msg = _("scp commond execute failed, with copy_path %s and " - "to_path %s" % (copy_path, to_path)) - raise exception.SyncStoreCopyError(reason=msg) - break - - if from_ssh: - from_ssh.logout() - - return 'file://%s' % to_store_loc.path diff --git a/glancesync/glance/sync/store/driver.py b/glancesync/glance/sync/store/driver.py deleted file mode 100644 index e1275187..00000000 --- a/glancesync/glance/sync/store/driver.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
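Store.copy_to above moves image files between glance hosts with a plain scp driven through pxssh/pexpect rather than streaming the bytes itself. A stripped-down sketch of that login-and-scp pattern; host names, paths, credentials and the timeout are placeholders:

import sys

import pexpect
import pxssh

ssh = pxssh.pxssh()
ssh.login('10.0.0.5', 'glance', 'SOURCE_PASSWORD', original_prompt='[#$>]')
ssh.logfile = sys.stdout
# Push the image file to the destination host, answering the host-key and
# password prompts the same way Store.copy_to does.
ssh.sendline('scp -P 22 /var/lib/glance/images/IMAGE_ID '
             'glance@10.0.0.6:/var/lib/glance/images/IMAGE_ID')
while True:
    index = ssh.expect(['.yes/no.', '.assword:.', pexpect.TIMEOUT])
    if index == 0:
        ssh.sendline('yes')
    elif index == 1:
        ssh.sendline('DEST_PASSWORD')
        ssh.prompt(timeout=300)     # roughly CONF.sync.scp_copy_timeout
        break
    else:
        raise RuntimeError('scp did not prompt as expected')
ssh.logout()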
-# -# @author: Jia Dong, HuaWei - -"""Base class for all storage backends""" - -from oslo.config import cfg -from stevedore import extension - -from glance.common import exception -import glance.openstack.common.log as logging -from glance.openstack.common.gettextutils import _ -from glance.openstack.common import importutils -from glance.openstack.common import strutils - -LOG = logging.getLogger(__name__) - - -class StoreFactory(object): - - SYNC_STORE_NAMESPACE = "glance.sync.store.driver" - - def __init__(self): - self._stores = {} - self._load_store_drivers() - - def _load_store_drivers(self): - extension_manager = extension.ExtensionManager( - namespace=self.SYNC_STORE_NAMESPACE, - invoke_on_load=True, - ) - for ext in extension_manager: - if ext.name in self._stores: - continue - ext.obj.name = ext.name - self._stores[ext.name] = ext.obj - - def get_instance(self, from_scheme='filesystem', to_scheme=None): - _store_driver = self._stores.get(from_scheme) - if to_scheme and to_scheme != from_scheme and _store_driver: - func_name = 'copy_to_%s' % to_scheme - if not getattr(_store_driver, func_name, None): - return None - return _store_driver - - -class Store(object): - - def copy_to(self, source_location, dest_location, candidate_path=None): - pass diff --git a/glancesync/glance/sync/store/glance_store.py b/glancesync/glance/sync/store/glance_store.py deleted file mode 100644 index 480365e8..00000000 --- a/glancesync/glance/sync/store/glance_store.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -import fnmatch -import operator -import os - -from oslo.config import cfg -import yaml - -from glance.sync import utils as s_utils - - -OPTS = [ - cfg.StrOpt('glance_store_cfg_file', - default="glance_store.yaml", - help="Configuration file for glance's store location " - "definition." 
- ), -] - -PRIOR_SOTRE_SCHEMES = ['filesystem', 'http', 'swift'] - -cfg.CONF.register_opts(OPTS) - - -def choose_best_store_schemes(source_endpoint, dest_endpoint): - global GLANCE_STORES - source_host = s_utils.get_host_from_ep(source_endpoint) - dest_host = s_utils.get_host_from_ep(dest_endpoint) - source_store = GLANCE_STORES.get_glance_store(source_host) - dest_store = GLANCE_STORES.get_glance_store(dest_host) - tmp_dict = {} - for s_scheme in source_store.schemes: - s_scheme_name = s_scheme['name'] - for d_scheme in dest_store.schemes: - d_scheme_name = d_scheme['name'] - if s_scheme_name == d_scheme_name: - tmp_dict[s_scheme_name] = (s_scheme, d_scheme) - if tmp_dict: - return tmp_dict[sorted(tmp_dict, key=lambda scheme: - PRIOR_SOTRE_SCHEMES.index(scheme))[0]] - - return (source_store.schemes[0], dest_store.schemes[0]) - - -class GlanceStore(object): - - def __init__(self, service_ip, name, schemes): - self.service_ip = service_ip - self.name = name - self.schemes = schemes - - -class ImageObject(object): - - def __init__(self, image_id, glance_store): - self.image_id = image_id - self.glance_store = glance_store - - -class GlanceStoreManager(object): - - def __init__(self, cfg): - self.cfg = cfg - self.g_stores = [] - - cfg_items = cfg['glances'] - for item in cfg_items: - self.g_stores.append(GlanceStore(item['service_ip'], - item['name'], - item['schemes'])) - - def get_glance_store(self, service_ip): - for g_store in self.g_stores: - if service_ip == g_store.service_ip: - return g_store - return None - - def generate_Image_obj(self, image_id, endpoint): - g_store = self.get_glance_store(s_utils.get_host_from_ep(endpoint)) - return ImageObject(image_id, g_store) - - -GLANCE_STORES = None - - -def setup_glance_stores(): - global GLANCE_STORES - cfg_file = cfg.CONF.glance_store_cfg_file - if not os.path.exists(cfg_file): - cfg_file = cfg.CONF.find_file(cfg_file) - with open(cfg_file) as fap: - data = fap.read() - - locs_cfg = yaml.safe_load(data) - GLANCE_STORES = GlanceStoreManager(locs_cfg) diff --git a/glancesync/glance/sync/store/location.py b/glancesync/glance/sync/store/location.py deleted file mode 100644 index 1c1e3346..00000000 --- a/glancesync/glance/sync/store/location.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -import logging -import urlparse - -from stevedore import extension - -LOG = logging.getLogger(__name__) - - -class LocationCreator(object): - - def __init__(self): - self.scheme = None - - def creator(self, **kwargs): - pass - - -class Location(object): - - """ - Class describing the location of an image that Glance knows about - """ - - def __init__(self, store_name, store_location_class, - uri=None, image_id=None, store_specs=None): - """ - Create a new Location object. 
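glance_store.py above loads a per-glance store description from glance_store.yaml and picks the best common scheme by the filesystem/http/swift priority list. A sketch of the structure GlanceStoreManager expects, written as the already-parsed dict; the IPs, names and parameter values are invented for illustration:

from glance.sync.store.glance_store import GlanceStoreManager

parsed_yaml = {
    'glances': [
        {'service_ip': '10.0.0.5', 'name': 'cascading',
         'schemes': [{'name': 'filesystem',
                      'parameters': {'datadir': '/var/lib/glance/images',
                                     'host': '10.0.0.5',
                                     'login_user': 'glance',
                                     'login_password': 'PASSWORD'}}]},
        {'service_ip': '10.0.0.6', 'name': 'cascaded-1',
         'schemes': [{'name': 'filesystem',
                      'parameters': {'datadir': '/var/lib/glance/images',
                                     'host': '10.0.0.6',
                                     'login_user': 'glance',
                                     'login_password': 'PASSWORD'}}]},
    ],
}
manager = GlanceStoreManager(parsed_yaml)
store = manager.get_glance_store('10.0.0.6')   # looked up by service_ip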
- - :param store_name: The string identifier/scheme of the storage backend - :param store_location_class: The store location class to use - for this location instance. - :param image_id: The identifier of the image in whatever storage - backend is used. - :param uri: Optional URI to construct location from - :param store_specs: Dictionary of information about the location - of the image that is dependent on the backend - store - """ - self.store_name = store_name - self.image_id = image_id - self.store_specs = store_specs or {} - self.store_location = store_location_class(self.store_specs) - - -class StoreLocation(object): - - """ - Base class that must be implemented by each store - """ - - def __init__(self, store_specs): - self.specs = store_specs - if self.specs: - self.process_specs() - - -class LocationFactory(object): - - SYNC_LOCATION_NAMESPACE = "glance.sync.store.location" - - def __init__(self): - self._locations = {} - self._load_locations() - - def _load_locations(self): - extension_manager = extension.ExtensionManager( - namespace=self.SYNC_LOCATION_NAMESPACE, - invoke_on_load=True, - ) - for ext in extension_manager: - if ext.name in self._locations: - continue - ext.obj.name = ext.name - self._locations[ext.name] = ext.obj - - def get_instance(self, scheme, **kwargs): - loc_creator = self._locations.get(scheme, None) - return loc_creator.create(**kwargs) diff --git a/glancesync/glance/sync/task/__init__.py b/glancesync/glance/sync/task/__init__.py deleted file mode 100644 index b82823e7..00000000 --- a/glancesync/glance/sync/task/__init__.py +++ /dev/null @@ -1,356 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
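LocationFactory above (like StoreFactory earlier in this patch) discovers its drivers through stevedore entry points and hands back a Location built by the scheme's LocationCreator. A hedged sketch of using the filesystem creator; the entry-point name is assumed to be 'filesystem' (the default StoreFactory also uses) and every keyword value is a placeholder:

from glance.sync.store.location import LocationFactory

factory = LocationFactory()
# The kwargs mirror the 'parameters' block of glance_store.yaml plus the
# image id; the creator itself tags the resulting location with 'file'.
loc = factory.get_instance('filesystem',
                           image_id='CASCADED_IMAGE_ID',
                           datadir='/var/lib/glance/images',
                           host='10.0.0.6',
                           login_user='glance',
                           login_password='PASSWORD')
print(loc.store_location.path)   # /var/lib/glance/images/CASCADED_IMAGE_ID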
-# -# @author: Jia Dong, HuaWei - -import threading -import Queue -import uuid - -import eventlet -from oslo.config import cfg - -import glance.openstack.common.log as logging -from glance.openstack.common import timeutils -from glance.sync import utils as s_utils - -LOG = logging.getLogger(__name__) - - -snapshot_opt = [ - cfg.ListOpt('snapshot_region_names', - default=[], - help=_("for what regions the snapshot sync to"), - deprecated_opts=[cfg.DeprecatedOpt('snapshot_region_names', - group='DEFAULT')]), -] - -CONF = cfg.CONF -CONF.register_opts(snapshot_opt) - - -class TaskObject(object): - - def __init__(self, type, input, retry_times=0): - self.id = str(uuid.uuid4()) - self.type = type - self.input = input - self.image_id = self.input.get('image_id') - self.status = 'new' - self.retry_times = retry_times - self.start_time = None - - @classmethod - def get_instance(cls, type, input, **kwargs): - _type_cls_dict = {'meta_update': MetaUpdateTask, - 'meta_remove': MetaDeleteTask, - 'sync': ImageActiveTask, - 'snapshot': PatchSnapshotLocationTask, - 'patch': PatchLocationTask, - 'locs_remove': RemoveLocationsTask, - 'periodic_add': ChkNewCascadedsPeriodicTask} - - if _type_cls_dict.get(type): - return _type_cls_dict[type](input, **kwargs) - - return None - - def _handle_result(self, sync_manager): - return sync_manager.handle_tasks({'image_id': self.image_id, - 'type': self.type, - 'start_time': self.start_time, - 'status': self.status - }) - - def execute(self, sync_manager, auth_token): - if not self.checkInput(): - self.status = 'param_error' - LOG.error(_('the input content not valid: %s.' % (self.input))) - return self._handle_result(sync_manager) - - try: - self.status = 'running' - green_threads = self.create_green_threads(sync_manager, auth_token) - for gt in green_threads: - gt.wait() - except Exception as e: - msg = _("Unable to execute task of image %(image_id)s: %(e)s") % \ - {'image_id': self.image_id, 'e': unicode(e)} - LOG.exception(msg) - self.status = 'error' - else: - self.status = 'terminal' - - return self._handle_result(sync_manager) - - def checkInput(self): - if not self.input.pop('image_id', None): - LOG.warn(_('No cascading image_id specified.')) - return False - return self.do_checkInput() - - -class MetaUpdateTask(TaskObject): - - def __init__(self, input): - super(MetaUpdateTask, self).__init__('meta_update', input) - - def do_checkInput(self): - params = self.input - changes = params.get('changes') - removes = params.get('removes') - tags = params.get('tags') - if not changes and not removes and not tags: - LOG.warn(_('No changes and removes and tags with the glance.')) - return True - - def create_green_threads(self, sync_manager, auth_token): - green_threads = [] - cascaded_mapping = s_utils.get_mappings_from_image(auth_token, - self.image_id) - for cascaded_ep in cascaded_mapping: - cascaded_id = cascaded_mapping[cascaded_ep] - green_threads.append(eventlet.spawn(sync_manager.meta_update, - auth_token, - cascaded_ep, - image_id=cascaded_id, - **self.input)) - - return green_threads - - -class MetaDeleteTask(TaskObject): - - def __init__(self, input): - super(MetaDeleteTask, self).__init__('meta_remove', input) - - def do_checkInput(self): - self.locations = self.input.get('locations') - return self.locations is not None - - def create_green_threads(self, sync_manager, auth_token): - green_threads = [] - cascaded_mapping = s_utils.get_mappings_from_locations(self.locations) - for cascaded_ep in cascaded_mapping: - cascaded_id = cascaded_mapping[cascaded_ep] - 
green_threads.append(eventlet.spawn(sync_manager.meta_delete, - auth_token, - cascaded_ep, - image_id=cascaded_id)) - - return green_threads - - -class ImageActiveTask(TaskObject): - """ - sync data task. - """ - def __init__(self, input): - super(ImageActiveTask, self).__init__('sync', input) - - def do_checkInput(self): - image_data = self.input.get('body') - self.cascading_endpoint = self.input.get('cascading_ep') - self.copy_endpoint = self.input.pop('copy_ep', None) - self.copy_image_id = self.input.pop('copy_id', None) - return image_data and self.cascading_endpoint and \ - self.copy_endpoint and self.copy_image_id - - def create_green_threads(self, sync_manager, auth_token): - green_threads = [] - cascaded_eps = s_utils.get_endpoints(auth_token) - for cascaded_ep in cascaded_eps: - green_threads.append(eventlet.spawn(sync_manager.sync_image, - auth_token, - copy_ep=self.copy_endpoint, - to_ep=cascaded_ep, - copy_image_id=self.copy_image_id, - cascading_image_id=self.image_id, - **self.input)) - - return green_threads - - -class PatchSnapshotLocationTask(TaskObject): - """ - sync data task - """ - def __init__(self, input): - super(PatchSnapshotLocationTask, self).__init__('snapshot', input) - - def do_checkInput(self): - image_metadata = self.input.get('body') - self.snapshot_endpoint = self.input.pop('snapshot_ep', None) - self.snapshot_id = self.input.pop('snapshot_id', None) - return image_metadata and self.snapshot_endpoint and self.snapshot_id - - def create_green_threads(self, sync_manager, auth_token): - green_threads = [] - _region_names = CONF.snapshot_region_names - cascaded_mapping = s_utils.get_endpoints(auth_token, - region_names=_region_names) - try: - if self.snapshot_endpoint in cascaded_mapping: - cascaded_mapping.remove(self.snapshot_endpoint) - except TypeError: - pass - for cascaded_ep in cascaded_mapping: - green_threads.append(eventlet.spawn(sync_manager.do_snapshot, - auth_token, - self.snapshot_endpoint, - cascaded_ep, - self.snapshot_id, - self.image_id, - **self.input)) - - return green_threads - - -class PatchLocationTask(TaskObject): - - def __init__(self, input): - super(PatchLocationTask, self).__init__('patch', input) - - def do_checkInput(self): - self.location = self.input.get('location') - return self.location is not None - - def create_green_threads(self, sync_manager, auth_token): - green_threads = [] - cascaded_mapping = s_utils.get_mappings_from_image(auth_token, - self.image_id) - for cascaded_ep in cascaded_mapping: - cascaded_id = cascaded_mapping[cascaded_ep] - green_threads.append(eventlet.spawn(sync_manager.patch_location, - self.image_id, - cascaded_id, - auth_token, - cascaded_ep, - self.location)) - return green_threads - - -class RemoveLocationsTask(TaskObject): - - def __init__(self, input): - super(RemoveLocationsTask, self).__init__('locs_remove', input) - - def do_checkInput(self): - self.locations = self.input.get('locations') - return self.locations is not None - - def create_green_threads(self, sync_manager, auth_token): - green_threads = [] - cascaded_mapping = s_utils.get_mappings_from_locations(self.locations) - for cascaded_ep in cascaded_mapping: - cascaded_id = cascaded_mapping[cascaded_ep] - green_threads.append(eventlet.spawn(sync_manager.remove_loc, - cascaded_id, - auth_token, - cascaded_ep)) - return green_threads - - -class PeriodicTask(TaskObject): - - MAX_SLEEP_SECONDS = 15 - - def __init__(self, type, input, interval, last_run_time, run_immediately): - super(PeriodicTask, self).__init__(type, input) - 
self.interval = interval - self.last_run_time = last_run_time - self.run_immediately = run_immediately - - def do_checkInput(self): - if not self.interval or self.interval < 0: - LOG.error(_('The Periodic Task interval invaild.')) - return False - - return True - - def ready(self): - # first time to run - if self.last_run_time is None: - self.last_run_time = timeutils.strtime() - return self.run_immediately - return timeutils.is_older_than(self.last_run_time, self.interval) - - def execute(self, sync_manager, auth_token): - while not self.ready(): - LOG.debug(_('the periodic task has not ready yet, sleep a while.' - 'current_start_time is %s, last_run_time is %s, and ' - 'the interval is %i.' % (self.start_time, - self.last_run_time, - self.interval))) - _max_sleep_time = self.MAX_SLEEP_SECONDS - eventlet.sleep(seconds=max(self.interval / 10, _max_sleep_time)) - - super(PeriodicTask, self).execute(sync_manager, auth_token) - - -class ChkNewCascadedsPeriodicTask(PeriodicTask): - - def __init__(self, input, interval=60, last_run_time=None, - run_immediately=False): - - super(ChkNewCascadedsPeriodicTask, self).__init__('periodic_add', - input, interval, - last_run_time, - run_immediately) - LOG.debug(_('create ChkNewCascadedsPeriodicTask.')) - - def do_checkInput(self): - self.images = self.input.get('images') - self.cascading_endpoint = self.input.get('cascading_ep') - if self.images is None or not self.cascading_endpoint: - return False - return super(ChkNewCascadedsPeriodicTask, self).do_checkInput() - - def _stil_need_synced(self, cascaded_ep, image_id, auth_token): - g_client = s_utils.create_self_glance_client(auth_token) - try: - image = g_client.images.get(image_id) - except Exception: - LOG.warn(_('The add cascaded periodic task checks that the image ' - 'has deleted, no need to sync. id is %s' % image_id)) - return False - else: - if image.status != 'active': - LOG.warn(_('The add cascaded period task checks image status ' - 'not active, no need to sync.' - 'image id is %s.' % image_id)) - return False - ep_list = [loc['url'] for loc in image.locations - if s_utils.is_glance_location(loc['url'])] - return not s_utils.is_ep_contains(cascaded_ep, ep_list) - - def create_green_threads(self, sync_manager, auth_token): - green_threads = [] - for image_id in self.images: - cascaded_eps = self.images[image_id].get('locations') - kwargs = {'body': self.images[image_id].get('body')} - for cascaded_ep in cascaded_eps: - if not self._stil_need_synced(cascaded_ep, - image_id, auth_token): - continue - green_threads.append(eventlet.spawn(sync_manager.sync_image, - auth_token, - self.cascading_endpoint, - cascaded_ep, - image_id, - image_id, - **kwargs)) - - return green_threads diff --git a/glancesync/glance/sync/utils.py b/glancesync/glance/sync/utils.py deleted file mode 100644 index 80176d22..00000000 --- a/glancesync/glance/sync/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
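The readiness rule that `ChkNewCascadedsPeriodicTask` inherits from `PeriodicTask` reduces to: on the first pass, record the time and run only if `run_immediately` is set; afterwards, run again once `interval` seconds have elapsed since the last run. A small sketch of that check, using the standard library `datetime` in place of oslo `timeutils` (assumed to behave equivalently for this purpose):

```
# Sketch only: plain datetime instead of glance.openstack.common.timeutils.
import datetime


class PeriodicReadiness(object):

    def __init__(self, interval, run_immediately=False):
        self.interval = interval
        self.run_immediately = run_immediately
        self.last_run_time = None

    def ready(self):
        now = datetime.datetime.utcnow()
        if self.last_run_time is None:
            # First pass: record the time and honour run_immediately.
            self.last_run_time = now
            return self.run_immediately
        # Ready once more than `interval` seconds have passed since last run.
        return (now - self.last_run_time).total_seconds() > self.interval
```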
-# -# @author: Jia Dong, HuaWei - -import re - -from oslo.config import cfg -import six.moves.urllib.parse as urlparse - -from glance.sync.clients import Clients as clients - -CONF = cfg.CONF -CONF.import_opt('cascading_endpoint_url', 'glance.common.config', group='sync') -CONF.import_opt('sync_strategy', 'glance.common.config', group='sync') - - -def create_glance_client(auth_token, url): - """ - create glance clients - """ - return clients(auth_token).glance(url=url) - - -def create_self_glance_client(auth_token): - return create_glance_client(auth_token, get_cascading_endpoint_url()) - - -def get_mappings_from_image(auth_token, image_id): - """ - get image's patched glance-locations - """ - client = create_self_glance_client(auth_token) - image = client.images.get(image_id) - locations = image.locations - if not locations: - return {} - return get_mappings_from_locations(locations) - - -def get_mappings_from_locations(locations): - mappings = {} - for loc in locations: - if is_glance_location(loc['url']): - id = loc['metadata'].get('image_id') - if not id: - continue - ep_url = create_ep_by_loc(loc) - mappings[ep_url] = id - return mappings - - -def get_cascading_endpoint_url(): - return CONF.sync.cascading_endpoint_url - - -def get_host_from_ep(ep_url): - if not ep_url: - return None - pieces = urlparse.urlparse(ep_url) - return pieces.netloc.split(':')[0] - -pattern = re.compile(r'^https?://\S+/v2/images/\S+$') - - -def get_default_location(locations): - for location in locations: - if is_default_location(location): - return location - return None - - -def is_glance_location(loc_url): - return pattern.match(loc_url) - - -def is_snapshot_location(location): - l_meta = location['metadata'] - return l_meta and l_meta.get('image_from', None) in['snapshot', 'volume'] - - -def get_id_from_glance_loc(location): - if not is_glance_location(location['url']): - return None - loc_meta = location['metadata'] - if not loc_meta: - return None - return loc_meta.get('image_id', None) - - -def get_id_from_glance_loc_url(loc_url): - if not is_glance_location(loc_url): - return '' - _index = loc_url.find('/v2/images/') + len('/v2/images/') - return loc_url[_index:] - - -def is_default_location(location): - try: - return not is_glance_location(location['url']) \ - and location['metadata']['is_default'] == 'true' - except: - return False - - -def get_snapshot_glance_loc(locations): - for location in locations: - if is_snapshot_location(location): - return location - return None - - -def create_ep_by_loc(location): - loc_url = location['url'] - return create_ep_by_loc_url(loc_url) - - -def create_ep_by_loc_url(loc_url): - if not is_glance_location(loc_url): - return None - piece = urlparse.urlparse(loc_url) - return piece.scheme + '://' + piece.netloc + '/' - - -def generate_glance_location(ep, image_id, port=None): - default_port = port or '9292' - piece = urlparse.urlparse(ep) - paths = [] - paths.append(piece.scheme) - paths.append('://') - paths.append(piece.netloc.split(':')[0]) - paths.append(':') - paths.append(default_port) - paths.append('/v2/images/') - paths.append(image_id) - return ''.join(paths) - - -def get_endpoints(auth_token=None, tenant_id=None, **kwargs): - """ - find which glance should be sync by strategy config - """ - strategy = CONF.sync.sync_strategy - if strategy not in ['All', 'User', 'nova']: - return None - - openstack_clients = clients(auth_token, tenant_id) - ksclient = openstack_clients.keystone() - - ''' - suppose that the cascading glance is 'public' endpoint type, and 
the - cascaded glacne endpoints are 'internal' - ''' - regions = kwargs.pop('region_names', []) - if strategy in ['All', 'nova'] and not regions: - urls = ksclient.service_catalog.get_urls(service_type='image', - endpoint_type='publicURL') - if urls: - result = [u for u in urls if u != get_cascading_endpoint_url()] - else: - result = [] - return result - else: - user_urls = [] - for region_name in regions: - urls = ksclient.service_catalog.get_urls(service_type='image', - endpoint_type='publicURL', - region_name=region_name) - if urls: - user_urls.extend(urls) - result = [u for u in set(user_urls) if u != - get_cascading_endpoint_url()] - return result - - -_V2_IMAGE_CREATE_PROPERTIES = ['container_format', - 'disk_format', 'min_disk', 'min_ram', 'name', - 'virtual_size', 'visibility', 'protected'] - - -def get_core_properties(image): - """ - when sync, create image object, get the sync info - """ - _tags = list(image.tags) or [] - kwargs = {} - for key in _V2_IMAGE_CREATE_PROPERTIES: - try: - value = getattr(image, key, None) - if value and value != 'None': - kwargs[key] = value - except KeyError: - pass - if _tags: - kwargs['tags'] = _tags - return kwargs - - -def calculate_lack_endpoints(all_ep_urls, glance_urls): - """ - calculate endpoints which exists in all_eps but not in glance_eps - """ - if not glance_urls: - return all_ep_urls - - def _contain(ep): - _hosts = [urlparse.urlparse(_ep).netloc for _ep in glance_urls] - return not urlparse.urlparse(ep).netloc in _hosts - return filter(_contain, all_ep_urls) - - -def is_ep_contains(ep_url, glance_urls): - _hosts = [urlparse.urlparse(_ep).netloc for _ep in glance_urls] - return urlparse.urlparse(ep_url) in _hosts diff --git a/glancesync/installation/install.sh b/glancesync/installation/install.sh deleted file mode 100644 index 31185dfc..00000000 --- a/glancesync/installation/install.sh +++ /dev/null @@ -1,160 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -CURPATH=$(cd "$(dirname "$0")"; pwd) -_GLANCE_SYNC_CMD_FILE="glance-sync" -_PYTHON_INSTALL_DIR=${OPENSTACK_INSTALL_DIR} -if [ ! -n ${_PYTHON_INSTALL_DIR} ];then - _PYTHON_INSTALL_DIR="/usr/lib/python2.7/dist-packages" -fi -_GLANCE_DIR="${_PYTHON_INSTALL_DIR}/glance" - -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="${CURPATH}/../glance" -_CONF_DIR="${CURPATH}/../etc" -_BACKUP_DIR="${_GLANCE_DIR}/glance-sync-backup" - -_SCRIPT_LOGFILE="/var/log/glance/installation/install.log" - -export PS4='+{$LINENO:${FUNCNAME[0]}}' - -ERRTRAP() -{ - echo "[LINE:$1] Error: Command or function exited with status $?" -} - -function log() -{ - echo "$@" - echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE -} - - -function process_stop -{ - PID=`ps -efw|grep "$1"|grep -v grep|awk '{print $2}'` - echo "PID is: $PID">>$_SCRIPT_LOGFILE - if [ "x${PID}" != "x" ]; then - for kill_id in $PID - do - kill -9 ${kill_id} - if [ $? 
-ne 0 ]; then - echo "[[stop glance-sync]]$1 stop failed.">>$_SCRIPT_LOGFILE - exit 1 - fi - done - echo "[[stop glance-sync]]$1 stop ok.">>$_SCRIPT_LOGFILE - fi -} - -function backup -{ - log "checking previous installation..." - if [ -d "${_BACKUP_DIR}/glance" ] ; then - log "It seems glance cascading has already been installed!" - log "Please check README for solution if this is not true." - exit 1 - fi - - log "backing up current files that might be overwritten..." - mkdir -p "${_BACKUP_DIR}/glance" - mkdir -p "${_BACKUP_DIR}/etc/glance" - - if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/glance" - rm -r "${_BACKUP_DIR}/etc" - log "Error in config backup, aborted." - exit 1 - fi -} - -function restart_services -{ - log "restarting glance ..." - service glance-api restart - service glance-registry restart - process_stop "glance-sync" - python /usr/bin/glance-sync --config-file=/etc/glance/glance-sync.conf & -} - -function preinstall -{ - if [[ ${EUID} -ne 0 ]]; then - log "Please run as root." - exit 1 - fi - - if [ ! -d "/var/log/glance/installation" ]; then - mkdir -p /var/log/glance/installation - touch _SCRIPT_LOGFILE - fi - - log "checking installation directories..." - if [ ! -d "${_GLANCE_DIR}" ] ; then - log "Could not find the glance installation. Please check the variables in the beginning of the script." - log "aborted." - exit 1 - fi - - if [ ! -f "${_CONF_DIR}/${_GLANCE_SYNC_CMD_FILE}" ]; then - log "Could not find the glance-sync file. Please check the variables in the beginning of the script." - log "aborted." - exit 1 - fi -} - -# -#Start to execute here -# - -trap 'ERRTRAP $LINENO' ERR - -preinstall -if [ $? -ne 0 ] ; then - exit 1 -fi - -backup -if [ $? -ne 0 ] ; then - exit 1 -fi - - - -log "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_GLANCE_DIR}` -cp -r "${_CONF_DIR}/glance" "/etc" -cp "${_CONF_DIR}/${_GLANCE_SYNC_CMD_FILE}" "/usr/bin/" - -#Config options -log "configurate the glance options which is in script/tricircle.cfg" -cd `dirname $0`/../../script -python config.py glance -if [ $? -ne 0 ] ; then - log "configurate the glance options error." - exit 1 -fi -cd - - -restart_services -if [ $? -ne 0 ] ; then - log "There was an error in restarting the service, please restart glance manually." - exit 1 -fi - -log "Completed." 
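The location helpers in `glance/sync/utils.py` above recognise glance location URLs of the form `http://host:9292/v2/images/<id>` with a regex, rebuild the endpoint from the URL's scheme and netloc, and take the image id from the path after `/v2/images/`. The sketch below combines `is_glance_location`, `create_ep_by_loc_url` and `get_id_from_glance_loc_url` into a single illustrative helper; the `endpoint_and_image_id` name is not from the original module.

```
# Sketch only: endpoint_and_image_id is an illustrative helper.
import re

import six.moves.urllib.parse as urlparse

GLANCE_LOC = re.compile(r'^https?://\S+/v2/images/\S+$')


def endpoint_and_image_id(loc_url):
    if not GLANCE_LOC.match(loc_url):
        return None, None
    pieces = urlparse.urlparse(loc_url)
    endpoint = pieces.scheme + '://' + pieces.netloc + '/'
    image_id = loc_url[loc_url.find('/v2/images/') + len('/v2/images/'):]
    return endpoint, image_id


print(endpoint_and_image_id('http://10.0.0.1:9292/v2/images/abc-123'))
# -> ('http://10.0.0.1:9292/', 'abc-123')
```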
- -exit 0 diff --git a/novaproxy/nova/image/cascading.py b/image/cascading.py similarity index 89% rename from novaproxy/nova/image/cascading.py rename to image/cascading.py index 9806a607..492f7826 100644 --- a/novaproxy/nova/image/cascading.py +++ b/image/cascading.py @@ -7,7 +7,6 @@ import urlparse from oslo.config import cfg -from nova.openstack.common.gettextutils import _ from nova.image import glance from nova.image.sync import drivers as drivermgr @@ -18,42 +17,42 @@ LOG = logging.getLogger(__name__) glance_cascading_opt = [ cfg.StrOpt('image_copy_dest_location_url', default='file:///var/lib/glance/images', - help=_("The path cascaded image_data copy to."), + help=("The path cascaded image_data copy to."), deprecated_opts=[cfg.DeprecatedOpt('dest_location_url', group='DEFAULT')]), cfg.StrOpt('image_copy_dest_host', default='127.0.0.1', - help=_("The host name where image_data copy to."), + help=("The host name where image_data copy to."), deprecated_opts=[cfg.DeprecatedOpt('dest_host', group='DEFAULT')]), cfg.StrOpt('image_copy_dest_user', default='glance', - help=_("The user name of cascaded glance for copy."), + help=("The user name of cascaded glance for copy."), deprecated_opts=[cfg.DeprecatedOpt('dest_user', group='DEFAULT')]), cfg.StrOpt('image_copy_dest_password', default='openstack', - help=_("The passowrd of cascaded glance for copy."), + help=("The passowrd of cascaded glance for copy."), deprecated_opts=[cfg.DeprecatedOpt('dest_password', group='DEFAULT')]), cfg.StrOpt('image_copy_source_location_url', default='file:///var/lib/glance/images', - help=_("where the cascaded image data from"), + help=("where the cascaded image data from"), deprecated_opts=[cfg.DeprecatedOpt('source_location_url', group='DEFAULT')]), cfg.StrOpt('image_copy_source_host', default='0.0.0.1', - help=_("The host name where image_data copy from."), + help=("The host name where image_data copy from."), deprecated_opts=[cfg.DeprecatedOpt('source_host', group='DEFAULT')]), cfg.StrOpt('image_copy_source_user', default='glance', - help=_("The user name of glance for copy."), + help=("The user name of glance for copy."), deprecated_opts=[cfg.DeprecatedOpt('source_user', group='DEFAULT')]), cfg.StrOpt('image_copy_source_password', default='openstack', - help=_("The passowrd of glance for copy."), + help=("The passowrd of glance for copy."), deprecated_opts=[cfg.DeprecatedOpt('source_password', group='DEFAULT')]), ] @@ -123,11 +122,11 @@ class GlanceCascadingService(object): try: image_loc = self._copy_data(image_id, cascaded_id, candidate_path) except Exception as e: - LOG.exception(_("copy image failed, reason=%s") % e) + LOG.exception(("copy image failed, reason=%s") % e) raise else: if not image_loc: - LOG.exception(_("copy image Exception, no cascaded_loc")) + LOG.exception(("copy image Exception, no cascaded_loc")) try: # patch loc to the cascaded image csd_locs = [{'url': image_loc, @@ -137,7 +136,7 @@ class GlanceCascadingService(object): remove_props=None, locations=csd_locs) except Exception as e: - LOG.exception(_("patch loc to cascaded image Exception, reason: %s" + LOG.exception(("patch loc to cascaded image Exception, reason: %s" % e)) raise @@ -154,7 +153,7 @@ class GlanceCascadingService(object): self._client.call(context, 2, 'update', image_id, remove_props=None, locations=csg_locs) except Exception as e: - LOG.exception(_("patch loc to cascading image Exception, reason: %s" + LOG.exception(("patch loc to cascading image Exception, reason: %s" % e)) raise diff --git 
a/novaproxy/nova/image/exception.py b/image/exception.py old mode 100755 new mode 100644 similarity index 100% rename from novaproxy/nova/image/exception.py rename to image/exception.py diff --git a/novaproxy/nova/image/sync/__init__.py b/image/sync/__init__.py similarity index 100% rename from novaproxy/nova/image/sync/__init__.py rename to image/sync/__init__.py diff --git a/novaproxy/nova/image/sync/drivers/__init__.py b/image/sync/drivers/__init__.py similarity index 100% rename from novaproxy/nova/image/sync/drivers/__init__.py rename to image/sync/drivers/__init__.py diff --git a/novaproxy/nova/image/sync/drivers/filesystem.py b/image/sync/drivers/filesystem.py similarity index 100% rename from novaproxy/nova/image/sync/drivers/filesystem.py rename to image/sync/drivers/filesystem.py diff --git a/juno-patches/cinder/timestamp-query-patch/README.md b/juno-patches/cinder/timestamp-query-patch/README.md deleted file mode 100644 index 7c103132..00000000 --- a/juno-patches/cinder/timestamp-query-patch/README.md +++ /dev/null @@ -1,54 +0,0 @@ -Cinder timestamp-query-patch -=============================== -This patch is applied to the cascaded level's control node. - -The Cinder Juno database already has an updated_at attribute for the changes-since -query filter; however, the Cinder DB API in this version does not support -timestamp queries. This patch is therefore needed at the cascaded level -while synchronizing state between the cascading and cascaded OpenStack levels. - -Key modules ----------- - -* adds a timestamp query function when listing volumes: - - cinder\db\sqlalchemy\api.py - - -Requirements ------------- -* OpenStack Juno has been installed - -Installation ------------- - -We provide two ways to install the timestamp query patch code. In this section, we will guide you through installing the timestamp query patch. - -* **Note:** - - - Make sure you have an existing installation of **OpenStack Juno**. - - We recommend that you back up at least the following files before installation, because they are to be overwritten or modified: - -* **Manual Installation** - - - Make sure you have performed backups properly. - - - Navigate to the local repository and copy the contents of the 'cinder' sub-directory to the corresponding places in the existing cinder installation, e.g. - ```cp -r $LOCAL_REPOSITORY_DIR/cinder $CINDER_PARENT_DIR``` - (replace the $... with the actual directory name.) - - - Restart the cinder-api service. - - - Done. The timestamp query patch should be working with a demo configuration. - -* **Automatic Installation** - - - Make sure you have performed backups properly. - - - Navigate to the installation directory and run the installation script. - ``` - cd $LOCAL_REPOSITORY_DIR/installation - sudo bash ./install.sh - ``` - (replace the $... with the actual directory name.) - diff --git a/juno-patches/cinder/timestamp-query-patch/cinder/db/sqlalchemy/api.py b/juno-patches/cinder/timestamp-query-patch/cinder/db/sqlalchemy/api.py deleted file mode 100644 index 009a1422..00000000 --- a/juno-patches/cinder/timestamp-query-patch/cinder/db/sqlalchemy/api.py +++ /dev/null @@ -1,3153 +0,0 @@ -# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2014 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of SQLAlchemy backend.""" - - -import functools -import sys -import threading -import time -import uuid -import warnings - -from oslo.config import cfg -from oslo.db import exception as db_exc -from oslo.db import options -from oslo.db.sqlalchemy import session as db_session -import osprofiler.sqlalchemy -import sqlalchemy -from sqlalchemy import or_ -from sqlalchemy.orm import joinedload, joinedload_all -from sqlalchemy.orm import RelationshipProperty -from sqlalchemy.sql.expression import literal_column -from sqlalchemy.sql import func - -from cinder.common import sqlalchemyutils -from cinder.db.sqlalchemy import models -from cinder import exception -from cinder.i18n import _ -from cinder.openstack.common import log as logging -from cinder.openstack.common import timeutils -from cinder.openstack.common import uuidutils - - -CONF = cfg.CONF -CONF.import_group("profiler", "cinder.service") -LOG = logging.getLogger(__name__) - -options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite') - -_LOCK = threading.Lock() -_FACADE = None - - -def _create_facade_lazily(): - global _LOCK - with _LOCK: - global _FACADE - if _FACADE is None: - _FACADE = db_session.EngineFacade( - CONF.database.connection, - **dict(CONF.database.iteritems()) - ) - - if CONF.profiler.profiler_enabled: - if CONF.profiler.trace_sqlalchemy: - osprofiler.sqlalchemy.add_tracing(sqlalchemy, - _FACADE.get_engine(), - "db") - - return _FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(**kwargs): - facade = _create_facade_lazily() - return facade.get_session(**kwargs) - -_DEFAULT_QUOTA_NAME = 'default' - - -def get_backend(): - """The backend is this module itself.""" - - return sys.modules[__name__] - - -def is_admin_context(context): - """Indicates if the request context is an administrator.""" - if not context: - warnings.warn(_('Use of empty request context is deprecated'), - DeprecationWarning) - raise Exception('die') - return context.is_admin - - -def is_user_context(context): - """Indicates if the request context is a normal user.""" - if not context: - return False - if context.is_admin: - return False - if not context.user_id or not context.project_id: - return False - return True - - -def authorize_project_context(context, project_id): - """Ensures a request has permission to access the given project.""" - if is_user_context(context): - if not context.project_id: - raise exception.NotAuthorized() - elif context.project_id != project_id: - raise exception.NotAuthorized() - - -def authorize_user_context(context, user_id): - """Ensures a request has permission to access the given user.""" - if is_user_context(context): - if not context.user_id: - raise exception.NotAuthorized() - elif context.user_id != user_id: - raise exception.NotAuthorized() - - -def authorize_quota_class_context(context, class_name): - """Ensures a request has permission to access the given quota class.""" - if is_user_context(context): - if not context.quota_class: - raise exception.NotAuthorized() - elif context.quota_class != class_name: 
- raise exception.NotAuthorized() - - -def require_admin_context(f): - """Decorator to require admin request context. - - The first argument to the wrapped function must be the context. - - """ - - def wrapper(*args, **kwargs): - if not is_admin_context(args[0]): - raise exception.AdminRequired() - return f(*args, **kwargs) - return wrapper - - -def require_context(f): - """Decorator to require *any* user or admin context. - - This does no authorization for user or project access matching, see - :py:func:`authorize_project_context` and - :py:func:`authorize_user_context`. - - The first argument to the wrapped function must be the context. - - """ - - def wrapper(*args, **kwargs): - if not is_admin_context(args[0]) and not is_user_context(args[0]): - raise exception.NotAuthorized() - return f(*args, **kwargs) - return wrapper - - -def require_volume_exists(f): - """Decorator to require the specified volume to exist. - - Requires the wrapped function to use context and volume_id as - their first two arguments. - """ - - def wrapper(context, volume_id, *args, **kwargs): - volume_get(context, volume_id) - return f(context, volume_id, *args, **kwargs) - wrapper.__name__ = f.__name__ - return wrapper - - -def require_snapshot_exists(f): - """Decorator to require the specified snapshot to exist. - - Requires the wrapped function to use context and snapshot_id as - their first two arguments. - """ - - def wrapper(context, snapshot_id, *args, **kwargs): - snapshot_get(context, snapshot_id) - return f(context, snapshot_id, *args, **kwargs) - wrapper.__name__ = f.__name__ - return wrapper - - -def _retry_on_deadlock(f): - """Decorator to retry a DB API call if Deadlock was received.""" - @functools.wraps(f) - def wrapped(*args, **kwargs): - while True: - try: - return f(*args, **kwargs) - except db_exc.DBDeadlock: - LOG.warn(_("Deadlock detected when running " - "'%(func_name)s': Retrying..."), - dict(func_name=f.__name__)) - # Retry! - time.sleep(0.5) - continue - functools.update_wrapper(wrapped, f) - return wrapped - - -def model_query(context, *args, **kwargs): - """Query helper that accounts for context's `read_deleted` field. - - :param context: context to query under - :param session: if present, the session to use - :param read_deleted: if present, overrides context's read_deleted field. - :param project_only: if present and context is user-type, then restrict - query to match the context's project_id. 
- """ - session = kwargs.get('session') or get_session() - read_deleted = kwargs.get('read_deleted') or context.read_deleted - project_only = kwargs.get('project_only') - - query = session.query(*args) - - if read_deleted == 'no': - query = query.filter_by(deleted=False) - elif read_deleted == 'yes': - pass # omit the filter to include deleted and active - elif read_deleted == 'only': - query = query.filter_by(deleted=True) - else: - raise Exception( - _("Unrecognized read_deleted value '%s'") % read_deleted) - - if project_only and is_user_context(context): - query = query.filter_by(project_id=context.project_id) - - return query - - -def _sync_volumes(context, project_id, session, volume_type_id=None, - volume_type_name=None): - (volumes, gigs) = _volume_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - key = 'volumes' - if volume_type_name: - key += '_' + volume_type_name - return {key: volumes} - - -def _sync_snapshots(context, project_id, session, volume_type_id=None, - volume_type_name=None): - (snapshots, gigs) = _snapshot_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - key = 'snapshots' - if volume_type_name: - key += '_' + volume_type_name - return {key: snapshots} - - -def _sync_backups(context, project_id, session, volume_type_id=None, - volume_type_name=None): - (backups, gigs) = _backup_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - key = 'backups' - return {key: backups} - - -def _sync_gigabytes(context, project_id, session, volume_type_id=None, - volume_type_name=None): - (_junk, vol_gigs) = _volume_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - key = 'gigabytes' - if volume_type_name: - key += '_' + volume_type_name - if CONF.no_snapshot_gb_quota: - return {key: vol_gigs} - (_junk, snap_gigs) = _snapshot_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - return {key: vol_gigs + snap_gigs} - - -def _sync_consistencygroups(context, project_id, session, - volume_type_id=None, - volume_type_name=None): - (_junk, groups) = _consistencygroup_data_get_for_project( - context, project_id, session=session) - key = 'consistencygroups' - return {key: groups} - - -def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None, - volume_type_name=None): - key = 'backup_gigabytes' - (_junk, backup_gigs) = _backup_data_get_for_project( - context, project_id, volume_type_id=volume_type_id, session=session) - return {key: backup_gigs} - - -QUOTA_SYNC_FUNCTIONS = { - '_sync_volumes': _sync_volumes, - '_sync_snapshots': _sync_snapshots, - '_sync_gigabytes': _sync_gigabytes, - '_sync_consistencygroups': _sync_consistencygroups, - '_sync_backups': _sync_backups, - '_sync_backup_gigabytes': _sync_backup_gigabytes -} - - -################### - - -@require_admin_context -def service_destroy(context, service_id): - session = get_session() - with session.begin(): - service_ref = _service_get(context, service_id, session=session) - service_ref.delete(session=session) - - -@require_admin_context -def _service_get(context, service_id, session=None): - result = model_query( - context, - models.Service, - session=session).\ - filter_by(id=service_id).\ - first() - if not result: - raise exception.ServiceNotFound(service_id=service_id) - - return result - - -@require_admin_context -def service_get(context, service_id): - return _service_get(context, service_id) - - 
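As the timestamp-query README notes, the substantive change this copy of `api.py` carries is a changes-since filter on volume listings (it appears further down in `_generate_paginate_query`): an ISO8601 timestamp is parsed and only rows whose `updated_at` is at least that recent are returned. A standalone sketch of that filter, using a placeholder `Volume` model, an in-memory SQLite session, and `strptime` where the original relies on oslo `timeutils`:

```
# Sketch only: placeholder Volume model and SQLite session; the deleted code
# filters cinder's own models.Volume inside _generate_paginate_query.
import datetime

from sqlalchemy import Column, DateTime, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Volume(Base):
    __tablename__ = 'volumes'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime)


def volumes_changed_since(session, iso_timestamp):
    # Parse the changes-since value and keep rows updated at or after it.
    changes_since = datetime.datetime.strptime(iso_timestamp,
                                               '%Y-%m-%dT%H:%M:%S')
    return session.query(Volume).filter(
        Volume.updated_at >= changes_since).all()


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Volume(id=1, updated_at=datetime.datetime.utcnow()))
session.commit()
print(volumes_changed_since(session, '2015-01-01T00:00:00'))
```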
-@require_admin_context -def service_get_all(context, disabled=None): - query = model_query(context, models.Service) - - if disabled is not None: - query = query.filter_by(disabled=disabled) - - return query.all() - - -@require_admin_context -def service_get_all_by_topic(context, topic, disabled=None): - query = model_query( - context, models.Service, read_deleted="no").\ - filter_by(topic=topic) - - if disabled is not None: - query = query.filter_by(disabled=disabled) - - return query.all() - - -@require_admin_context -def service_get_by_host_and_topic(context, host, topic): - result = model_query( - context, models.Service, read_deleted="no").\ - filter_by(disabled=False).\ - filter_by(host=host).\ - filter_by(topic=topic).\ - first() - if not result: - raise exception.ServiceNotFound(service_id=None) - return result - - -@require_admin_context -def service_get_all_by_host(context, host): - return model_query( - context, models.Service, read_deleted="no").\ - filter_by(host=host).\ - all() - - -@require_admin_context -def _service_get_all_topic_subquery(context, session, topic, subq, label): - sort_value = getattr(subq.c, label) - return model_query(context, models.Service, - func.coalesce(sort_value, 0), - session=session, read_deleted="no").\ - filter_by(topic=topic).\ - filter_by(disabled=False).\ - outerjoin((subq, models.Service.host == subq.c.host)).\ - order_by(sort_value).\ - all() - - -@require_admin_context -def service_get_all_volume_sorted(context): - session = get_session() - with session.begin(): - topic = CONF.volume_topic - label = 'volume_gigabytes' - subq = model_query(context, models.Volume.host, - func.sum(models.Volume.size).label(label), - session=session, read_deleted="no").\ - group_by(models.Volume.host).\ - subquery() - return _service_get_all_topic_subquery(context, - session, - topic, - subq, - label) - - -@require_admin_context -def service_get_by_args(context, host, binary): - result = model_query(context, models.Service).\ - filter_by(host=host).\ - filter_by(binary=binary).\ - first() - - if not result: - raise exception.HostBinaryNotFound(host=host, binary=binary) - - return result - - -@require_admin_context -def service_create(context, values): - service_ref = models.Service() - service_ref.update(values) - if not CONF.enable_new_services: - service_ref.disabled = True - - session = get_session() - with session.begin(): - service_ref.save(session) - return service_ref - - -@require_admin_context -def service_update(context, service_id, values): - session = get_session() - with session.begin(): - service_ref = _service_get(context, service_id, session=session) - service_ref.update(values) - return service_ref - - -################### - - -def _metadata_refs(metadata_dict, meta_class): - metadata_refs = [] - if metadata_dict: - for k, v in metadata_dict.iteritems(): - metadata_ref = meta_class() - metadata_ref['key'] = k - metadata_ref['value'] = v - metadata_refs.append(metadata_ref) - return metadata_refs - - -def _dict_with_extra_specs(inst_type_query): - """Convert type query result to dict with extra_spec and rate_limit. - - Takes a volume type query returned by sqlalchemy and returns it - as a dictionary, converting the extra_specs entry from a list - of dicts: - - 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
- to a single dict: - 'extra_specs' : {'k1': 'v1'} - """ - inst_type_dict = dict(inst_type_query) - extra_specs = dict([(x['key'], x['value']) - for x in inst_type_query['extra_specs']]) - inst_type_dict['extra_specs'] = extra_specs - return inst_type_dict - - -################### - - -@require_admin_context -def iscsi_target_count_by_host(context, host): - return model_query(context, models.IscsiTarget).\ - filter_by(host=host).\ - count() - - -@require_admin_context -def iscsi_target_create_safe(context, values): - iscsi_target_ref = models.IscsiTarget() - - for (key, value) in values.iteritems(): - iscsi_target_ref[key] = value - session = get_session() - - try: - with session.begin(): - session.add(iscsi_target_ref) - return iscsi_target_ref - # TODO(e0ne): Remove check on db_exc.DBError, when - # Cinder will use oslo.db 0.4.0 or higher. - except (db_exc.DBError, db_exc.DBDuplicateEntry): - return None - - -################### - - -@require_context -def _quota_get(context, project_id, resource, session=None): - result = model_query(context, models.Quota, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() - - if not result: - raise exception.ProjectQuotaNotFound(project_id=project_id) - - return result - - -@require_context -def quota_get(context, project_id, resource): - return _quota_get(context, project_id, resource) - - -@require_context -def quota_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - rows = model_query(context, models.Quota, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - result = {'project_id': project_id} - for row in rows: - result[row.resource] = row.hard_limit - - return result - - -@require_admin_context -def quota_create(context, project_id, resource, limit): - quota_ref = models.Quota() - quota_ref.project_id = project_id - quota_ref.resource = resource - quota_ref.hard_limit = limit - - session = get_session() - with session.begin(): - quota_ref.save(session) - return quota_ref - - -@require_admin_context -def quota_update(context, project_id, resource, limit): - session = get_session() - with session.begin(): - quota_ref = _quota_get(context, project_id, resource, session=session) - quota_ref.hard_limit = limit - return quota_ref - - -@require_admin_context -def quota_destroy(context, project_id, resource): - session = get_session() - with session.begin(): - quota_ref = _quota_get(context, project_id, resource, session=session) - quota_ref.delete(session=session) - - -################### - - -@require_context -def _quota_class_get(context, class_name, resource, session=None): - result = model_query(context, models.QuotaClass, session=session, - read_deleted="no").\ - filter_by(class_name=class_name).\ - filter_by(resource=resource).\ - first() - - if not result: - raise exception.QuotaClassNotFound(class_name=class_name) - - return result - - -@require_context -def quota_class_get(context, class_name, resource): - return _quota_class_get(context, class_name, resource) - - -def quota_class_get_default(context): - rows = model_query(context, models.QuotaClass, - read_deleted="no").\ - filter_by(class_name=_DEFAULT_QUOTA_NAME).all() - - result = {'class_name': _DEFAULT_QUOTA_NAME} - for row in rows: - result[row.resource] = row.hard_limit - - return result - - -@require_context -def quota_class_get_all_by_name(context, class_name): - authorize_quota_class_context(context, class_name) - - rows = model_query(context, 
models.QuotaClass, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() - - result = {'class_name': class_name} - for row in rows: - result[row.resource] = row.hard_limit - - return result - - -@require_admin_context -def quota_class_create(context, class_name, resource, limit): - quota_class_ref = models.QuotaClass() - quota_class_ref.class_name = class_name - quota_class_ref.resource = resource - quota_class_ref.hard_limit = limit - - session = get_session() - with session.begin(): - quota_class_ref.save(session) - return quota_class_ref - - -@require_admin_context -def quota_class_update(context, class_name, resource, limit): - session = get_session() - with session.begin(): - quota_class_ref = _quota_class_get(context, class_name, resource, - session=session) - quota_class_ref.hard_limit = limit - return quota_class_ref - - -@require_admin_context -def quota_class_destroy(context, class_name, resource): - session = get_session() - with session.begin(): - quota_class_ref = _quota_class_get(context, class_name, resource, - session=session) - quota_class_ref.delete(session=session) - - -@require_admin_context -def quota_class_destroy_all_by_name(context, class_name): - session = get_session() - with session.begin(): - quota_classes = model_query(context, models.QuotaClass, - session=session, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() - - for quota_class_ref in quota_classes: - quota_class_ref.delete(session=session) - - -################### - - -@require_context -def quota_usage_get(context, project_id, resource): - result = model_query(context, models.QuotaUsage, read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() - - if not result: - raise exception.QuotaUsageNotFound(project_id=project_id) - - return result - - -@require_context -def quota_usage_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - rows = model_query(context, models.QuotaUsage, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - result = {'project_id': project_id} - for row in rows: - result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) - - return result - - -@require_admin_context -def _quota_usage_create(context, project_id, resource, in_use, reserved, - until_refresh, session=None): - - quota_usage_ref = models.QuotaUsage() - quota_usage_ref.project_id = project_id - quota_usage_ref.resource = resource - quota_usage_ref.in_use = in_use - quota_usage_ref.reserved = reserved - quota_usage_ref.until_refresh = until_refresh - quota_usage_ref.save(session=session) - - return quota_usage_ref - - -################### - - -def _reservation_create(context, uuid, usage, project_id, resource, delta, - expire, session=None): - reservation_ref = models.Reservation() - reservation_ref.uuid = uuid - reservation_ref.usage_id = usage['id'] - reservation_ref.project_id = project_id - reservation_ref.resource = resource - reservation_ref.delta = delta - reservation_ref.expire = expire - reservation_ref.save(session=session) - return reservation_ref - - -################### - - -# NOTE(johannes): The quota code uses SQL locking to ensure races don't -# cause under or over counting of resources. To avoid deadlocks, this -# code always acquires the lock on quota_usages before acquiring the lock -# on reservations. 
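The locking note above is the whole deadlock-avoidance strategy: every path takes the quota_usages rows before the reservations rows, so two transactions can never each hold one lock while waiting on the other. A toy illustration with Python locks standing in for the `SELECT ... FOR UPDATE` row locks (the real code expresses the same rule purely through the order of its queries):

```
# Toy illustration: Python locks stand in for the row locks taken via
# with_lockmode('update') in the quota code above.
import threading

quota_usages_lock = threading.Lock()
reservations_lock = threading.Lock()


def reserve(deltas):
    # Lock order: quota_usages first, reservations second - always.
    with quota_usages_lock:
        with reservations_lock:
            return {'reserved': deltas}


def commit(reservations):
    # Every other path uses the same order; reversing it anywhere could
    # leave two transactions each waiting on the other's lock.
    with quota_usages_lock:
        with reservations_lock:
            return {'committed': reservations}
```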
- -def _get_quota_usages(context, session, project_id): - # Broken out for testability - rows = model_query(context, models.QuotaUsage, - read_deleted="no", - session=session).\ - filter_by(project_id=project_id).\ - with_lockmode('update').\ - all() - return dict((row.resource, row) for row in rows) - - -@require_context -@_retry_on_deadlock -def quota_reserve(context, resources, quotas, deltas, expire, - until_refresh, max_age, project_id=None): - elevated = context.elevated() - session = get_session() - with session.begin(): - if project_id is None: - project_id = context.project_id - - # Get the current usages - usages = _get_quota_usages(context, session, project_id) - - # Handle usage refresh - work = set(deltas.keys()) - while work: - resource = work.pop() - - # Do we need to refresh the usage? - refresh = False - if resource not in usages: - usages[resource] = _quota_usage_create(elevated, - project_id, - resource, - 0, 0, - until_refresh or None, - session=session) - refresh = True - elif usages[resource].in_use < 0: - # Negative in_use count indicates a desync, so try to - # heal from that... - refresh = True - elif usages[resource].until_refresh is not None: - usages[resource].until_refresh -= 1 - if usages[resource].until_refresh <= 0: - refresh = True - elif max_age and usages[resource].updated_at is not None and ( - (usages[resource].updated_at - - timeutils.utcnow()).seconds >= max_age): - refresh = True - - # OK, refresh the usage - if refresh: - # Grab the sync routine - sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] - volume_type_id = getattr(resources[resource], - 'volume_type_id', None) - volume_type_name = getattr(resources[resource], - 'volume_type_name', None) - updates = sync(elevated, project_id, - volume_type_id=volume_type_id, - volume_type_name=volume_type_name, - session=session) - for res, in_use in updates.items(): - # Make sure we have a destination for the usage! - if res not in usages: - usages[res] = _quota_usage_create( - elevated, - project_id, - res, - 0, 0, - until_refresh or None, - session=session - ) - - # Update the usage - usages[res].in_use = in_use - usages[res].until_refresh = until_refresh or None - - # Because more than one resource may be refreshed - # by the call to the sync routine, and we don't - # want to double-sync, we make sure all refreshed - # resources are dropped from the work set. - work.discard(res) - - # NOTE(Vek): We make the assumption that the sync - # routine actually refreshes the - # resources that it is the sync routine - # for. We don't check, because this is - # a best-effort mechanism. - - # Check for deltas that would go negative - unders = [r for r, delta in deltas.items() - if delta < 0 and delta + usages[r].in_use < 0] - - # Now, let's check the quotas - # NOTE(Vek): We're only concerned about positive increments. - # If a project has gone over quota, we want them to - # be able to reduce their usage without any - # problems. - overs = [r for r, delta in deltas.items() - if quotas[r] >= 0 and delta >= 0 and - quotas[r] < delta + usages[r].total] - - # NOTE(Vek): The quota check needs to be in the transaction, - # but the transaction doesn't fail just because - # we're over quota, so the OverQuota raise is - # outside the transaction. If we did the raise - # here, our usage updates would be discarded, but - # they're not invalidated by being over-quota. 
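The over-quota test used by `quota_reserve` only considers positive deltas and non-negative limits: a resource is over quota when its limit is smaller than the requested delta plus current usage. A reduced sketch, assuming a usage row's total means `in_use + reserved`:

```
# Reduced sketch of the `overs` computation; assumes total = in_use + reserved.
def find_overs(deltas, quotas, usages):
    return [r for r, delta in deltas.items()
            if quotas[r] >= 0 and delta >= 0
            and quotas[r] < delta + usages[r]['in_use'] + usages[r]['reserved']]


print(find_overs({'volumes': 2}, {'volumes': 10},
                 {'volumes': {'in_use': 9, 'reserved': 0}}))
# -> ['volumes'] because 10 < 2 + 9
```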
- - # Create the reservations - if not overs: - reservations = [] - for resource, delta in deltas.items(): - reservation = _reservation_create(elevated, - str(uuid.uuid4()), - usages[resource], - project_id, - resource, delta, expire, - session=session) - reservations.append(reservation.uuid) - - # Also update the reserved quantity - # NOTE(Vek): Again, we are only concerned here about - # positive increments. Here, though, we're - # worried about the following scenario: - # - # 1) User initiates resize down. - # 2) User allocates a new instance. - # 3) Resize down fails or is reverted. - # 4) User is now over quota. - # - # To prevent this, we only update the - # reserved value if the delta is positive. - if delta > 0: - usages[resource].reserved += delta - - if unders: - LOG.warning(_("Change will make usage less than 0 for the following " - "resources: %s") % unders) - if overs: - usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved'])) - for k, v in usages.items()) - raise exception.OverQuota(overs=sorted(overs), quotas=quotas, - usages=usages) - - return reservations - - -def _quota_reservations(session, context, reservations): - """Return the relevant reservations.""" - - # Get the listed reservations - return model_query(context, models.Reservation, - read_deleted="no", - session=session).\ - filter(models.Reservation.uuid.in_(reservations)).\ - with_lockmode('update').\ - all() - - -@require_context -@_retry_on_deadlock -def reservation_commit(context, reservations, project_id=None): - session = get_session() - with session.begin(): - usages = _get_quota_usages(context, session, project_id) - - for reservation in _quota_reservations(session, context, reservations): - usage = usages[reservation.resource] - if reservation.delta >= 0: - usage.reserved -= reservation.delta - usage.in_use += reservation.delta - - reservation.delete(session=session) - - -@require_context -@_retry_on_deadlock -def reservation_rollback(context, reservations, project_id=None): - session = get_session() - with session.begin(): - usages = _get_quota_usages(context, session, project_id) - - for reservation in _quota_reservations(session, context, reservations): - usage = usages[reservation.resource] - if reservation.delta >= 0: - usage.reserved -= reservation.delta - - reservation.delete(session=session) - - -@require_admin_context -@_retry_on_deadlock -def quota_destroy_all_by_project(context, project_id): - session = get_session() - with session.begin(): - quotas = model_query(context, models.Quota, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for quota_ref in quotas: - quota_ref.delete(session=session) - - quota_usages = model_query(context, models.QuotaUsage, - session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for quota_usage_ref in quota_usages: - quota_usage_ref.delete(session=session) - - reservations = model_query(context, models.Reservation, - session=session, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for reservation_ref in reservations: - reservation_ref.delete(session=session) - - -@require_admin_context -@_retry_on_deadlock -def reservation_expire(context): - session = get_session() - with session.begin(): - current_time = timeutils.utcnow() - results = model_query(context, models.Reservation, session=session, - read_deleted="no").\ - filter(models.Reservation.expire < current_time).\ - all() - - if results: - for reservation in results: - if reservation.delta >= 0: - 
reservation.usage.reserved -= reservation.delta - reservation.usage.save(session=session) - - reservation.delete(session=session) - - -################### - - -@require_admin_context -@_retry_on_deadlock -def volume_allocate_iscsi_target(context, volume_id, host): - session = get_session() - with session.begin(): - iscsi_target_ref = model_query(context, models.IscsiTarget, - session=session, read_deleted="no").\ - filter_by(volume=None).\ - filter_by(host=host).\ - with_lockmode('update').\ - first() - - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not iscsi_target_ref: - raise exception.NoMoreTargets() - - iscsi_target_ref.volume_id = volume_id - session.add(iscsi_target_ref) - - return iscsi_target_ref.target_num - - -@require_admin_context -def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint): - if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(uuid=instance_uuid) - - session = get_session() - with session.begin(): - volume_ref = _volume_get(context, volume_id, session=session) - volume_ref['status'] = 'in-use' - volume_ref['mountpoint'] = mountpoint - volume_ref['attach_status'] = 'attached' - volume_ref['instance_uuid'] = instance_uuid - volume_ref['attached_host'] = host_name - return volume_ref - - -@require_context -def volume_create(context, values): - values['volume_metadata'] = _metadata_refs(values.get('metadata'), - models.VolumeMetadata) - if is_admin_context(context): - values['volume_admin_metadata'] = \ - _metadata_refs(values.get('admin_metadata'), - models.VolumeAdminMetadata) - elif values.get('volume_admin_metadata'): - del values['volume_admin_metadata'] - - volume_ref = models.Volume() - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - volume_ref.update(values) - - session = get_session() - with session.begin(): - session.add(volume_ref) - - return _volume_get(context, values['id'], session=session) - - -@require_admin_context -def volume_data_get_for_host(context, host, count_only=False): - if count_only: - result = model_query(context, - func.count(models.Volume.id), - read_deleted="no").\ - filter_by(host=host).\ - first() - return result[0] or 0 - else: - result = model_query(context, - func.count(models.Volume.id), - func.sum(models.Volume.size), - read_deleted="no").\ - filter_by(host=host).\ - first() - # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) - - -@require_admin_context -def _volume_data_get_for_project(context, project_id, volume_type_id=None, - session=None): - query = model_query(context, - func.count(models.Volume.id), - func.sum(models.Volume.size), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id) - - if volume_type_id: - query = query.filter_by(volume_type_id=volume_type_id) - - result = query.first() - - # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) - - -@require_admin_context -def _backup_data_get_for_project(context, project_id, volume_type_id=None, - session=None): - query = model_query(context, - func.count(models.Backup.id), - func.sum(models.Backup.size), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id) - - if volume_type_id: - query = query.filter_by(volume_type_id=volume_type_id) - - result = query.first() - - # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) - - -@require_admin_context -def volume_data_get_for_project(context, project_id, volume_type_id=None): - 
return _volume_data_get_for_project(context, project_id, volume_type_id) - - -@require_admin_context -def finish_volume_migration(context, src_vol_id, dest_vol_id): - """Copy almost all columns from dest to source.""" - session = get_session() - with session.begin(): - src_volume_ref = _volume_get(context, src_vol_id, session=session) - dest_volume_ref = _volume_get(context, dest_vol_id, session=session) - - # NOTE(rpodolyaka): we should copy only column values, while model - # instances also have relationships attributes, which - # should be ignored - def is_column(inst, attr): - return attr in inst.__class__.__table__.columns - - for key, value in dest_volume_ref.iteritems(): - if key == 'id' or not is_column(dest_volume_ref, key): - continue - elif key == 'migration_status': - value = None - elif key == '_name_id': - value = dest_volume_ref['_name_id'] or dest_volume_ref['id'] - - setattr(src_volume_ref, key, value) - - -@require_admin_context -@_retry_on_deadlock -def volume_destroy(context, volume_id): - session = get_session() - now = timeutils.utcnow() - with session.begin(): - model_query(context, models.Volume, session=session).\ - filter_by(id=volume_id).\ - update({'status': 'deleted', - 'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - model_query(context, models.IscsiTarget, session=session).\ - filter_by(volume_id=volume_id).\ - update({'volume_id': None}) - model_query(context, models.VolumeMetadata, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - model_query(context, models.VolumeAdminMetadata, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - model_query(context, models.Transfer, session=session).\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': now, - 'updated_at': literal_column('updated_at')}) - - -@require_admin_context -def volume_detached(context, volume_id): - session = get_session() - with session.begin(): - volume_ref = _volume_get(context, volume_id, session=session) - # Hide status update from user if we're performing a volume migration - if not volume_ref['migration_status']: - volume_ref['status'] = 'available' - volume_ref['mountpoint'] = None - volume_ref['attach_status'] = 'detached' - volume_ref['instance_uuid'] = None - volume_ref['attached_host'] = None - volume_ref['attach_time'] = None - - -@require_context -def _volume_get_query(context, session=None, project_only=False): - if is_admin_context(context): - return model_query(context, models.Volume, session=session, - project_only=project_only).\ - options(joinedload('volume_metadata')).\ - options(joinedload('volume_admin_metadata')).\ - options(joinedload('volume_type')).\ - options(joinedload('consistencygroup')) - else: - return model_query(context, models.Volume, session=session, - project_only=project_only).\ - options(joinedload('volume_metadata')).\ - options(joinedload('volume_type')).\ - options(joinedload('consistencygroup')) - - -@require_context -def _volume_get(context, volume_id, session=None): - result = _volume_get_query(context, session=session, project_only=True).\ - filter_by(id=volume_id).\ - first() - - if not result: - raise exception.VolumeNotFound(volume_id=volume_id) - - return result - - -@require_context -def volume_get(context, volume_id): - return _volume_get(context, volume_id) - - -@require_admin_context 
-def volume_get_all(context, marker, limit, sort_key, sort_dir, - filters=None): - """Retrieves all volumes. - - :param context: context to query under - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_key: single attributes by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - :param filters: Filters for the query. A filter key/value of - 'no_migration_targets'=True causes volumes with either - a NULL 'migration_status' or a 'migration_status' that - does not start with 'target:' to be retrieved. - :returns: list of matching volumes - """ - session = get_session() - with session.begin(): - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_key, sort_dir, filters) - # No volumes would match, return empty list - if query is None: - return [] - return query.all() - - -@require_admin_context -def volume_get_all_by_host(context, host): - """Retrieves all volumes hosted on a host.""" - # As a side effect of the introduction of pool-aware scheduler, - # newly created volumes will have pool information appended to - # 'host' field of a volume record. So a volume record in DB can - # now be either form below: - # Host - # Host#Pool - if host and isinstance(host, basestring): - session = get_session() - with session.begin(): - host_attr = getattr(models.Volume, 'host') - conditions = [host_attr == host, - host_attr.op('LIKE')(host + '#%')] - result = _volume_get_query(context).filter(or_(*conditions)).all() - return result - elif not host: - return [] - - -@require_admin_context -def volume_get_all_by_group(context, group_id): - return _volume_get_query(context).filter_by(consistencygroup_id=group_id).\ - all() - - -@require_context -def volume_get_all_by_project(context, project_id, marker, limit, sort_key, - sort_dir, filters=None): - """"Retrieves all volumes in a project. - - :param context: context to query under - :param project_id: project for all volumes being retrieved - :param marker: the last item of the previous page, used to determine the - next page of results to return - :param limit: maximum number of items to return - :param sort_key: single attributes by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - :param filters: Filters for the query. A filter key/value of - 'no_migration_targets'=True causes volumes with either - a NULL 'migration_status' or a 'migration_status' that - does not start with 'target:' to be retrieved. - :returns: list of matching volumes - """ - session = get_session() - with session.begin(): - authorize_project_context(context, project_id) - # Add in the project filter without modifying the given filters - filters = filters.copy() if filters else {} - filters['project_id'] = project_id - # Generate the query - query = _generate_paginate_query(context, session, marker, limit, - sort_key, sort_dir, filters) - # No volumes would match, return empty list - if query is None: - return [] - return query.all() - - -def _generate_paginate_query(context, session, marker, limit, sort_key, - sort_dir, filters): - """Generate the query to include the filters and the paginate options. - - Returns a query with sorting / pagination criteria added or None - if the given filters will not yield any results. 
- - :param context: context to query under - :param session: the session to use - :param marker: the last item of the previous page; we returns the next - results after this value. - :param limit: maximum number of items to return - :param sort_key: single attributes by which results should be sorted - :param sort_dir: direction in which results should be sorted (asc, desc) - :param filters: dictionary of filters; values that are lists, - tuples, sets, or frozensets cause an 'IN' test to - be performed, while exact matching ('==' operator) - is used for other values - :returns: updated query or None - """ - query = _volume_get_query(context, session=session) - - if filters: - filters = filters.copy() - - # 'no_migration_targets' is unique, must be either NULL or - # not start with 'target:' - if ('no_migration_targets' in filters and - filters['no_migration_targets'] is True): - filters.pop('no_migration_targets') - try: - column_attr = getattr(models.Volume, 'migration_status') - conditions = [column_attr == None, # noqa - column_attr.op('NOT LIKE')('target:%')] - query = query.filter(or_(*conditions)) - except AttributeError: - log_msg = _("'migration_status' column could not be found.") - LOG.debug(log_msg) - return None - - if ('changes-since' in filters and - filters['changes-since'] is not None): - try: - timeStr = filters['changes-since'] - change_since_isotime = timeutils.parse_isotime(timeStr) - changes_since = timeutils.normalize_time(change_since_isotime) - column_attr = getattr(models.Volume, 'updated_at') - query = query.filter(column_attr >= changes_since) - filters.pop('changes-since') - except AttributeError: - log_msg = _("'update_at' column could not be found.") - LOG.debug(log_msg) - return None - - # Apply exact match filters for everything else, ensure that the - # filter value exists on the model - for key in filters.keys(): - # metadata is unique, must be a dict - if key == 'metadata': - if not isinstance(filters[key], dict): - log_msg = _("'metadata' filter value is not valid.") - LOG.debug(log_msg) - return None - continue - try: - column_attr = getattr(models.Volume, key) - # Do not allow relationship properties since those require - # schema specific knowledge - prop = getattr(column_attr, 'property') - if isinstance(prop, RelationshipProperty): - log_msg = (_("'%s' filter key is not valid, " - "it maps to a relationship.")) % key - LOG.debug(log_msg) - return None - except AttributeError: - log_msg = _("'%s' filter key is not valid.") % key - LOG.debug(log_msg) - return None - - # Holds the simple exact matches - filter_dict = {} - - # Iterate over all filters, special case the filter is necessary - for key, value in filters.iteritems(): - if key == 'metadata': - # model.VolumeMetadata defines the backref to Volumes as - # 'volume_metadata' or 'volume_admin_metadata', use those as - # column attribute keys - col_attr = getattr(models.Volume, 'volume_metadata') - col_ad_attr = getattr(models.Volume, 'volume_admin_metadata') - for k, v in value.iteritems(): - query = query.filter(or_(col_attr.any(key=k, value=v), - col_ad_attr.any(key=k, value=v))) - elif isinstance(value, (list, tuple, set, frozenset)): - # Looking for values in a list; apply to query directly - column_attr = getattr(models.Volume, key) - query = query.filter(column_attr.in_(value)) - else: - # OK, simple exact match; save for later - filter_dict[key] = value - - # Apply simple exact matches - if filter_dict: - query = query.filter_by(**filter_dict) - - marker_volume = None - if marker is not 
None: - marker_volume = _volume_get(context, marker, session) - - return sqlalchemyutils.paginate_query(query, models.Volume, limit, - [sort_key, 'created_at', 'id'], - marker=marker_volume, - sort_dir=sort_dir) - - -@require_admin_context -def volume_get_iscsi_target_num(context, volume_id): - result = model_query(context, models.IscsiTarget, read_deleted="yes").\ - filter_by(volume_id=volume_id).\ - first() - - if not result: - raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) - - return result.target_num - - -@require_context -def volume_update(context, volume_id, values): - session = get_session() - with session.begin(): - metadata = values.get('metadata') - if metadata is not None: - _volume_user_metadata_update(context, - volume_id, - values.pop('metadata'), - delete=True, - session=session) - - admin_metadata = values.get('admin_metadata') - if is_admin_context(context) and admin_metadata is not None: - _volume_admin_metadata_update(context, - volume_id, - values.pop('admin_metadata'), - delete=True, - session=session) - - volume_ref = _volume_get(context, volume_id, session=session) - volume_ref.update(values) - - return volume_ref - - -#################### - -def _volume_x_metadata_get_query(context, volume_id, model, session=None): - return model_query(context, model, session=session, read_deleted="no").\ - filter_by(volume_id=volume_id) - - -def _volume_x_metadata_get(context, volume_id, model, session=None): - rows = _volume_x_metadata_get_query(context, volume_id, model, - session=session).all() - result = {} - for row in rows: - result[row['key']] = row['value'] - - return result - - -def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec, - session=None): - result = _volume_x_metadata_get_query(context, volume_id, - model, session=session).\ - filter_by(key=key).\ - first() - - if not result: - raise notfound_exec(metadata_key=key, volume_id=volume_id) - return result - - -def _volume_x_metadata_update(context, volume_id, metadata, delete, - model, notfound_exec, session=None): - if not session: - session = get_session() - - with session.begin(subtransactions=True): - # Set existing metadata to deleted if delete argument is True - if delete: - original_metadata = _volume_x_metadata_get(context, volume_id, - model, session=session) - for meta_key, meta_value in original_metadata.iteritems(): - if meta_key not in metadata: - meta_ref = _volume_x_metadata_get_item(context, volume_id, - meta_key, model, - notfound_exec, - session=session) - meta_ref.update({'deleted': True}) - meta_ref.save(session=session) - - meta_ref = None - - # Now update all existing items with new values, or create new meta - # objects - for meta_key, meta_value in metadata.items(): - - # update the value whether it exists or not - item = {"value": meta_value} - - try: - meta_ref = _volume_x_metadata_get_item(context, volume_id, - meta_key, model, - notfound_exec, - session=session) - except notfound_exec: - meta_ref = model() - item.update({"key": meta_key, "volume_id": volume_id}) - - meta_ref.update(item) - meta_ref.save(session=session) - - return _volume_x_metadata_get(context, volume_id, model) - - -def _volume_user_metadata_get_query(context, volume_id, session=None): - return _volume_x_metadata_get_query(context, volume_id, - models.VolumeMetadata, session=session) - - -@require_context -@require_volume_exists -def _volume_user_metadata_get(context, volume_id, session=None): - return _volume_x_metadata_get(context, volume_id, - models.VolumeMetadata, 
session=session) - - -@require_context -def _volume_user_metadata_get_item(context, volume_id, key, session=None): - return _volume_x_metadata_get_item(context, volume_id, key, - models.VolumeMetadata, - exception.VolumeMetadataNotFound, - session=session) - - -@require_context -@require_volume_exists -def _volume_user_metadata_update(context, volume_id, metadata, delete, - session=None): - return _volume_x_metadata_update(context, volume_id, metadata, delete, - models.VolumeMetadata, - exception.VolumeMetadataNotFound, - session=session) - - -@require_context -@require_volume_exists -def volume_metadata_get_item(context, volume_id, key): - return _volume_user_metadata_get_item(context, volume_id, key) - - -@require_context -@require_volume_exists -def volume_metadata_get(context, volume_id): - return _volume_user_metadata_get(context, volume_id) - - -@require_context -@require_volume_exists -@_retry_on_deadlock -def volume_metadata_delete(context, volume_id, key): - _volume_user_metadata_get_query(context, volume_id).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -@require_volume_exists -@_retry_on_deadlock -def volume_metadata_update(context, volume_id, metadata, delete): - return _volume_user_metadata_update(context, volume_id, metadata, delete) - - -################### - - -def _volume_admin_metadata_get_query(context, volume_id, session=None): - return _volume_x_metadata_get_query(context, volume_id, - models.VolumeAdminMetadata, - session=session) - - -@require_admin_context -@require_volume_exists -def _volume_admin_metadata_get(context, volume_id, session=None): - return _volume_x_metadata_get(context, volume_id, - models.VolumeAdminMetadata, session=session) - - -@require_admin_context -@require_volume_exists -def _volume_admin_metadata_update(context, volume_id, metadata, delete, - session=None): - return _volume_x_metadata_update(context, volume_id, metadata, delete, - models.VolumeAdminMetadata, - exception.VolumeAdminMetadataNotFound, - session=session) - - -@require_admin_context -@require_volume_exists -def volume_admin_metadata_get(context, volume_id): - return _volume_admin_metadata_get(context, volume_id) - - -@require_admin_context -@require_volume_exists -@_retry_on_deadlock -def volume_admin_metadata_delete(context, volume_id, key): - _volume_admin_metadata_get_query(context, volume_id).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_admin_context -@require_volume_exists -@_retry_on_deadlock -def volume_admin_metadata_update(context, volume_id, metadata, delete): - return _volume_admin_metadata_update(context, volume_id, metadata, delete) - - -################### - - -@require_context -def snapshot_create(context, values): - values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), - models.SnapshotMetadata) - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - - session = get_session() - with session.begin(): - snapshot_ref = models.Snapshot() - snapshot_ref.update(values) - session.add(snapshot_ref) - - return _snapshot_get(context, values['id'], session=session) - - -@require_admin_context -@_retry_on_deadlock -def snapshot_destroy(context, snapshot_id): - session = get_session() - with session.begin(): - model_query(context, models.Snapshot, session=session).\ - filter_by(id=snapshot_id).\ - update({'status': 'deleted', - 'deleted': 
True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - model_query(context, models.SnapshotMetadata, session=session).\ - filter_by(snapshot_id=snapshot_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def _snapshot_get(context, snapshot_id, session=None): - result = model_query(context, models.Snapshot, session=session, - project_only=True).\ - options(joinedload('volume')).\ - options(joinedload('snapshot_metadata')).\ - filter_by(id=snapshot_id).\ - first() - - if not result: - raise exception.SnapshotNotFound(snapshot_id=snapshot_id) - - return result - - -@require_context -def snapshot_get(context, snapshot_id): - return _snapshot_get(context, snapshot_id) - - -@require_admin_context -def snapshot_get_all(context): - return model_query(context, models.Snapshot).\ - options(joinedload('snapshot_metadata')).\ - all() - - -@require_context -def snapshot_get_all_for_volume(context, volume_id): - return model_query(context, models.Snapshot, read_deleted='no', - project_only=True).\ - filter_by(volume_id=volume_id).\ - options(joinedload('snapshot_metadata')).\ - all() - - -@require_context -def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): - return model_query(context, models.Snapshot, read_deleted='no', - project_only=True).\ - filter_by(cgsnapshot_id=cgsnapshot_id).\ - options(joinedload('volume')).\ - options(joinedload('snapshot_metadata')).\ - all() - - -@require_context -def snapshot_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - return model_query(context, models.Snapshot).\ - filter_by(project_id=project_id).\ - options(joinedload('snapshot_metadata')).\ - all() - - -@require_context -def _snapshot_data_get_for_project(context, project_id, volume_type_id=None, - session=None): - authorize_project_context(context, project_id) - query = model_query(context, - func.count(models.Snapshot.id), - func.sum(models.Snapshot.volume_size), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id) - - if volume_type_id: - query = query.join('volume').filter_by(volume_type_id=volume_type_id) - - result = query.first() - - # NOTE(vish): convert None to 0 - return (result[0] or 0, result[1] or 0) - - -@require_context -def snapshot_data_get_for_project(context, project_id, volume_type_id=None): - return _snapshot_data_get_for_project(context, project_id, volume_type_id) - - -@require_context -def snapshot_get_active_by_window(context, begin, end=None, project_id=None): - """Return snapshots that were active during window.""" - - query = model_query(context, models.Snapshot, read_deleted="yes") - query = query.filter(or_(models.Snapshot.deleted_at == None, # noqa - models.Snapshot.deleted_at > begin)) - query = query.options(joinedload(models.Snapshot.volume)) - if end: - query = query.filter(models.Snapshot.created_at < end) - if project_id: - query = query.filter_by(project_id=project_id) - - return query.all() - - -@require_context -def snapshot_update(context, snapshot_id, values): - session = get_session() - with session.begin(): - snapshot_ref = _snapshot_get(context, snapshot_id, session=session) - snapshot_ref.update(values) - return snapshot_ref - -#################### - - -def _snapshot_metadata_get_query(context, snapshot_id, session=None): - return model_query(context, models.SnapshotMetadata, - session=session, read_deleted="no").\ - filter_by(snapshot_id=snapshot_id) - - 
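The delete argument threaded through the _volume_x_metadata_update helper above (and through snapshot_metadata_update below) selects between merging new keys into the existing metadata and replacing the metadata outright, soft-deleting any stored key that is absent from the new dict. A minimal usage sketch under that reading; ctxt and vol_id are placeholder values, not names from this patch:

# Hypothetical sketch only; 'ctxt' stands for a request context and
# 'vol_id' for an existing volume id.
volume_metadata_update(ctxt, vol_id, {'purpose': 'db'}, delete=False)
# -> existing keys are kept; only 'purpose' is added or overwritten.
volume_metadata_update(ctxt, vol_id, {'purpose': 'db'}, delete=True)
# -> every stored key missing from the new dict is marked deleted, so
#    'purpose' ends up as the volume's only live metadata key.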
-@require_context -@require_snapshot_exists -def _snapshot_metadata_get(context, snapshot_id, session=None): - rows = _snapshot_metadata_get_query(context, snapshot_id, session).all() - result = {} - for row in rows: - result[row['key']] = row['value'] - - return result - - -@require_context -@require_snapshot_exists -def snapshot_metadata_get(context, snapshot_id): - return _snapshot_metadata_get(context, snapshot_id) - - -@require_context -@require_snapshot_exists -@_retry_on_deadlock -def snapshot_metadata_delete(context, snapshot_id, key): - _snapshot_metadata_get_query(context, snapshot_id).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def _snapshot_metadata_get_item(context, snapshot_id, key, session=None): - result = _snapshot_metadata_get_query(context, - snapshot_id, - session=session).\ - filter_by(key=key).\ - first() - - if not result: - raise exception.SnapshotMetadataNotFound(metadata_key=key, - snapshot_id=snapshot_id) - return result - - -@require_context -@require_snapshot_exists -@_retry_on_deadlock -def snapshot_metadata_update(context, snapshot_id, metadata, delete): - session = get_session() - with session.begin(): - # Set existing metadata to deleted if delete argument is True - if delete: - original_metadata = _snapshot_metadata_get(context, snapshot_id, - session) - for meta_key, meta_value in original_metadata.iteritems(): - if meta_key not in metadata: - meta_ref = _snapshot_metadata_get_item(context, - snapshot_id, - meta_key, session) - meta_ref.update({'deleted': True}) - meta_ref.save(session=session) - - meta_ref = None - - # Now update all existing items with new values, or create new meta - # objects - for meta_key, meta_value in metadata.items(): - - # update the value whether it exists or not - item = {"value": meta_value} - - try: - meta_ref = _snapshot_metadata_get_item(context, snapshot_id, - meta_key, session) - except exception.SnapshotMetadataNotFound: - meta_ref = models.SnapshotMetadata() - item.update({"key": meta_key, "snapshot_id": snapshot_id}) - - meta_ref.update(item) - meta_ref.save(session=session) - - return snapshot_metadata_get(context, snapshot_id) - -################### - - -@require_admin_context -def volume_type_create(context, values): - """Create a new instance type. 
- - In order to pass in extra specs, the values dict should contain a - 'extra_specs' key/value pair: - {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} - """ - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - - session = get_session() - with session.begin(): - try: - _volume_type_get_by_name(context, values['name'], session) - raise exception.VolumeTypeExists(id=values['name']) - except exception.VolumeTypeNotFoundByName: - pass - try: - _volume_type_get(context, values['id'], session) - raise exception.VolumeTypeExists(id=values['id']) - except exception.VolumeTypeNotFound: - pass - try: - values['extra_specs'] = _metadata_refs(values.get('extra_specs'), - models.VolumeTypeExtraSpecs) - volume_type_ref = models.VolumeTypes() - volume_type_ref.update(values) - session.add(volume_type_ref) - except Exception as e: - raise db_exc.DBError(e) - return volume_type_ref - - -@require_context -def volume_type_get_all(context, inactive=False, filters=None): - """Returns a dict describing all volume_types with name as key.""" - filters = filters or {} - - read_deleted = "yes" if inactive else "no" - rows = model_query(context, models.VolumeTypes, - read_deleted=read_deleted).\ - options(joinedload('extra_specs')).\ - order_by("name").\ - all() - - result = {} - for row in rows: - result[row['name']] = _dict_with_extra_specs(row) - - return result - - -@require_context -def _volume_type_get(context, id, session=None, inactive=False): - read_deleted = "yes" if inactive else "no" - result = model_query(context, - models.VolumeTypes, - session=session, - read_deleted=read_deleted).\ - options(joinedload('extra_specs')).\ - filter_by(id=id).\ - first() - - if not result: - raise exception.VolumeTypeNotFound(volume_type_id=id) - - return _dict_with_extra_specs(result) - - -@require_context -def volume_type_get(context, id, inactive=False): - """Return a dict describing specific volume_type.""" - - return _volume_type_get(context, id, None, inactive) - - -@require_context -def _volume_type_get_by_name(context, name, session=None): - result = model_query(context, models.VolumeTypes, session=session).\ - options(joinedload('extra_specs')).\ - filter_by(name=name).\ - first() - - if not result: - raise exception.VolumeTypeNotFoundByName(volume_type_name=name) - else: - return _dict_with_extra_specs(result) - - -@require_context -def volume_type_get_by_name(context, name): - """Return a dict describing specific volume_type.""" - - return _volume_type_get_by_name(context, name) - - -@require_context -def volume_types_get_by_name_or_id(context, volume_type_list): - """Return a dict describing specific volume_type.""" - req_volume_types = [] - for vol_t in volume_type_list: - if not uuidutils.is_uuid_like(vol_t): - vol_type = _volume_type_get_by_name(context, vol_t) - else: - vol_type = _volume_type_get(context, vol_t) - req_volume_types.append(vol_type) - return req_volume_types - - -@require_admin_context -def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): - read_deleted = "yes" if inactive else "no" - return model_query(context, models.VolumeTypes, - read_deleted=read_deleted). \ - filter_by(qos_specs_id=qos_specs_id).all() - - -@require_admin_context -def volume_type_qos_associate(context, type_id, qos_specs_id): - session = get_session() - with session.begin(): - _volume_type_get(context, type_id, session) - - session.query(models.VolumeTypes). \ - filter_by(id=type_id). 
\ - update({'qos_specs_id': qos_specs_id, - 'updated_at': timeutils.utcnow()}) - - -@require_admin_context -def volume_type_qos_disassociate(context, qos_specs_id, type_id): - """Disassociate volume type from qos specs.""" - session = get_session() - with session.begin(): - _volume_type_get(context, type_id, session) - - session.query(models.VolumeTypes). \ - filter_by(id=type_id). \ - filter_by(qos_specs_id=qos_specs_id). \ - update({'qos_specs_id': None, - 'updated_at': timeutils.utcnow()}) - - -@require_admin_context -def volume_type_qos_disassociate_all(context, qos_specs_id): - """Disassociate all volume types associated with specified qos specs.""" - session = get_session() - with session.begin(): - session.query(models.VolumeTypes). \ - filter_by(qos_specs_id=qos_specs_id). \ - update({'qos_specs_id': None, - 'updated_at': timeutils.utcnow()}) - - -@require_admin_context -def volume_type_qos_specs_get(context, type_id): - """Return all qos specs for given volume type. - - result looks like: - { - 'qos_specs': - { - 'id': 'qos-specs-id', - 'name': 'qos_specs_name', - 'consumer': 'Consumer', - 'specs': { - 'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3' - } - } - } - - """ - session = get_session() - with session.begin(): - _volume_type_get(context, type_id, session) - - row = session.query(models.VolumeTypes). \ - options(joinedload('qos_specs')). \ - filter_by(id=type_id). \ - first() - - # row.qos_specs is a list of QualityOfServiceSpecs ref - specs = _dict_with_qos_specs(row.qos_specs) - - if not specs: - # turn empty list to None - specs = None - else: - specs = specs[0] - - return {'qos_specs': specs} - - -@require_admin_context -@_retry_on_deadlock -def volume_type_destroy(context, id): - session = get_session() - with session.begin(): - _volume_type_get(context, id, session) - results = model_query(context, models.Volume, session=session). 
\ - filter_by(volume_type_id=id).all() - if results: - msg = _('VolumeType %s deletion failed, VolumeType in use.') % id - LOG.error(msg) - raise exception.VolumeTypeInUse(volume_type_id=id) - model_query(context, models.VolumeTypes, session=session).\ - filter_by(id=id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - model_query(context, models.VolumeTypeExtraSpecs, session=session).\ - filter_by(volume_type_id=id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def volume_get_active_by_window(context, - begin, - end=None, - project_id=None): - """Return volumes that were active during window.""" - query = model_query(context, models.Volume, read_deleted="yes") - query = query.filter(or_(models.Volume.deleted_at == None, # noqa - models.Volume.deleted_at > begin)) - if end: - query = query.filter(models.Volume.created_at < end) - if project_id: - query = query.filter_by(project_id=project_id) - - return query.all() - - -#################### - - -def _volume_type_extra_specs_query(context, volume_type_id, session=None): - return model_query(context, models.VolumeTypeExtraSpecs, session=session, - read_deleted="no").\ - filter_by(volume_type_id=volume_type_id) - - -@require_context -def volume_type_extra_specs_get(context, volume_type_id): - rows = _volume_type_extra_specs_query(context, volume_type_id).\ - all() - - result = {} - for row in rows: - result[row['key']] = row['value'] - - return result - - -@require_context -def volume_type_extra_specs_delete(context, volume_type_id, key): - session = get_session() - with session.begin(): - _volume_type_extra_specs_get_item(context, volume_type_id, key, - session) - _volume_type_extra_specs_query(context, volume_type_id, session).\ - filter_by(key=key).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def _volume_type_extra_specs_get_item(context, volume_type_id, key, - session=None): - result = _volume_type_extra_specs_query( - context, volume_type_id, session=session).\ - filter_by(key=key).\ - first() - - if not result: - raise exception.VolumeTypeExtraSpecsNotFound( - extra_specs_key=key, - volume_type_id=volume_type_id) - - return result - - -@require_context -def volume_type_extra_specs_update_or_create(context, volume_type_id, - specs): - session = get_session() - with session.begin(): - spec_ref = None - for key, value in specs.iteritems(): - try: - spec_ref = _volume_type_extra_specs_get_item( - context, volume_type_id, key, session) - except exception.VolumeTypeExtraSpecsNotFound: - spec_ref = models.VolumeTypeExtraSpecs() - spec_ref.update({"key": key, "value": value, - "volume_type_id": volume_type_id, - "deleted": False}) - spec_ref.save(session=session) - - return specs - - -#################### - - -@require_admin_context -def qos_specs_create(context, values): - """Create a new QoS specs. - - :param values dictionary that contains specifications for QoS - e.g. 
{'name': 'Name', - 'qos_specs': { - 'consumer': 'front-end', - 'total_iops_sec': 1000, - 'total_bytes_sec': 1024000 - } - } - """ - specs_id = str(uuid.uuid4()) - - session = get_session() - with session.begin(): - try: - _qos_specs_get_by_name(context, values['name'], session) - raise exception.QoSSpecsExists(specs_id=values['name']) - except exception.QoSSpecsNotFound: - pass - try: - # Insert a root entry for QoS specs - specs_root = models.QualityOfServiceSpecs() - root = dict(id=specs_id) - # 'QoS_Specs_Name' is an internal reserved key to store - # the name of QoS specs - root['key'] = 'QoS_Specs_Name' - root['value'] = values['name'] - LOG.debug("DB qos_specs_create(): root %s", root) - specs_root.update(root) - specs_root.save(session=session) - - # Insert all specification entries for QoS specs - for k, v in values['qos_specs'].iteritems(): - item = dict(key=k, value=v, specs_id=specs_id) - item['id'] = str(uuid.uuid4()) - spec_entry = models.QualityOfServiceSpecs() - spec_entry.update(item) - spec_entry.save(session=session) - except Exception as e: - raise db_exc.DBError(e) - - return dict(id=specs_root.id, name=specs_root.value) - - -@require_admin_context -def _qos_specs_get_by_name(context, name, session=None, inactive=False): - read_deleted = 'yes' if inactive else 'no' - results = model_query(context, models.QualityOfServiceSpecs, - read_deleted=read_deleted, session=session). \ - filter_by(key='QoS_Specs_Name'). \ - filter_by(value=name). \ - options(joinedload('specs')).all() - - if not results: - raise exception.QoSSpecsNotFound(specs_id=name) - - return results - - -@require_admin_context -def _qos_specs_get_ref(context, qos_specs_id, session=None, inactive=False): - read_deleted = 'yes' if inactive else 'no' - result = model_query(context, models.QualityOfServiceSpecs, - read_deleted=read_deleted, session=session). \ - filter_by(id=qos_specs_id). \ - options(joinedload_all('specs')).all() - - if not result: - raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) - - return result - - -def _dict_with_children_specs(specs): - """Convert specs list to a dict.""" - result = {} - for spec in specs: - # Skip deleted keys - if not spec['deleted']: - result.update({spec['key']: spec['value']}) - - return result - - -def _dict_with_qos_specs(rows): - """Convert qos specs query results to list. - - Qos specs query results are a list of quality_of_service_specs refs, - some are root entry of a qos specs (key == 'QoS_Specs_Name') and the - rest are children entry, a.k.a detailed specs for a qos specs. This - function converts query results to a dict using spec name as key. - """ - result = [] - for row in rows: - if row['key'] == 'QoS_Specs_Name': - member = {} - member['name'] = row['value'] - member.update(dict(id=row['id'])) - if row.specs: - spec_dict = _dict_with_children_specs(row.specs) - member.update(dict(consumer=spec_dict['consumer'])) - del spec_dict['consumer'] - member.update(dict(specs=spec_dict)) - result.append(member) - return result - - -@require_admin_context -def qos_specs_get(context, qos_specs_id, inactive=False): - rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive) - - return _dict_with_qos_specs(rows)[0] - - -@require_admin_context -def qos_specs_get_all(context, inactive=False, filters=None): - """Returns a list of all qos_specs. - - Results is like: - [{ - 'id': SPECS-UUID, - 'name': 'qos_spec-1', - 'consumer': 'back-end', - 'specs': { - 'key1': 'value1', - 'key2': 'value2', - ... 
- } - }, - { - 'id': SPECS-UUID, - 'name': 'qos_spec-2', - 'consumer': 'front-end', - 'specs': { - 'key1': 'value1', - 'key2': 'value2', - ... - } - }, - ] - """ - filters = filters or {} - #TODO(zhiteng) Add filters for 'consumer' - - read_deleted = "yes" if inactive else "no" - rows = model_query(context, models.QualityOfServiceSpecs, - read_deleted=read_deleted). \ - options(joinedload_all('specs')).all() - - return _dict_with_qos_specs(rows) - - -@require_admin_context -def qos_specs_get_by_name(context, name, inactive=False): - rows = _qos_specs_get_by_name(context, name, None, inactive) - - return _dict_with_qos_specs(rows)[0] - - -@require_admin_context -def qos_specs_associations_get(context, qos_specs_id): - """Return all entities associated with specified qos specs. - - For now, the only entity that is possible to associate with - a qos specs is volume type, so this is just a wrapper of - volume_type_qos_associations_get(). But it's possible to - extend qos specs association to other entities, such as volumes, - sometime in future. - """ - # Raise QoSSpecsNotFound if no specs found - _qos_specs_get_ref(context, qos_specs_id, None) - return volume_type_qos_associations_get(context, qos_specs_id) - - -@require_admin_context -def qos_specs_associate(context, qos_specs_id, type_id): - """Associate volume type from specified qos specs.""" - return volume_type_qos_associate(context, type_id, qos_specs_id) - - -@require_admin_context -def qos_specs_disassociate(context, qos_specs_id, type_id): - """Disassociate volume type from specified qos specs.""" - return volume_type_qos_disassociate(context, qos_specs_id, type_id) - - -@require_admin_context -def qos_specs_disassociate_all(context, qos_specs_id): - """Disassociate all entities associated with specified qos specs. - - For now, the only entity that is possible to associate with - a qos specs is volume type, so this is just a wrapper of - volume_type_qos_disassociate_all(). But it's possible to - extend qos specs association to other entities, such as volumes, - sometime in future. - """ - return volume_type_qos_disassociate_all(context, qos_specs_id) - - -@require_admin_context -def qos_specs_item_delete(context, qos_specs_id, key): - session = get_session() - with session.begin(): - _qos_specs_get_item(context, qos_specs_id, key) - session.query(models.QualityOfServiceSpecs). \ - filter(models.QualityOfServiceSpecs.key == key). \ - filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_admin_context -def qos_specs_delete(context, qos_specs_id): - session = get_session() - with session.begin(): - _qos_specs_get_ref(context, qos_specs_id, session) - session.query(models.QualityOfServiceSpecs).\ - filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id, - models.QualityOfServiceSpecs.specs_id == - qos_specs_id)).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_admin_context -def _qos_specs_get_item(context, qos_specs_id, key, session=None): - result = model_query(context, models.QualityOfServiceSpecs, - session=session). \ - filter(models.QualityOfServiceSpecs.key == key). \ - filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). 
\ - first() - - if not result: - raise exception.QoSSpecsKeyNotFound( - specs_key=key, - specs_id=qos_specs_id) - - return result - - -@require_admin_context -def qos_specs_update(context, qos_specs_id, specs): - """Make updates to an existing qos specs. - - Perform add, update or delete key/values to a qos specs. - """ - - session = get_session() - with session.begin(): - # make sure qos specs exists - _qos_specs_get_ref(context, qos_specs_id, session) - spec_ref = None - for key in specs.keys(): - try: - spec_ref = _qos_specs_get_item( - context, qos_specs_id, key, session) - except exception.QoSSpecsKeyNotFound: - spec_ref = models.QualityOfServiceSpecs() - id = None - if spec_ref.get('id', None): - id = spec_ref['id'] - else: - id = str(uuid.uuid4()) - value = dict(id=id, key=key, value=specs[key], - specs_id=qos_specs_id, - deleted=False) - LOG.debug('qos_specs_update() value: %s' % value) - spec_ref.update(value) - spec_ref.save(session=session) - - return specs - - -#################### - - -@require_context -def volume_type_encryption_get(context, volume_type_id, session=None): - return model_query(context, models.Encryption, session=session, - read_deleted="no").\ - filter_by(volume_type_id=volume_type_id).first() - - -@require_admin_context -def volume_type_encryption_delete(context, volume_type_id): - session = get_session() - with session.begin(): - encryption = volume_type_encryption_get(context, volume_type_id, - session) - encryption.update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_admin_context -def volume_type_encryption_create(context, volume_type_id, values): - session = get_session() - with session.begin(): - encryption = models.Encryption() - - if 'volume_type_id' not in values: - values['volume_type_id'] = volume_type_id - - encryption.update(values) - session.add(encryption) - - return encryption - - -@require_admin_context -def volume_type_encryption_update(context, volume_type_id, values): - session = get_session() - with session.begin(): - encryption = volume_type_encryption_get(context, volume_type_id, - session) - - if not encryption: - raise exception.VolumeTypeEncryptionNotFound(type_id= - volume_type_id) - - encryption.update(values) - - return encryption - - -def volume_type_encryption_volume_get(context, volume_type_id, session=None): - volume_list = _volume_get_query(context, session=session, - project_only=False).\ - filter_by(volume_type_id=volume_type_id).\ - all() - return volume_list - -#################### - - -@require_context -def volume_encryption_metadata_get(context, volume_id, session=None): - """Return the encryption key id for a given volume.""" - - volume_ref = _volume_get(context, volume_id) - encryption_ref = volume_type_encryption_get(context, - volume_ref['volume_type_id']) - - return { - 'encryption_key_id': volume_ref['encryption_key_id'], - 'control_location': encryption_ref['control_location'], - 'cipher': encryption_ref['cipher'], - 'key_size': encryption_ref['key_size'], - 'provider': encryption_ref['provider'], - } - - -#################### - - -@require_context -def _volume_glance_metadata_get_all(context, session=None): - query = model_query(context, - models.VolumeGlanceMetadata, - session=session) - if is_user_context(context): - query = query.filter( - models.Volume.id == models.VolumeGlanceMetadata.volume_id, - models.Volume.project_id == context.project_id) - return query.all() - - -@require_context -def volume_glance_metadata_get_all(context): - 
"""Return the Glance metadata for all volumes.""" - - return _volume_glance_metadata_get_all(context) - - -@require_context -@require_volume_exists -def _volume_glance_metadata_get(context, volume_id, session=None): - rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ - filter_by(volume_id=volume_id).\ - filter_by(deleted=False).\ - all() - - if not rows: - raise exception.GlanceMetadataNotFound(id=volume_id) - - return rows - - -@require_context -@require_volume_exists -def volume_glance_metadata_get(context, volume_id): - """Return the Glance metadata for the specified volume.""" - - return _volume_glance_metadata_get(context, volume_id) - - -@require_context -@require_snapshot_exists -def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None): - rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ - filter_by(snapshot_id=snapshot_id).\ - filter_by(deleted=False).\ - all() - - if not rows: - raise exception.GlanceMetadataNotFound(id=snapshot_id) - - return rows - - -@require_context -@require_snapshot_exists -def volume_snapshot_glance_metadata_get(context, snapshot_id): - """Return the Glance metadata for the specified snapshot.""" - - return _volume_snapshot_glance_metadata_get(context, snapshot_id) - - -@require_context -@require_volume_exists -def volume_glance_metadata_create(context, volume_id, key, value): - """Update the Glance metadata for a volume by adding a new key:value pair. - - This API does not support changing the value of a key once it has been - created. - """ - - session = get_session() - with session.begin(): - rows = session.query(models.VolumeGlanceMetadata).\ - filter_by(volume_id=volume_id).\ - filter_by(key=key).\ - filter_by(deleted=False).all() - - if len(rows) > 0: - raise exception.GlanceMetadataExists(key=key, - volume_id=volume_id) - - vol_glance_metadata = models.VolumeGlanceMetadata() - vol_glance_metadata.volume_id = volume_id - vol_glance_metadata.key = key - vol_glance_metadata.value = str(value) - - session.add(vol_glance_metadata) - - return - - -@require_context -@require_snapshot_exists -def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): - """Update the Glance metadata for a snapshot. - - This copies all of the key:value pairs from the originating volume, to - ensure that a volume created from the snapshot will retain the - original metadata. - """ - - session = get_session() - with session.begin(): - metadata = _volume_glance_metadata_get(context, volume_id, - session=session) - for meta in metadata: - vol_glance_metadata = models.VolumeGlanceMetadata() - vol_glance_metadata.snapshot_id = snapshot_id - vol_glance_metadata.key = meta['key'] - vol_glance_metadata.value = meta['value'] - - vol_glance_metadata.save(session=session) - - -@require_context -@require_volume_exists -def volume_glance_metadata_copy_from_volume_to_volume(context, - src_volume_id, - volume_id): - """Update the Glance metadata for a volume. - - This copies all all of the key:value pairs from the originating volume, - to ensure that a volume created from the volume (clone) will - retain the original metadata. 
- """ - - session = get_session() - with session.begin(): - metadata = _volume_glance_metadata_get(context, - src_volume_id, - session=session) - for meta in metadata: - vol_glance_metadata = models.VolumeGlanceMetadata() - vol_glance_metadata.volume_id = volume_id - vol_glance_metadata.key = meta['key'] - vol_glance_metadata.value = meta['value'] - - vol_glance_metadata.save(session=session) - - -@require_context -@require_volume_exists -def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): - """Update the Glance metadata from a volume (created from a snapshot) by - copying all of the key:value pairs from the originating snapshot. - - This is so that the Glance metadata from the original volume is retained. - """ - - session = get_session() - with session.begin(): - metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id, - session=session) - for meta in metadata: - vol_glance_metadata = models.VolumeGlanceMetadata() - vol_glance_metadata.volume_id = volume_id - vol_glance_metadata.key = meta['key'] - vol_glance_metadata.value = meta['value'] - - vol_glance_metadata.save(session=session) - - -@require_context -def volume_glance_metadata_delete_by_volume(context, volume_id): - model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ - filter_by(volume_id=volume_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): - model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ - filter_by(snapshot_id=snapshot_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -############################### - - -@require_context -def backup_get(context, backup_id): - result = model_query(context, models.Backup, project_only=True).\ - filter_by(id=backup_id).\ - first() - - if not result: - raise exception.BackupNotFound(backup_id=backup_id) - - return result - - -def _backup_get_all(context, filters=None): - session = get_session() - with session.begin(): - # Generate the query - query = model_query(context, models.Backup) - if filters: - query = query.filter_by(**filters) - - return query.all() - - -@require_admin_context -def backup_get_all(context, filters=None): - return _backup_get_all(context, filters) - - -@require_admin_context -def backup_get_all_by_host(context, host): - return model_query(context, models.Backup).filter_by(host=host).all() - - -@require_context -def backup_get_all_by_project(context, project_id, filters=None): - - authorize_project_context(context, project_id) - if not filters: - filters = {} - else: - filters = filters.copy() - - filters['project_id'] = project_id - - return _backup_get_all(context, filters) - - -@require_context -def backup_create(context, values): - backup = models.Backup() - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - backup.update(values) - - session = get_session() - with session.begin(): - backup.save(session) - return backup - - -@require_context -def backup_update(context, backup_id, values): - session = get_session() - with session.begin(): - backup = model_query(context, models.Backup, - session=session, read_deleted="yes").\ - filter_by(id=backup_id).first() - - if not backup: - raise exception.BackupNotFound( - _("No backup with id %s") % backup_id) - - backup.update(values) - - return backup - - -@require_admin_context -def backup_destroy(context, 
backup_id): - model_query(context, models.Backup).\ - filter_by(id=backup_id).\ - update({'status': 'deleted', - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -############################### - - -@require_context -def _transfer_get(context, transfer_id, session=None): - query = model_query(context, models.Transfer, - session=session).\ - filter_by(id=transfer_id) - - if not is_admin_context(context): - volume = models.Volume - query = query.filter(models.Transfer.volume_id == volume.id, - volume.project_id == context.project_id) - - result = query.first() - - if not result: - raise exception.TransferNotFound(transfer_id=transfer_id) - - return result - - -@require_context -def transfer_get(context, transfer_id): - return _transfer_get(context, transfer_id) - - -def _translate_transfers(transfers): - results = [] - for transfer in transfers: - r = {} - r['id'] = transfer['id'] - r['volume_id'] = transfer['volume_id'] - r['display_name'] = transfer['display_name'] - r['created_at'] = transfer['created_at'] - r['deleted'] = transfer['deleted'] - results.append(r) - return results - - -@require_admin_context -def transfer_get_all(context): - results = model_query(context, models.Transfer).all() - return _translate_transfers(results) - - -@require_context -def transfer_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - query = model_query(context, models.Transfer).\ - filter(models.Volume.id == models.Transfer.volume_id, - models.Volume.project_id == project_id) - results = query.all() - return _translate_transfers(results) - - -@require_context -def transfer_create(context, values): - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - session = get_session() - with session.begin(): - volume_ref = _volume_get(context, - values['volume_id'], - session=session) - if volume_ref['status'] != 'available': - msg = _('Volume must be available') - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - volume_ref['status'] = 'awaiting-transfer' - transfer = models.Transfer() - transfer.update(values) - session.add(transfer) - volume_ref.update(volume_ref) - - return transfer - - -@require_context -@_retry_on_deadlock -def transfer_destroy(context, transfer_id): - session = get_session() - with session.begin(): - transfer_ref = _transfer_get(context, - transfer_id, - session=session) - volume_ref = _volume_get(context, - transfer_ref['volume_id'], - session=session) - # If the volume state is not 'awaiting-transfer' don't change it, but - # we can still mark the transfer record as deleted. 
- if volume_ref['status'] != 'awaiting-transfer': - msg = _('Volume in unexpected state %s, ' - 'expected awaiting-transfer') % volume_ref['status'] - LOG.error(msg) - else: - volume_ref['status'] = 'available' - volume_ref.update(volume_ref) - volume_ref.save(session=session) - model_query(context, models.Transfer, session=session).\ - filter_by(id=transfer_id).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -@require_context -def transfer_accept(context, transfer_id, user_id, project_id): - session = get_session() - with session.begin(): - transfer_ref = _transfer_get(context, transfer_id, session) - volume_id = transfer_ref['volume_id'] - volume_ref = _volume_get(context, volume_id, session=session) - if volume_ref['status'] != 'awaiting-transfer': - msg = _('Transfer %(transfer_id)s: Volume id %(volume_id)s in ' - 'unexpected state %(status)s, expected ' - 'awaiting-transfer') % {'transfer_id': transfer_id, - 'volume_id': volume_ref['id'], - 'status': volume_ref['status']} - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - volume_ref['status'] = 'available' - volume_ref['user_id'] = user_id - volume_ref['project_id'] = project_id - volume_ref['updated_at'] = literal_column('updated_at') - volume_ref.update(volume_ref) - - session.query(models.Transfer).\ - filter_by(id=transfer_ref['id']).\ - update({'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -############################### - - -@require_admin_context -def _consistencygroup_data_get_for_project(context, project_id, - session=None): - query = model_query(context, - func.count(models.ConsistencyGroup.id), - read_deleted="no", - session=session).\ - filter_by(project_id=project_id) - - result = query.first() - - return (0, result[0] or 0) - - -@require_admin_context -def consistencygroup_data_get_for_project(context, project_id): - return _consistencygroup_data_get_for_project(context, project_id) - - -@require_context -def _consistencygroup_get(context, consistencygroup_id, session=None): - result = model_query(context, models.ConsistencyGroup, session=session, - project_only=True).\ - filter_by(id=consistencygroup_id).\ - first() - - if not result: - raise exception.ConsistencyGroupNotFound( - consistencygroup_id=consistencygroup_id) - - return result - - -@require_context -def consistencygroup_get(context, consistencygroup_id): - return _consistencygroup_get(context, consistencygroup_id) - - -@require_admin_context -def consistencygroup_get_all(context): - return model_query(context, models.ConsistencyGroup).all() - - -@require_admin_context -def consistencygroup_get_all_by_host(context, host): - return model_query(context, models.ConsistencyGroup).\ - filter_by(host=host).all() - - -@require_context -def consistencygroup_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - return model_query(context, models.ConsistencyGroup).\ - filter_by(project_id=project_id).all() - - -@require_context -def consistencygroup_create(context, values): - consistencygroup = models.ConsistencyGroup() - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - - session = get_session() - with session.begin(): - consistencygroup.update(values) - session.add(consistencygroup) - - return _consistencygroup_get(context, values['id'], session=session) - - -@require_context -def consistencygroup_update(context, consistencygroup_id, values): - session = get_session() - with 
session.begin(): - result = model_query(context, models.ConsistencyGroup, project_only=True).\ - filter_by(id=consistencygroup_id).\ - first() - - if not result: - raise exception.ConsistencyGroupNotFound( - _("No consistency group with id %s") % consistencygroup_id) - - result.update(values) - result.save(session=session) - return result - - -@require_admin_context -def consistencygroup_destroy(context, consistencygroup_id): - session = get_session() - with session.begin(): - model_query(context, models.ConsistencyGroup, session=session).\ - filter_by(id=consistencygroup_id).\ - update({'status': 'deleted', - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) - - -############################### - - -@require_context -def _cgsnapshot_get(context, cgsnapshot_id, session=None): - result = model_query(context, models.Cgsnapshot, session=session, - project_only=True).\ - filter_by(id=cgsnapshot_id).\ - first() - - if not result: - raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id) - - return result - - -@require_context -def cgsnapshot_get(context, cgsnapshot_id): - return _cgsnapshot_get(context, cgsnapshot_id) - - -@require_admin_context -def cgsnapshot_get_all(context): - return model_query(context, models.Cgsnapshot).all() - - -@require_admin_context -def cgsnapshot_get_all_by_host(context, host): - return model_query(context, models.Cgsnapshot).filter_by(host=host).all() - - -@require_admin_context -def cgsnapshot_get_all_by_group(context, group_id): - return model_query(context, models.Cgsnapshot).\ - filter_by(consistencygroup_id=group_id).all() - - -@require_context -def cgsnapshot_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - return model_query(context, models.Cgsnapshot).\ - filter_by(project_id=project_id).all() - - -@require_context -def cgsnapshot_create(context, values): - cgsnapshot = models.Cgsnapshot() - if not values.get('id'): - values['id'] = str(uuid.uuid4()) - - session = get_session() - with session.begin(): - cgsnapshot.update(values) - session.add(cgsnapshot) - - return _cgsnapshot_get(context, values['id'], session=session) - - -@require_context -def cgsnapshot_update(context, cgsnapshot_id, values): - session = get_session() - with session.begin(): - result = model_query(context, models.Cgsnapshot, project_only=True).\ - filter_by(id=cgsnapshot_id).\ - first() - - if not result: - raise exception.CgSnapshotNotFound( - _("No cgsnapshot with id %s") % cgsnapshot_id) - - result.update(values) - result.save(session=session) - return result - - -@require_admin_context -def cgsnapshot_destroy(context, cgsnapshot_id): - session = get_session() - with session.begin(): - model_query(context, models.Cgsnapshot, session=session).\ - filter_by(id=cgsnapshot_id).\ - update({'status': 'deleted', - 'deleted': True, - 'deleted_at': timeutils.utcnow(), - 'updated_at': literal_column('updated_at')}) diff --git a/juno-patches/cinder/timestamp-query-patch/installation/install.sh b/juno-patches/cinder/timestamp-query-patch/installation/install.sh deleted file mode 100644 index e33309db..00000000 --- a/juno-patches/cinder/timestamp-query-patch/installation/install.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -_CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder" -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="../cinder" -_BACKUP_DIR="${_CINDER_DIR}/cinder_timestamp_query_patch-installation-backup" -_SCRIPT_LOGFILE="/var/log/cinder/cinder_timestamp_query_patch/installation/install.log" -
-function log() -{ - log_path=`dirname ${_SCRIPT_LOGFILE}` - if [ ! -d $log_path ] ; then - mkdir -p $log_path - chmod 777 $_SCRIPT_LOGFILE - fi - echo "$@" - echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE -} -
-if [[ ${EUID} -ne 0 ]]; then - log "Please run as root." - exit 1 -fi - - -cd `dirname $0` -
-log "checking installation directories..." -if [ ! -d "${_CINDER_DIR}" ] ; then - log "Could not find the cinder installation. Please check the variables in the beginning of the script." - log "aborted." - exit 1 -fi -
-log "checking previous installation..." -if [ -d "${_BACKUP_DIR}/cinder" ] ; then - log "It seems cinder timestamp query has already been installed!" - log "Please check README for solution if this is not true." - exit 1 -fi -
-log "backing up current files that might be overwritten..." -mkdir -p "${_BACKUP_DIR}/cinder" -mkdir -p "${_BACKUP_DIR}/etc/cinder" -cp -r "${_CINDER_DIR}/db" "${_BACKUP_DIR}/cinder" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/cinder" - echo "Error in code backup, aborted." - exit 1 -fi -
-log "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_CINDER_DIR}` -if [ $? -ne 0 ] ; then - log "Error in copying, aborted." - log "Recovering original files..." - cp -r "${_BACKUP_DIR}/cinder" `dirname ${_CINDER_DIR}` && rm -r "${_BACKUP_DIR}/cinder" - if [ $? -ne 0 ] ; then - log "Recovering failed! Please install manually." - fi - exit 1 -fi -
-service openstack-cinder-api restart -
-if [ $? -ne 0 ] ; then - log "There was an error in restarting the service, please restart cinder api manually." - exit 1 -fi -
-log "Completed." -log "See README to get started." -exit 0 diff --git a/juno-patches/glance/glance_location_patch/README.md b/juno-patches/glance/glance_location_patch/README.md deleted file mode 100644 index 82768a4a..00000000 --- a/juno-patches/glance/glance_location_patch/README.md +++ /dev/null @@ -1,22 +0,0 @@ -Glance-Cascading Patch -================ - - -Introduction ------------------------------ - -*For glance cascading, we have to create a relationship between one cascading glance and several cascaded glances. To achieve this, we use glance's multi-location feature: the relationship is stored as a location with a special format. Besides, we modify the image status transition rule: an image only becomes 'active' once the cascaded glances have been synced.
For these two reasons, a few existing source files were modified to adapt them for cascading: - - glance/api/v2/images.py - glance/gateway.py - glance/common/utils.py - glance/common/config.py - glance/common/exception.py - -**Because in Juno the glance store code was moved out of glance into an independent python project, the corresponding Juno modifications to the glance store live in the glance_store patch instead.** - - Install - ------------------------------ - - - *To apply this patch, simply replace the original files with these files, or run the install.sh in the glancesync/installation/ directory. diff --git a/juno-patches/glance/glance_location_patch/glance-egg-info/entry_points.txt b/juno-patches/glance/glance_location_patch/glance-egg-info/entry_points.txt deleted file mode 100644 index 476cf23a..00000000 --- a/juno-patches/glance/glance_location_patch/glance-egg-info/entry_points.txt +++ /dev/null @@ -1,21 +0,0 @@ -[console_scripts] -glance-api = glance.cmd.api:main -glance-cache-cleaner = glance.cmd.cache_cleaner:main -glance-cache-manage = glance.cmd.cache_manage:main -glance-cache-prefetcher = glance.cmd.cache_prefetcher:main -glance-cache-pruner = glance.cmd.cache_pruner:main -glance-control = glance.cmd.control:main -glance-manage = glance.cmd.manage:main -glance-registry = glance.cmd.registry:main -glance-replicator = glance.cmd.replicator:main -glance-scrubber = glance.cmd.scrubber:main -
-[glance.common.image_location_strategy.modules] -location_order_strategy = glance.common.location_strategy.location_order -store_type_strategy = glance.common.location_strategy.store_type -
-[glance.sync.store.location] -filesystem = glance.sync.store._drivers.filesystem:LocationCreator -
-[glance.sync.store.driver] -filesystem = glance.sync.store._drivers.filesystem:Store \ No newline at end of file diff --git a/juno-patches/glance/glance_location_patch/glance/api/v2/images.py b/juno-patches/glance/glance_location_patch/glance/api/v2/images.py deleted file mode 100644 index fc287dbd..00000000 --- a/juno-patches/glance/glance_location_patch/glance/api/v2/images.py +++ /dev/null @@ -1,856 +0,0 @@ -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
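The [glance.sync.store.location] and [glance.sync.store.driver] sections in the entry_points.txt above register the filesystem sync-store classes as setuptools entry points; presumably they are resolved with the standard entry-point machinery, roughly as in the following sketch (an illustrative assumption, not code taken from this patch):

import pkg_resources

# Sketch: look up the 'filesystem' driver registered under the
# glance.sync.store.driver group listed in entry_points.txt above.
for ep in pkg_resources.iter_entry_points('glance.sync.store.driver'):
    if ep.name == 'filesystem':
        # load() imports glance.sync.store._drivers.filesystem and returns
        # its Store class; instantiation is left to the calling code.
        store_cls = ep.load()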
- -import re - -import glance_store -from oslo.config import cfg -import six -import six.moves.urllib.parse as urlparse -import webob.exc - -from glance.api import policy -from glance.common import exception -from glance.common import location_strategy -from glance.common import utils -from glance.common import wsgi -import glance.db -import glance.gateway -import glance.notifier -from glance.openstack.common import gettextutils -from glance.openstack.common import jsonutils as json -import glance.openstack.common.log as logging -from glance.openstack.common import timeutils -import glance.schema -import glance.sync.client.v1.api as sync_api - -LOG = logging.getLogger(__name__) -_LI = gettextutils._LI -_LW = gettextutils._LW - -CONF = cfg.CONF -CONF.import_opt('disk_formats', 'glance.common.config', group='image_format') -CONF.import_opt('container_formats', 'glance.common.config', - group='image_format') - - -class ImagesController(object): - def __init__(self, db_api=None, policy_enforcer=None, notifier=None, - store_api=None): - self.db_api = db_api or glance.db.get_api() - self.policy = policy_enforcer or policy.Enforcer() - self.notifier = notifier or glance.notifier.Notifier() - self.store_api = store_api or glance_store - self.sync_api = sync_api - self.sync_api.configure_sync_client() - self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, - self.notifier, self.policy, - self.sync_api) - - @utils.mutating - def create(self, req, image, extra_properties, tags): - image_factory = self.gateway.get_image_factory(req.context) - image_repo = self.gateway.get_repo(req.context) - try: - image = image_factory.new_image(extra_properties=extra_properties, - tags=tags, **image) - image_repo.add(image) - except exception.DuplicateLocation as dup: - raise webob.exc.HTTPBadRequest(explanation=dup.msg) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.InvalidParameterValue as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.LimitExceeded as e: - LOG.info(utils.exception_to_str(e)) - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=e.msg, request=req, content_type='text/plain') - except exception.Duplicate as dupex: - raise webob.exc.HTTPConflict(explanation=dupex.msg) - - return image - - def index(self, req, marker=None, limit=None, sort_key='created_at', - sort_dir='desc', filters=None, member_status='accepted'): - result = {} - if filters is None: - filters = {} - filters['deleted'] = False - - if limit is None: - limit = CONF.limit_param_default - limit = min(CONF.api_limit_max, limit) - - image_repo = self.gateway.get_repo(req.context) - try: - images = image_repo.list(marker=marker, limit=limit, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters, - member_status=member_status) - if len(images) != 0 and len(images) == limit: - result['next_marker'] = images[-1].image_id - except (exception.NotFound, exception.InvalidSortKey, - exception.InvalidFilterRangeValue) as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - result['images'] = images - return result - - def show(self, req, image_id): - image_repo = self.gateway.get_repo(req.context) - try: - return image_repo.get(image_id) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - - @utils.mutating - def update(self, req, 
image_id, changes): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - - for change in changes: - change_method_name = '_do_%s' % change['op'] - assert hasattr(self, change_method_name) - change_method = getattr(self, change_method_name) - change_method(req, image, change) - - if changes: - image_repo.save(image) - except exception.NotFound as e: - raise webob.exc.HTTPNotFound(explanation=e.msg) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.InvalidParameterValue as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - except exception.StorageQuotaFull as e: - msg = (_LI("Denying attempt to upload image because it exceeds the" - " .quota: %s") % utils.exception_to_str(e)) - LOG.info(msg) - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=msg, request=req, content_type='text/plain') - except exception.LimitExceeded as e: - LOG.info(utils.exception_to_str(e)) - raise webob.exc.HTTPRequestEntityTooLarge( - explanation=e.msg, request=req, content_type='text/plain') - - return image - - def _do_replace(self, req, image, change): - path = change['path'] - path_root = path[0] - value = change['value'] - if path_root == 'locations': - self._do_replace_locations(image, value) - else: - if hasattr(image, path_root): - setattr(image, path_root, value) - elif path_root in image.extra_properties: - image.extra_properties[path_root] = value - else: - msg = _("Property %s does not exist.") - raise webob.exc.HTTPConflict(msg % path_root) - - def _do_add(self, req, image, change): - path = change['path'] - path_root = path[0] - value = change['value'] - if path_root == 'locations': - self._do_add_locations(image, path[1], value) - else: - if (hasattr(image, path_root) or - path_root in image.extra_properties): - msg = _("Property %s already present.") - raise webob.exc.HTTPConflict(msg % path_root) - image.extra_properties[path_root] = value - - def _do_remove(self, req, image, change): - path = change['path'] - path_root = path[0] - if path_root == 'locations': - self._do_remove_locations(image, path[1]) - else: - if hasattr(image, path_root): - msg = _("Property %s may not be removed.") - raise webob.exc.HTTPForbidden(msg % path_root) - elif path_root in image.extra_properties: - del image.extra_properties[path_root] - else: - msg = _("Property %s does not exist.") - raise webob.exc.HTTPConflict(msg % path_root) - - @utils.mutating - def delete(self, req, image_id): - image_repo = self.gateway.get_repo(req.context) - try: - image = image_repo.get(image_id) - image.delete() - image_repo.remove(image) - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - except exception.NotFound as e: - msg = (_("Failed to find image %(image_id)s to delete") % - {'image_id': image_id}) - LOG.info(msg) - raise webob.exc.HTTPNotFound(explanation=msg) - except exception.InUseByStore as e: - msg = (_LI("Image %s could not be deleted " - "because it is in use: %s") % (image_id, e.msg)) - LOG.info(msg) - raise webob.exc.HTTPConflict(explanation=msg) - - def _get_locations_op_pos(self, path_pos, max_pos, allow_max): - if path_pos is None or max_pos is None: - return None - pos = max_pos if allow_max else max_pos - 1 - if path_pos.isdigit(): - pos = int(path_pos) - elif path_pos != '-': - return None - if (not allow_max) and (pos not in range(max_pos)): - return None - return pos - - def _do_replace_locations(self, image, value): - if len(image.locations) > 0 and len(value) > 0: - msg = 
_("Cannot replace locations from a non-empty " - "list to a non-empty list.") - raise webob.exc.HTTPBadRequest(explanation=msg) - if len(value) == 0: - # NOTE(zhiyan): this actually deletes the location - # from the backend store. - del image.locations[:] - if image.status == 'active': - image.status = 'queued' - else: # NOTE(zhiyan): len(image.locations) == 0 - try: - image.locations = value - if image.status == 'queued': - image.status = 'active' - except (exception.BadStoreUri, exception.DuplicateLocation) as bse: - raise webob.exc.HTTPBadRequest(explanation=bse.msg) - except ValueError as ve: # update image status failed. - raise webob.exc.HTTPBadRequest(explanation= - utils.exception_to_str(ve)) - - def _do_add_locations(self, image, path_pos, value): - pos = self._get_locations_op_pos(path_pos, - len(image.locations), True) - if pos is None: - msg = _("Invalid position for adding a location.") - raise webob.exc.HTTPBadRequest(explanation=msg) - try: - image.locations.insert(pos, value) - if image.status == 'queued': - image.status = 'active' - except (exception.BadStoreUri, exception.DuplicateLocation) as bse: - raise webob.exc.HTTPBadRequest(explanation=bse.msg) - except ValueError as ve: # update image status failed. - raise webob.exc.HTTPBadRequest(explanation= - utils.exception_to_str(ve)) - - def _do_remove_locations(self, image, path_pos): - pos = self._get_locations_op_pos(path_pos, - len(image.locations), False) - if pos is None: - msg = _("Invalid position for removing a location.") - raise webob.exc.HTTPBadRequest(explanation=msg) - try: - # NOTE(zhiyan): this actually deletes the location - # from the backend store. - image.locations.pop(pos) - except Exception as e: - raise webob.exc.HTTPInternalServerError(explanation= - utils.exception_to_str(e)) - if (len(image.locations) == 0) and (image.status == 'active'): - image.status = 'queued' - - -class RequestDeserializer(wsgi.JSONRequestDeserializer): - - _disallowed_properties = ['direct_url', 'self', 'file', 'schema'] - _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', - 'size', 'virtual_size', 'direct_url', 'self', - 'file', 'schema'] - _reserved_properties = ['owner', 'is_public', 'location', 'deleted', - 'deleted_at'] - _base_properties = ['checksum', 'created_at', 'container_format', - 'disk_format', 'id', 'min_disk', 'min_ram', 'name', - 'size', 'virtual_size', 'status', 'tags', - 'updated_at', 'visibility', 'protected'] - _path_depth_limits = {'locations': {'add': 2, 'remove': 2, 'replace': 1}} - - def __init__(self, schema=None): - super(RequestDeserializer, self).__init__() - self.schema = schema or get_schema() - - def _get_request_body(self, request): - output = super(RequestDeserializer, self).default(request) - if 'body' not in output: - msg = _('Body expected in request.') - raise webob.exc.HTTPBadRequest(explanation=msg) - return output['body'] - - @classmethod - def _check_allowed(cls, image): - for key in cls._disallowed_properties: - if key in image: - msg = _("Attribute '%s' is read-only.") % key - raise webob.exc.HTTPForbidden(explanation= - utils.exception_to_str(msg)) - - def create(self, request): - body = self._get_request_body(request) - self._check_allowed(body) - try: - self.schema.validate(body) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - image = {} - properties = body - tags = properties.pop('tags', None) - for key in self._base_properties: - try: - # NOTE(flwang): Instead of changing the _check_unexpected - # of 
ImageFactory. It would be better to do the mapping - # at here. - if key == 'id': - image['image_id'] = properties.pop(key) - else: - image[key] = properties.pop(key) - except KeyError: - pass - return dict(image=image, extra_properties=properties, tags=tags) - - def _get_change_operation_d10(self, raw_change): - try: - return raw_change['op'] - except KeyError: - msg = _("Unable to find '%s' in JSON Schema change") % 'op' - raise webob.exc.HTTPBadRequest(explanation=msg) - - def _get_change_operation_d4(self, raw_change): - op = None - for key in ['replace', 'add', 'remove']: - if key in raw_change: - if op is not None: - msg = _('Operation objects must contain only one member' - ' named "add", "remove", or "replace".') - raise webob.exc.HTTPBadRequest(explanation=msg) - op = key - if op is None: - msg = _('Operation objects must contain exactly one member' - ' named "add", "remove", or "replace".') - raise webob.exc.HTTPBadRequest(explanation=msg) - return op - - def _get_change_path_d10(self, raw_change): - try: - return raw_change['path'] - except KeyError: - msg = _("Unable to find '%s' in JSON Schema change") % 'path' - raise webob.exc.HTTPBadRequest(explanation=msg) - - def _get_change_path_d4(self, raw_change, op): - return raw_change[op] - - def _decode_json_pointer(self, pointer): - """Parse a json pointer. - - Json Pointers are defined in - http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . - The pointers use '/' for separation between object attributes, such - that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character - in an attribute name is encoded as "~1" and a '~' character is encoded - as "~0". - """ - self._validate_json_pointer(pointer) - ret = [] - for part in pointer.lstrip('/').split('/'): - ret.append(part.replace('~1', '/').replace('~0', '~').strip()) - return ret - - def _validate_json_pointer(self, pointer): - """Validate a json pointer. - - We only accept a limited form of json pointers. 
- """ - if not pointer.startswith('/'): - msg = _('Pointer `%s` does not start with "/".') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - if re.search('/\s*?/', pointer[1:]): - msg = _('Pointer `%s` contains adjacent "/".') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - if len(pointer) > 1 and pointer.endswith('/'): - msg = _('Pointer `%s` end with "/".') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - if pointer[1:].strip() == '/': - msg = _('Pointer `%s` does not contains valid token.') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - if re.search('~[^01]', pointer) or pointer.endswith('~'): - msg = _('Pointer `%s` contains "~" not part of' - ' a recognized escape sequence.') % pointer - raise webob.exc.HTTPBadRequest(explanation=msg) - - def _get_change_value(self, raw_change, op): - if 'value' not in raw_change: - msg = _('Operation "%s" requires a member named "value".') - raise webob.exc.HTTPBadRequest(explanation=msg % op) - return raw_change['value'] - - def _validate_change(self, change): - path_root = change['path'][0] - if path_root in self._readonly_properties: - msg = _("Attribute '%s' is read-only.") % path_root - raise webob.exc.HTTPForbidden(explanation=six.text_type(msg)) - if path_root in self._reserved_properties: - msg = _("Attribute '%s' is reserved.") % path_root - raise webob.exc.HTTPForbidden(explanation=six.text_type(msg)) - - if change['op'] == 'delete': - return - - partial_image = None - if len(change['path']) == 1: - partial_image = {path_root: change['value']} - elif ((path_root in get_base_properties().keys()) and - (get_base_properties()[path_root].get('type', '') == 'array')): - # NOTE(zhiyan): cient can use PATCH API to adding element to - # the image's existing set property directly. - # Such as: 1. using '/locations/N' path to adding a location - # to the image's 'locations' list at N position. - # (implemented) - # 2. using '/tags/-' path to appending a tag to the - # image's 'tags' list at last. 
(Not implemented) - partial_image = {path_root: [change['value']]} - - if partial_image: - try: - self.schema.validate(partial_image) - except exception.InvalidObject as e: - raise webob.exc.HTTPBadRequest(explanation=e.msg) - - def _validate_path(self, op, path): - path_root = path[0] - limits = self._path_depth_limits.get(path_root, {}) - if len(path) != limits.get(op, 1): - msg = _("Invalid JSON pointer for this resource: " - "'/%s'") % '/'.join(path) - raise webob.exc.HTTPBadRequest(explanation=six.text_type(msg)) - - def _parse_json_schema_change(self, raw_change, draft_version): - if draft_version == 10: - op = self._get_change_operation_d10(raw_change) - path = self._get_change_path_d10(raw_change) - elif draft_version == 4: - op = self._get_change_operation_d4(raw_change) - path = self._get_change_path_d4(raw_change, op) - else: - msg = _('Unrecognized JSON Schema draft version') - raise webob.exc.HTTPBadRequest(explanation=msg) - - path_list = self._decode_json_pointer(path) - return op, path_list - - def update(self, request): - changes = [] - content_types = { - 'application/openstack-images-v2.0-json-patch': 4, - 'application/openstack-images-v2.1-json-patch': 10, - } - if request.content_type not in content_types: - headers = {'Accept-Patch': - ', '.join(sorted(content_types.keys()))} - raise webob.exc.HTTPUnsupportedMediaType(headers=headers) - - json_schema_version = content_types[request.content_type] - - body = self._get_request_body(request) - - if not isinstance(body, list): - msg = _('Request body must be a JSON array of operation objects.') - raise webob.exc.HTTPBadRequest(explanation=msg) - - for raw_change in body: - if not isinstance(raw_change, dict): - msg = _('Operations must be JSON objects.') - raise webob.exc.HTTPBadRequest(explanation=msg) - - (op, path) = self._parse_json_schema_change(raw_change, - json_schema_version) - - # NOTE(zhiyan): the 'path' is a list. 
- self._validate_path(op, path) - change = {'op': op, 'path': path} - - if not op == 'remove': - change['value'] = self._get_change_value(raw_change, op) - self._validate_change(change) - - changes.append(change) - - return {'changes': changes} - - def _validate_limit(self, limit): - try: - limit = int(limit) - except ValueError: - msg = _("limit param must be an integer") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if limit < 0: - msg = _("limit param must be positive") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return limit - - def _validate_sort_dir(self, sort_dir): - if sort_dir not in ['asc', 'desc']: - msg = _('Invalid sort direction: %s') % sort_dir - raise webob.exc.HTTPBadRequest(explanation=msg) - - return sort_dir - - def _validate_member_status(self, member_status): - if member_status not in ['pending', 'accepted', 'rejected', 'all']: - msg = _('Invalid status: %s') % member_status - raise webob.exc.HTTPBadRequest(explanation=msg) - - return member_status - - def _get_filters(self, filters): - visibility = filters.get('visibility') - if visibility: - if visibility not in ['public', 'private', 'shared']: - msg = _('Invalid visibility value: %s') % visibility - raise webob.exc.HTTPBadRequest(explanation=msg) - changes_since = filters.get('changes-since', None) - if changes_since: - msg = _('The "changes-since" filter is no longer available on v2.') - raise webob.exc.HTTPBadRequest(explanation=msg) - - return filters - - def index(self, request): - params = request.params.copy() - limit = params.pop('limit', None) - marker = params.pop('marker', None) - sort_dir = params.pop('sort_dir', 'desc') - member_status = params.pop('member_status', 'accepted') - - # NOTE (flwang) To avoid using comma or any predefined chars to split - # multiple tags, now we allow user specify multiple 'tag' parameters - # in URL, such as v2/images?tag=x86&tag=64bit. 
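        # For illustration: a request like GET /v2/images?tag=x86&tag=64bit&limit=5
        # is handled by the loop below, which pops every 'tag' value, so the
        # resulting query_params carry filters['tags'] == ['x86', '64bit'],
        # while 'limit' is validated separately via _validate_limit().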
- tags = [] - while 'tag' in params: - tags.append(params.pop('tag').strip()) - - query_params = { - 'sort_key': params.pop('sort_key', 'created_at'), - 'sort_dir': self._validate_sort_dir(sort_dir), - 'filters': self._get_filters(params), - 'member_status': self._validate_member_status(member_status), - } - - if marker is not None: - query_params['marker'] = marker - - if limit is not None: - query_params['limit'] = self._validate_limit(limit) - - if tags: - query_params['filters']['tags'] = tags - - return query_params - - -class ResponseSerializer(wsgi.JSONResponseSerializer): - def __init__(self, schema=None): - super(ResponseSerializer, self).__init__() - self.schema = schema or get_schema() - - def _get_image_href(self, image, subcollection=''): - base_href = '/v2/images/%s' % image.image_id - if subcollection: - base_href = '%s/%s' % (base_href, subcollection) - return base_href - - def _format_image(self, image): - image_view = dict() - try: - image_view = dict(image.extra_properties) - attributes = ['name', 'disk_format', 'container_format', - 'visibility', 'size', 'virtual_size', 'status', - 'checksum', 'protected', 'min_ram', 'min_disk', - 'owner'] - for key in attributes: - image_view[key] = getattr(image, key) - image_view['id'] = image.image_id - image_view['created_at'] = timeutils.isotime(image.created_at) - image_view['updated_at'] = timeutils.isotime(image.updated_at) - - if CONF.show_multiple_locations: - locations = list(image.locations) - if locations: - image_view['locations'] = [] - for loc in locations: - tmp = dict(loc) - tmp.pop('id', None) - tmp.pop('status', None) - image_view['locations'].append(tmp) - else: - # NOTE (flwang): We will still show "locations": [] if - # image.locations is None to indicate it's allowed to show - # locations but it's just non-existent. 
- image_view['locations'] = [] - LOG.debug("There is not available location " - "for image %s" % image.image_id) - - if CONF.show_image_direct_url: - if image.locations: - # Choose best location configured strategy - l = location_strategy.choose_best_location(image.locations) - image_view['direct_url'] = l['url'] - else: - LOG.debug("There is not available location " - "for image %s" % image.image_id) - - image_view['tags'] = list(image.tags) - image_view['self'] = self._get_image_href(image) - image_view['file'] = self._get_image_href(image, 'file') - image_view['schema'] = '/v2/schemas/image' - image_view = self.schema.filter(image_view) # domain - except exception.Forbidden as e: - raise webob.exc.HTTPForbidden(explanation=e.msg) - return image_view - - def create(self, response, image): - response.status_int = 201 - self.show(response, image) - response.location = self._get_image_href(image) - - def show(self, response, image): - image_view = self._format_image(image) - body = json.dumps(image_view, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def update(self, response, image): - image_view = self._format_image(image) - body = json.dumps(image_view, ensure_ascii=False) - response.unicode_body = six.text_type(body) - response.content_type = 'application/json' - - def index(self, response, result): - params = dict(response.request.params) - params.pop('marker', None) - query = urlparse.urlencode(params) - body = { - 'images': [self._format_image(i) for i in result['images']], - 'first': '/v2/images', - 'schema': '/v2/schemas/images', - } - if query: - body['first'] = '%s?%s' % (body['first'], query) - if 'next_marker' in result: - params['marker'] = result['next_marker'] - next_query = urlparse.urlencode(params) - body['next'] = '/v2/images?%s' % next_query - response.unicode_body = six.text_type(json.dumps(body, - ensure_ascii=False)) - response.content_type = 'application/json' - - def delete(self, response, result): - response.status_int = 204 - - -def get_base_properties(): - return { - 'id': { - 'type': 'string', - 'description': _('An identifier for the image'), - 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' - '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), - }, - 'name': { - 'type': 'string', - 'description': _('Descriptive name for the image'), - 'maxLength': 255, - }, - 'status': { - 'type': 'string', - 'description': _('Status of the image (READ-ONLY)'), - 'enum': ['queued', 'saving', 'active', 'killed', - 'deleted', 'pending_delete'], - }, - 'visibility': { - 'type': 'string', - 'description': _('Scope of image accessibility'), - 'enum': ['public', 'private'], - }, - 'protected': { - 'type': 'boolean', - 'description': _('If true, image will not be deletable.'), - }, - 'checksum': { - 'type': 'string', - 'description': _('md5 hash of image contents. 
(READ-ONLY)'), - 'maxLength': 32, - }, - 'owner': { - 'type': 'string', - 'description': _('Owner of the image'), - 'maxLength': 255, - }, - 'size': { - 'type': 'integer', - 'description': _('Size of image file in bytes (READ-ONLY)'), - }, - 'virtual_size': { - 'type': 'integer', - 'description': _('Virtual size of image in bytes (READ-ONLY)'), - }, - 'container_format': { - 'type': 'string', - 'description': _('Format of the container'), - 'enum': CONF.image_format.container_formats, - }, - 'disk_format': { - 'type': 'string', - 'description': _('Format of the disk'), - 'enum': CONF.image_format.disk_formats, - }, - 'created_at': { - 'type': 'string', - 'description': _('Date and time of image registration' - ' (READ-ONLY)'), - #TODO(bcwaldon): our jsonschema library doesn't seem to like the - # format attribute, figure out why! - #'format': 'date-time', - }, - 'updated_at': { - 'type': 'string', - 'description': _('Date and time of the last image modification' - ' (READ-ONLY)'), - #'format': 'date-time', - }, - 'tags': { - 'type': 'array', - 'description': _('List of strings related to the image'), - 'items': { - 'type': 'string', - 'maxLength': 255, - }, - }, - 'direct_url': { - 'type': 'string', - 'description': _('URL to access the image file kept in external ' - 'store (READ-ONLY)'), - }, - 'min_ram': { - 'type': 'integer', - 'description': _('Amount of ram (in MB) required to boot image.'), - }, - 'min_disk': { - 'type': 'integer', - 'description': _('Amount of disk space (in GB) required to boot ' - 'image.'), - }, - 'self': { - 'type': 'string', - 'description': '(READ-ONLY)' - }, - 'file': { - 'type': 'string', - 'description': '(READ-ONLY)' - }, - 'schema': { - 'type': 'string', - 'description': '(READ-ONLY)' - }, - 'locations': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'url': { - 'type': 'string', - 'maxLength': 255, - }, - 'metadata': { - 'type': 'object', - }, - }, - 'required': ['url', 'metadata'], - }, - 'description': _('A set of URLs to access the image file kept in ' - 'external store'), - }, - } - - -def _get_base_links(): - return [ - {'rel': 'self', 'href': '{self}'}, - {'rel': 'enclosure', 'href': '{file}'}, - {'rel': 'describedby', 'href': '{schema}'}, - ] - - -def get_schema(custom_properties=None): - properties = get_base_properties() - links = _get_base_links() - if CONF.allow_additional_image_properties: - schema = glance.schema.PermissiveSchema('image', properties, links) - else: - schema = glance.schema.Schema('image', properties) - - if custom_properties: - for property_value in custom_properties.values(): - property_value['is_base'] = False - schema.merge_properties(custom_properties) - return schema - - -def get_collection_schema(custom_properties=None): - image_schema = get_schema(custom_properties) - return glance.schema.CollectionSchema('images', image_schema) - - -def load_custom_properties(): - """Find the schema properties files and load them into a dict.""" - filename = 'schema-image.json' - match = CONF.find_file(filename) - if match: - with open(match, 'r') as schema_file: - schema_data = schema_file.read() - return json.loads(schema_data) - else: - msg = (_LW('Could not find schema properties file %s. 
Continuing ' - 'without custom properties') % filename) - LOG.warn(msg) - return {} - - -def create_resource(custom_properties=None): - """Images resource factory method""" - schema = get_schema(custom_properties) - deserializer = RequestDeserializer(schema) - serializer = ResponseSerializer(schema) - controller = ImagesController() - return wsgi.Resource(controller, deserializer, serializer) diff --git a/juno-patches/glance/glance_location_patch/glance/common/config.py b/juno-patches/glance/glance_location_patch/glance/common/config.py deleted file mode 100644 index b6092a70..00000000 --- a/juno-patches/glance/glance_location_patch/glance/common/config.py +++ /dev/null @@ -1,286 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Routines for configuring Glance -""" - -import logging -import logging.config -import logging.handlers -import os - -from oslo.config import cfg -from paste import deploy - -from glance.version import version_info as version - -paste_deploy_opts = [ - cfg.StrOpt('flavor', - help=_('Partial name of a pipeline in your paste configuration ' - 'file with the service name removed. For example, if ' - 'your paste section name is ' - '[pipeline:glance-api-keystone] use the value ' - '"keystone"')), - cfg.StrOpt('config_file', - help=_('Name of the paste configuration file.')), -] -image_format_opts = [ - cfg.ListOpt('container_formats', - default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova'], - help=_("Supported values for the 'container_format' " - "image attribute"), - deprecated_opts=[cfg.DeprecatedOpt('container_formats', - group='DEFAULT')]), - cfg.ListOpt('disk_formats', - default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', - 'vdi', 'iso'], - help=_("Supported values for the 'disk_format' " - "image attribute"), - deprecated_opts=[cfg.DeprecatedOpt('disk_formats', - group='DEFAULT')]), -] -task_opts = [ - cfg.IntOpt('task_time_to_live', - default=48, - help=_("Time in hours for which a task lives after, either " - "succeeding or failing"), - deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live', - group='DEFAULT')]), - cfg.StrOpt('task_executor', - default='eventlet', - help=_("Specifies which task executor to be used to run the " - "task scripts.")), - cfg.IntOpt('eventlet_executor_pool_size', - default=1000, - help=_("Specifies the maximum number of eventlet threads which " - "can be spun up by the eventlet based task executor to " - "perform execution of Glance tasks.")), -] -manage_opts = [ - cfg.BoolOpt('db_enforce_mysql_charset', - default=True, - help=_('DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE. ' - 'Whether or not to enforce that all DB tables have ' - 'charset utf8. If your database tables do not have ' - 'charset utf8 you will need to convert before this ' - 'option is removed. 
This option is only relevant if ' - 'your database engine is MySQL.')) -] -common_opts = [ - cfg.BoolOpt('allow_additional_image_properties', default=True, - help=_('Whether to allow users to specify image properties ' - 'beyond what the image schema provides')), - cfg.IntOpt('image_member_quota', default=128, - help=_('Maximum number of image members per image. ' - 'Negative values evaluate to unlimited.')), - cfg.IntOpt('image_property_quota', default=128, - help=_('Maximum number of properties allowed on an image. ' - 'Negative values evaluate to unlimited.')), - cfg.IntOpt('image_tag_quota', default=128, - help=_('Maximum number of tags allowed on an image. ' - 'Negative values evaluate to unlimited.')), - cfg.IntOpt('image_location_quota', default=10, - help=_('Maximum number of locations allowed on an image. ' - 'Negative values evaluate to unlimited.')), - cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api', - help=_('Python module path of data access API')), - cfg.IntOpt('limit_param_default', default=25, - help=_('Default value for the number of items returned by a ' - 'request if not specified explicitly in the request')), - cfg.IntOpt('api_limit_max', default=1000, - help=_('Maximum permissible number of items that could be ' - 'returned by a request')), - cfg.BoolOpt('show_image_direct_url', default=False, - help=_('Whether to include the backend image storage location ' - 'in image properties. Revealing storage location can ' - 'be a security risk, so use this setting with ' - 'caution!')), - cfg.BoolOpt('show_multiple_locations', default=False, - help=_('Whether to include the backend image locations ' - 'in image properties. Revealing storage location can ' - 'be a security risk, so use this setting with ' - 'caution! The overrides show_image_direct_url.')), - cfg.IntOpt('image_size_cap', default=1099511627776, - help=_("Maximum size of image a user can upload in bytes. " - "Defaults to 1099511627776 bytes (1 TB).")), - cfg.StrOpt('user_storage_quota', default='0', - help=_("Set a system wide quota for every user. This value is " - "the total capacity that a user can use across " - "all storage systems. A value of 0 means unlimited." - "Optional unit can be specified for the value. Accepted " - "units are B, KB, MB, GB and TB representing " - "Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes" - "respectively. If no unit is specified then Bytes is " - "assumed. 
Note that there should not be any space " - "between value and unit and units are case sensitive.")), - cfg.BoolOpt('enable_v1_api', default=True, - help=_("Deploy the v1 OpenStack Images API.")), - cfg.BoolOpt('enable_v2_api', default=True, - help=_("Deploy the v2 OpenStack Images API.")), - cfg.BoolOpt('enable_v1_registry', default=True, - help=_("Deploy the v1 OpenStack Registry API.")), - cfg.BoolOpt('enable_v2_registry', default=True, - help=_("Deploy the v2 OpenStack Registry API.")), - cfg.StrOpt('pydev_worker_debug_host', - help=_('The hostname/IP of the pydev process listening for ' - 'debug connections')), - cfg.IntOpt('pydev_worker_debug_port', default=5678, - help=_('The port on which a pydev process is listening for ' - 'connections.')), - cfg.StrOpt('metadata_encryption_key', secret=True, - help=_('Key used for encrypting sensitive metadata while ' - 'talking to the registry or database.')), - cfg.BoolOpt('sync_enabled', default=False, - help=_("Whether to launch the Sync function.")), - cfg.StrOpt('sync_server_host', default='127.0.0.1', - help=_('host ip where sync_web_server in.')), - cfg.IntOpt('sync_server_port', default=9595, - help=_('host port where sync_web_server in.')), -] -sync_opts = [ - cfg.StrOpt('cascading_endpoint_url', default='http://127.0.0.1:9292/', - help=_('host ip where glance in.'), - deprecated_opts=[cfg.DeprecatedOpt('cascading_endpoint_url', - group='DEFAULT')]), - cfg.StrOpt('sync_strategy', default='None', - help=_("Define the sync strategy, value can be All/User/None."), - deprecated_opts=[cfg.DeprecatedOpt('sync_strategy', - group='DEFAULT')]), - cfg.IntOpt('snapshot_timeout', default=300, - help=_('when snapshot, max wait (second)time for snapshot ' - 'status become active.'), - deprecated_opts=[cfg.DeprecatedOpt('snapshot_timeout', - group='DEFAULT')]), - cfg.IntOpt('snapshot_sleep_interval', default=10, - help=_('when snapshot, sleep interval for waiting snapshot ' - 'status become active.'), - deprecated_opts=[cfg.DeprecatedOpt('snapshot_sleep_interval', - group='DEFAULT')]), - cfg.IntOpt('task_retry_times', default=0, - help=_('sync task fail retry times.'), - deprecated_opts=[cfg.DeprecatedOpt('task_retry_times', - group='DEFAULT')]), - cfg.IntOpt('scp_copy_timeout', default=3600, - help=_('when snapshot, max wait (second)time for snapshot ' - 'status become active.'), - deprecated_opts=[cfg.DeprecatedOpt('scp_copy_timeout', - group='DEFAULT')]), -] - -CONF = cfg.CONF -CONF.register_opts(paste_deploy_opts, group='paste_deploy') -CONF.register_opts(image_format_opts, group='image_format') -CONF.register_opts(task_opts, group='task') -CONF.register_opts(sync_opts, group='sync') -CONF.register_opts(manage_opts) -CONF.register_opts(common_opts) - - -def parse_args(args=None, usage=None, default_config_files=None): - CONF(args=args, - project='glance', - version=version.cached_version_string(), - usage=usage, - default_config_files=default_config_files) - - -def parse_cache_args(args=None): - config_files = cfg.find_config_files(project='glance', prog='glance-cache') - parse_args(args=args, default_config_files=config_files) - - -def _get_deployment_flavor(flavor=None): - """ - Retrieve the paste_deploy.flavor config item, formatted appropriately - for appending to the application name. 
- - :param flavor: if specified, use this setting rather than the - paste_deploy.flavor configuration setting - """ - if not flavor: - flavor = CONF.paste_deploy.flavor - return '' if not flavor else ('-' + flavor) - - -def _get_paste_config_path(): - paste_suffix = '-paste.ini' - conf_suffix = '.conf' - if CONF.config_file: - # Assume paste config is in a paste.ini file corresponding - # to the last config file - path = CONF.config_file[-1].replace(conf_suffix, paste_suffix) - else: - path = CONF.prog + paste_suffix - return CONF.find_file(os.path.basename(path)) - - -def _get_deployment_config_file(): - """ - Retrieve the deployment_config_file config item, formatted as an - absolute pathname. - """ - path = CONF.paste_deploy.config_file - if not path: - path = _get_paste_config_path() - if not path: - msg = _("Unable to locate paste config file for %s.") % CONF.prog - raise RuntimeError(msg) - return os.path.abspath(path) - - -def load_paste_app(app_name, flavor=None, conf_file=None): - """ - Builds and returns a WSGI app from a paste config file. - - We assume the last config file specified in the supplied ConfigOpts - object is the paste config file, if conf_file is None. - - :param app_name: name of the application to load - :param flavor: name of the variant of the application to load - :param conf_file: path to the paste config file - - :raises RuntimeError when config file cannot be located or application - cannot be loaded from config file - """ - # append the deployment flavor to the application name, - # in order to identify the appropriate paste pipeline - app_name += _get_deployment_flavor(flavor) - - if not conf_file: - conf_file = _get_deployment_config_file() - - try: - logger = logging.getLogger(__name__) - logger.debug("Loading %(app_name)s from %(conf_file)s", - {'conf_file': conf_file, 'app_name': app_name}) - - app = deploy.loadapp("config:%s" % conf_file, name=app_name) - - # Log the options used when starting if we're in debug mode... - if CONF.debug: - CONF.log_opt_values(logger, logging.DEBUG) - - return app - except (LookupError, ImportError) as e: - msg = (_("Unable to load %(app_name)s from " - "configuration file %(conf_file)s." - "\nGot: %(e)r") % {'app_name': app_name, - 'conf_file': conf_file, - 'e': e}) - logger.error(msg) - raise RuntimeError(msg) diff --git a/juno-patches/glance/glance_location_patch/glance/common/exception.py b/juno-patches/glance/glance_location_patch/glance/common/exception.py deleted file mode 100644 index 03f17661..00000000 --- a/juno-patches/glance/glance_location_patch/glance/common/exception.py +++ /dev/null @@ -1,422 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
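Referring back to the options registered in glance/common/config.py above, a minimal sketch of how the sync-related settings could be read once the options are parsed (illustrative only; the services in this patch may access them differently):

from oslo.config import cfg
from glance.common import config  # registers the options shown above at import time

config.parse_args(args=[])  # parse with defaults only, for illustration
CONF = cfg.CONF
if CONF.sync_enabled:                              # registered in common_opts (DEFAULT)
    endpoint = CONF.sync.cascading_endpoint_url    # registered in sync_opts ([sync] group)
    strategy = CONF.sync.sync_strategy             # 'All', 'User' or 'None'
    server = (CONF.sync_server_host, CONF.sync_server_port)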
- -"""Glance exception subclasses""" - -import six -import six.moves.urllib.parse as urlparse - -_FATAL_EXCEPTION_FORMAT_ERRORS = False - - -class RedirectException(Exception): - def __init__(self, url): - self.url = urlparse.urlparse(url) - - -class GlanceException(Exception): - """ - Base Glance Exception - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. - """ - message = _("An unknown exception occurred") - - def __init__(self, message=None, *args, **kwargs): - if not message: - message = self.message - try: - if kwargs: - message = message % kwargs - except Exception: - if _FATAL_EXCEPTION_FORMAT_ERRORS: - raise - else: - # at least get the core message out if something happened - pass - self.msg = message - super(GlanceException, self).__init__(message) - - def __unicode__(self): - # NOTE(flwang): By default, self.msg is an instance of Message, which - # can't be converted by str(). Based on the definition of - # __unicode__, it should return unicode always. - return six.text_type(self.msg) - - -class MissingCredentialError(GlanceException): - message = _("Missing required credential: %(required)s") - - -class BadAuthStrategy(GlanceException): - message = _("Incorrect auth strategy, expected \"%(expected)s\" but " - "received \"%(received)s\"") - - -class NotFound(GlanceException): - message = _("An object with the specified identifier was not found.") - - -class BadStoreUri(GlanceException): - message = _("The Store URI was malformed.") - - -class Duplicate(GlanceException): - message = _("An object with the same identifier already exists.") - - -class Conflict(GlanceException): - message = _("An object with the same identifier is currently being " - "operated on.") - - -class StorageQuotaFull(GlanceException): - message = _("The size of the data %(image_size)s will exceed the limit. 
" - "%(remaining)s bytes remaining.") - - -class AuthBadRequest(GlanceException): - message = _("Connect error/bad request to Auth service at URL %(url)s.") - - -class AuthUrlNotFound(GlanceException): - message = _("Auth service at URL %(url)s not found.") - - -class AuthorizationFailure(GlanceException): - message = _("Authorization failed.") - - -class NotAuthenticated(GlanceException): - message = _("You are not authenticated.") - - -class Forbidden(GlanceException): - message = _("You are not authorized to complete this action.") - - -class ForbiddenPublicImage(Forbidden): - message = _("You are not authorized to complete this action.") - - -class ProtectedImageDelete(Forbidden): - message = _("Image %(image_id)s is protected and cannot be deleted.") - - -class ProtectedMetadefNamespaceDelete(Forbidden): - message = _("Metadata definition namespace %(namespace)s is protected" - " and cannot be deleted.") - - -class ProtectedMetadefNamespacePropDelete(Forbidden): - message = _("Metadata definition property %(property_name)s is protected" - " and cannot be deleted.") - - -class ProtectedMetadefObjectDelete(Forbidden): - message = _("Metadata definition object %(object_name)s is protected" - " and cannot be deleted.") - - -class ProtectedMetadefResourceTypeAssociationDelete(Forbidden): - message = _("Metadata definition resource-type-association" - " %(resource_type)s is protected and cannot be deleted.") - - -class ProtectedMetadefResourceTypeSystemDelete(Forbidden): - message = _("Metadata definition resource-type %(resource_type_name)s is" - " a seeded-system type and cannot be deleted.") - - -class Invalid(GlanceException): - message = _("Data supplied was not valid.") - - -class InvalidSortKey(Invalid): - message = _("Sort key supplied was not valid.") - - -class InvalidPropertyProtectionConfiguration(Invalid): - message = _("Invalid configuration in property protection file.") - - -class InvalidSwiftStoreConfiguration(Invalid): - message = _("Invalid configuration in glance-swift conf file.") - - -class InvalidFilterRangeValue(Invalid): - message = _("Unable to filter using the specified range.") - - -class ReadonlyProperty(Forbidden): - message = _("Attribute '%(property)s' is read-only.") - - -class ReservedProperty(Forbidden): - message = _("Attribute '%(property)s' is reserved.") - - -class AuthorizationRedirect(GlanceException): - message = _("Redirecting to %(uri)s for authorization.") - - -class ClientConnectionError(GlanceException): - message = _("There was an error connecting to a server") - - -class ClientConfigurationError(GlanceException): - message = _("There was an error configuring the client.") - - -class MultipleChoices(GlanceException): - message = _("The request returned a 302 Multiple Choices. This generally " - "means that you have not included a version indicator in a " - "request URI.\n\nThe body of response returned:\n%(body)s") - - -class LimitExceeded(GlanceException): - message = _("The request returned a 413 Request Entity Too Large. This " - "generally means that rate limiting or a quota threshold was " - "breached.\n\nThe response body:\n%(body)s") - - def __init__(self, *args, **kwargs): - self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') - else None) - super(LimitExceeded, self).__init__(*args, **kwargs) - - -class ServiceUnavailable(GlanceException): - message = _("The request returned 503 Service Unavailable. 
This " - "generally occurs on service overload or other transient " - "outage.") - - def __init__(self, *args, **kwargs): - self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') - else None) - super(ServiceUnavailable, self).__init__(*args, **kwargs) - - -class ServerError(GlanceException): - message = _("The request returned 500 Internal Server Error.") - - -class UnexpectedStatus(GlanceException): - message = _("The request returned an unexpected status: %(status)s." - "\n\nThe response body:\n%(body)s") - - -class InvalidContentType(GlanceException): - message = _("Invalid content type %(content_type)s") - - -class BadRegistryConnectionConfiguration(GlanceException): - message = _("Registry was not configured correctly on API server. " - "Reason: %(reason)s") - - -class BadDriverConfiguration(GlanceException): - message = _("Driver %(driver_name)s could not be configured correctly. " - "Reason: %(reason)s") - - -class MaxRedirectsExceeded(GlanceException): - message = _("Maximum redirects (%(redirects)s) was exceeded.") - - -class InvalidRedirect(GlanceException): - message = _("Received invalid HTTP redirect.") - - -class NoServiceEndpoint(GlanceException): - message = _("Response from Keystone does not contain a Glance endpoint.") - - -class RegionAmbiguity(GlanceException): - message = _("Multiple 'image' service matches for region %(region)s. This " - "generally means that a region is required and you have not " - "supplied one.") - - -class WorkerCreationFailure(GlanceException): - message = _("Server worker creation failed: %(reason)s.") - - -class SchemaLoadError(GlanceException): - message = _("Unable to load schema: %(reason)s") - - -class InvalidObject(GlanceException): - message = _("Provided object does not match schema " - "'%(schema)s': %(reason)s") - - -class UnsupportedHeaderFeature(GlanceException): - message = _("Provided header feature is unsupported: %(feature)s") - - -class InUseByStore(GlanceException): - message = _("The image cannot be deleted because it is in use through " - "the backend store outside of Glance.") - - -class ImageSizeLimitExceeded(GlanceException): - message = _("The provided image is too large.") - - -class ImageMemberLimitExceeded(LimitExceeded): - message = _("The limit has been exceeded on the number of allowed image " - "members for this image. Attempted: %(attempted)s, " - "Maximum: %(maximum)s") - - -class ImagePropertyLimitExceeded(LimitExceeded): - message = _("The limit has been exceeded on the number of allowed image " - "properties. Attempted: %(attempted)s, Maximum: %(maximum)s") - - -class ImageTagLimitExceeded(LimitExceeded): - message = _("The limit has been exceeded on the number of allowed image " - "tags. Attempted: %(attempted)s, Maximum: %(maximum)s") - - -class ImageLocationLimitExceeded(LimitExceeded): - message = _("The limit has been exceeded on the number of allowed image " - "locations. 
Attempted: %(attempted)s, Maximum: %(maximum)s") - - -class RPCError(GlanceException): - message = _("%(cls)s exception was raised in the last rpc call: %(val)s") - - -class TaskException(GlanceException): - message = _("An unknown task exception occurred") - - -class TaskNotFound(TaskException, NotFound): - message = _("Task with the given id %(task_id)s was not found") - - -class InvalidTaskStatus(TaskException, Invalid): - message = _("Provided status of task is unsupported: %(status)s") - - -class InvalidTaskType(TaskException, Invalid): - message = _("Provided type of task is unsupported: %(type)s") - - -class InvalidTaskStatusTransition(TaskException, Invalid): - message = _("Status transition from %(cur_status)s to" - " %(new_status)s is not allowed") - - -class DuplicateLocation(Duplicate): - message = _("The location %(location)s already exists") - - -class ImageDataNotFound(NotFound): - message = _("No image data could be found") - - -class InvalidParameterValue(Invalid): - message = _("Invalid value '%(value)s' for parameter '%(param)s': " - "%(extra_msg)s") - - -class InvalidImageStatusTransition(Invalid): - message = _("Image status transition from %(cur_status)s to" - " %(new_status)s is not allowed") - - -class MetadefDuplicateNamespace(Duplicate): - message = _("The metadata definition namespace=%(namespace_name)s" - " already exists.") - - -class MetadefDuplicateObject(Duplicate): - message = _("A metadata definition object with name=%(object_name)s" - " already exists in namespace=%(namespace_name)s.") - - -class MetadefDuplicateProperty(Duplicate): - message = _("A metadata definition property with name=%(property_name)s" - " already exists in namespace=%(namespace_name)s.") - - -class MetadefDuplicateResourceType(Duplicate): - message = _("A metadata definition resource-type with" - " name=%(resource_type_name)s already exists.") - - -class MetadefDuplicateResourceTypeAssociation(Duplicate): - message = _("The metadata definition resource-type association of" - " resource-type=%(resource_type_name)s to" - " namespace=%(namespace_name)s" - " already exists.") - - -class MetadefForbidden(Forbidden): - message = _("You are not authorized to complete this action.") - - -class MetadefIntegrityError(Forbidden): - message = _("The metadata definition %(record_type)s with" - " name=%(record_name)s not deleted." 
- " Other records still refer to it.") - - -class MetadefNamespaceNotFound(NotFound): - message = _("Metadata definition namespace=%(namespace_name)s" - "was not found.") - - -class MetadefObjectNotFound(NotFound): - message = _("The metadata definition object with" - " name=%(object_name)s was not found in" - " namespace=%(namespace_name)s.") - - -class MetadefPropertyNotFound(NotFound): - message = _("The metadata definition property with" - " name=%(property_name)s was not found in" - " namespace=%(namespace_name)s.") - - -class MetadefResourceTypeNotFound(NotFound): - message = _("The metadata definition resource-type with" - " name=%(resource_type_name)s, was not found.") - - -class MetadefResourceTypeAssociationNotFound(NotFound): - message = _("The metadata definition resource-type association of" - " resource-type=%(resource_type_name)s to" - " namespace=%(namespace_name)s," - " was not found.") - - -class MetadefRecordNotFound(NotFound): - message = _("Metadata definition %(record_type)s record not found" - " for id %(id)s.") - - -class SyncServiceOperationError(GlanceException): - message = _("Image sync service execute failed with reason: %(reason)s") - - -class SyncStoreCopyError(GlanceException): - message = _("Image sync store failed with reason: %(reason)s") - diff --git a/juno-patches/glance/glance_location_patch/glance/common/utils.py b/juno-patches/glance/glance_location_patch/glance/common/utils.py deleted file mode 100644 index 3e05a5a2..00000000 --- a/juno-patches/glance/glance_location_patch/glance/common/utils.py +++ /dev/null @@ -1,657 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2014 SoftLayer Technologies, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -System-level utilities and helper functions. 
-""" - -import errno - -try: - from eventlet import sleep -except ImportError: - from time import sleep -from eventlet.green import socket - -import functools -import os -import platform -import re -import subprocess -import sys -import uuid - -import netaddr -from OpenSSL import crypto -from oslo.config import cfg -from webob import exc - -import six - -from glance.common import exception -from glance.openstack.common import excutils -import glance.openstack.common.log as logging -from glance.openstack.common import network_utils -from glance.openstack.common import strutils - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - -FEATURE_BLACKLIST = ['content-length', 'content-type', 'x-image-meta-size'] - -# Whitelist of v1 API headers of form x-image-meta-xxx -IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size', - 'x-image-meta-is_public', 'x-image-meta-disk_format', - 'x-image-meta-container_format', 'x-image-meta-name', - 'x-image-meta-status', 'x-image-meta-copy_from', - 'x-image-meta-uri', 'x-image-meta-checksum', - 'x-image-meta-created_at', 'x-image-meta-updated_at', - 'x-image-meta-deleted_at', 'x-image-meta-min_ram', - 'x-image-meta-min_disk', 'x-image-meta-owner', - 'x-image-meta-store', 'x-image-meta-id', - 'x-image-meta-protected', 'x-image-meta-deleted'] - -GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD' - - -def chunkreadable(iter, chunk_size=65536): - """ - Wrap a readable iterator with a reader yielding chunks of - a preferred size, otherwise leave iterator unchanged. - - :param iter: an iter which may also be readable - :param chunk_size: maximum size of chunk - """ - return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter - - -def chunkiter(fp, chunk_size=65536): - """ - Return an iterator to a file-like obj which yields fixed size chunks - - :param fp: a file-like object - :param chunk_size: maximum size of chunk - """ - while True: - chunk = fp.read(chunk_size) - if chunk: - yield chunk - else: - break - - -def cooperative_iter(iter): - """ - Return an iterator which schedules after each - iteration. This can prevent eventlet thread starvation. - - :param iter: an iterator to wrap - """ - try: - for chunk in iter: - sleep(0) - yield chunk - except Exception as err: - with excutils.save_and_reraise_exception(): - msg = _("Error: cooperative_iter exception %s") % err - LOG.error(msg) - - -def cooperative_read(fd): - """ - Wrap a file descriptor's read with a partial function which schedules - after each read. This can prevent eventlet thread starvation. - - :param fd: a file descriptor to wrap - """ - def readfn(*args): - result = fd.read(*args) - sleep(0) - return result - return readfn - - -class CooperativeReader(object): - """ - An eventlet thread friendly class for reading in image data. - - When accessing data either through the iterator or the read method - we perform a sleep to allow a co-operative yield. When there is more than - one image being uploaded/downloaded this prevents eventlet thread - starvation, ie allows all threads to be scheduled periodically rather than - having the same thread be continuously active. 
- """ - def __init__(self, fd): - """ - :param fd: Underlying image file object - """ - self.fd = fd - self.iterator = None - # NOTE(markwash): if the underlying supports read(), overwrite the - # default iterator-based implementation with cooperative_read which - # is more straightforward - if hasattr(fd, 'read'): - self.read = cooperative_read(fd) - - def read(self, length=None): - """Return the next chunk of the underlying iterator. - - This is replaced with cooperative_read in __init__ if the underlying - fd already supports read(). - """ - if self.iterator is None: - self.iterator = self.__iter__() - try: - return self.iterator.next() - except StopIteration: - return '' - - def __iter__(self): - return cooperative_iter(self.fd.__iter__()) - - -class LimitingReader(object): - """ - Reader designed to fail when reading image data past the configured - allowable amount. - """ - def __init__(self, data, limit): - """ - :param data: Underlying image data object - :param limit: maximum number of bytes the reader should allow - """ - self.data = data - self.limit = limit - self.bytes_read = 0 - - def __iter__(self): - for chunk in self.data: - self.bytes_read += len(chunk) - if self.bytes_read > self.limit: - raise exception.ImageSizeLimitExceeded() - else: - yield chunk - - def read(self, i): - result = self.data.read(i) - self.bytes_read += len(result) - if self.bytes_read > self.limit: - raise exception.ImageSizeLimitExceeded() - return result - - -def image_meta_to_http_headers(image_meta): - """ - Returns a set of image metadata into a dict - of HTTP headers that can be fed to either a Webob - Request object or an httplib.HTTP(S)Connection object - - :param image_meta: Mapping of image metadata - """ - headers = {} - for k, v in image_meta.items(): - if v is not None: - if k == 'properties': - for pk, pv in v.items(): - if pv is not None: - headers["x-image-meta-property-%s" - % pk.lower()] = six.text_type(pv) - else: - headers["x-image-meta-%s" % k.lower()] = six.text_type(v) - return headers - - -def get_image_meta_from_headers(response): - """ - Processes HTTP headers from a supplied response that - match the x-image-meta and x-image-meta-property and - returns a mapping of image metadata and properties - - :param response: Response to process - """ - result = {} - properties = {} - - if hasattr(response, 'getheaders'): # httplib.HTTPResponse - headers = response.getheaders() - else: # webob.Response - headers = response.headers.items() - - for key, value in headers: - key = str(key.lower()) - if key.startswith('x-image-meta-property-'): - field_name = key[len('x-image-meta-property-'):].replace('-', '_') - properties[field_name] = value or None - elif key.startswith('x-image-meta-'): - field_name = key[len('x-image-meta-'):].replace('-', '_') - if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS: - msg = _("Bad header: %(header_name)s") % {'header_name': key} - raise exc.HTTPBadRequest(msg, content_type="text/plain") - result[field_name] = value or None - result['properties'] = properties - - for key in ('size', 'min_disk', 'min_ram'): - if key in result: - try: - result[key] = int(result[key]) - except ValueError: - extra = (_("Cannot convert image %(key)s '%(value)s' " - "to an integer.") - % {'key': key, 'value': result[key]}) - raise exception.InvalidParameterValue(value=result[key], - param=key, - extra_msg=extra) - if result[key] < 0: - extra = (_("Image %(key)s must be >= 0 " - "('%(value)s' specified).") - % {'key': key, 'value': result[key]}) - raise 
exception.InvalidParameterValue(value=result[key], - param=key, - extra_msg=extra) - - for key in ('is_public', 'deleted', 'protected'): - if key in result: - result[key] = strutils.bool_from_string(result[key]) - return result - - -def create_mashup_dict(image_meta): - """ - Returns a dictionary-like mashup of the image core properties - and the image custom properties from given image metadata. - - :param image_meta: metadata of image with core and custom properties - """ - - def get_items(): - for key, value in six.iteritems(image_meta): - if isinstance(value, dict): - for subkey, subvalue in six.iteritems( - create_mashup_dict(value)): - if subkey not in image_meta: - yield subkey, subvalue - else: - yield key, value - - return dict(get_items()) - - -def safe_mkdirs(path): - try: - os.makedirs(path) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - -def safe_remove(path): - try: - os.remove(path) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - -class PrettyTable(object): - """Creates an ASCII art table for use in bin/glance - - Example: - - ID Name Size Hits - --- ----------------- ------------ ----- - 122 image 22 0 - """ - def __init__(self): - self.columns = [] - - def add_column(self, width, label="", just='l'): - """Add a column to the table - - :param width: number of characters wide the column should be - :param label: column heading - :param just: justification for the column, 'l' for left, - 'r' for right - """ - self.columns.append((width, label, just)) - - def make_header(self): - label_parts = [] - break_parts = [] - for width, label, _ in self.columns: - # NOTE(sirp): headers are always left justified - label_part = self._clip_and_justify(label, width, 'l') - label_parts.append(label_part) - - break_part = '-' * width - break_parts.append(break_part) - - label_line = ' '.join(label_parts) - break_line = ' '.join(break_parts) - return '\n'.join([label_line, break_line]) - - def make_row(self, *args): - row = args - row_parts = [] - for data, (width, _, just) in zip(row, self.columns): - row_part = self._clip_and_justify(data, width, just) - row_parts.append(row_part) - - row_line = ' '.join(row_parts) - return row_line - - @staticmethod - def _clip_and_justify(data, width, just): - # clip field to column width - clipped_data = str(data)[:width] - - if just == 'r': - # right justify - justified = clipped_data.rjust(width) - else: - # left justify - justified = clipped_data.ljust(width) - - return justified - - -def get_terminal_size(): - - def _get_terminal_size_posix(): - import fcntl - import struct - import termios - - height_width = None - - try: - height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(), - termios.TIOCGWINSZ, - struct.pack('HH', 0, 0))) - except Exception: - pass - - if not height_width: - try: - p = subprocess.Popen(['stty', 'size'], - shell=False, - stdout=subprocess.PIPE, - stderr=open(os.devnull, 'w')) - result = p.communicate() - if p.returncode == 0: - return tuple(int(x) for x in result[0].split()) - except Exception: - pass - - return height_width - - def _get_terminal_size_win32(): - try: - from ctypes import create_string_buffer - from ctypes import windll - handle = windll.kernel32.GetStdHandle(-12) - csbi = create_string_buffer(22) - res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi) - except Exception: - return None - if res: - import struct - unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw) - (bufx, bufy, curx, cury, wattr, - left, top, right, bottom, maxx, maxy) = unpack_tmp - 
height = bottom - top + 1 - width = right - left + 1 - return (height, width) - else: - return None - - def _get_terminal_size_unknownOS(): - raise NotImplementedError - - func = {'posix': _get_terminal_size_posix, - 'win32': _get_terminal_size_win32} - - height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)() - - if height_width is None: - raise exception.Invalid() - - for i in height_width: - if not isinstance(i, int) or i <= 0: - raise exception.Invalid() - - return height_width[0], height_width[1] - - -def mutating(func): - """Decorator to enforce read-only logic""" - @functools.wraps(func) - def wrapped(self, req, *args, **kwargs): - if req.context.read_only: - msg = "Read-only access" - LOG.debug(msg) - raise exc.HTTPForbidden(msg, request=req, - content_type="text/plain") - return func(self, req, *args, **kwargs) - return wrapped - - -def setup_remote_pydev_debug(host, port): - error_msg = _('Error setting up the debug environment. Verify that the' - ' option pydev_worker_debug_host is pointing to a valid ' - 'hostname or IP on which a pydev server is listening on' - ' the port indicated by pydev_worker_debug_port.') - - try: - try: - from pydev import pydevd - except ImportError: - import pydevd - - pydevd.settrace(host, - port=port, - stdoutToServer=True, - stderrToServer=True) - return True - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(error_msg) - - -class LazyPluggable(object): - """A pluggable backend loaded lazily based on some value.""" - - def __init__(self, pivot, config_group=None, **backends): - self.__backends = backends - self.__pivot = pivot - self.__backend = None - self.__config_group = config_group - - def __get_backend(self): - if not self.__backend: - if self.__config_group is None: - backend_name = CONF[self.__pivot] - else: - backend_name = CONF[self.__config_group][self.__pivot] - if backend_name not in self.__backends: - msg = _('Invalid backend: %s') % backend_name - raise exception.GlanceException(msg) - - backend = self.__backends[backend_name] - if isinstance(backend, tuple): - name = backend[0] - fromlist = backend[1] - else: - name = backend - fromlist = backend - - self.__backend = __import__(name, None, None, fromlist) - return self.__backend - - def __getattr__(self, key): - backend = self.__get_backend() - return getattr(backend, key) - - -def validate_key_cert(key_file, cert_file): - try: - error_key_name = "private key" - error_filename = key_file - with open(key_file, 'r') as keyfile: - key_str = keyfile.read() - key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str) - - error_key_name = "certificate" - error_filename = cert_file - with open(cert_file, 'r') as certfile: - cert_str = certfile.read() - cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str) - except IOError as ioe: - raise RuntimeError(_("There is a problem with your %(error_key_name)s " - "%(error_filename)s. Please verify it." - " Error: %(ioe)s") % - {'error_key_name': error_key_name, - 'error_filename': error_filename, - 'ioe': ioe}) - except crypto.Error as ce: - raise RuntimeError(_("There is a problem with your %(error_key_name)s " - "%(error_filename)s. Please verify it. OpenSSL" - " error: %(ce)s") % - {'error_key_name': error_key_name, - 'error_filename': error_filename, - 'ce': ce}) - - try: - data = str(uuid.uuid4()) - digest = "sha1" - - out = crypto.sign(key, data, digest) - crypto.verify(cert, out, data, digest) - except crypto.Error as ce: - raise RuntimeError(_("There is a problem with your key pair. 
" - "Please verify that cert %(cert_file)s and " - "key %(key_file)s belong together. OpenSSL " - "error %(ce)s") % {'cert_file': cert_file, - 'key_file': key_file, - 'ce': ce}) - - -def get_test_suite_socket(): - global GLANCE_TEST_SOCKET_FD_STR - if GLANCE_TEST_SOCKET_FD_STR in os.environ: - fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR]) - sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) - sock = socket.SocketType(_sock=sock) - sock.listen(CONF.backlog) - del os.environ[GLANCE_TEST_SOCKET_FD_STR] - os.close(fd) - return sock - return None - - -def is_uuid_like(val): - """Returns validation of a value as a UUID. - - For our purposes, a UUID is a canonical form string: - aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa - """ - try: - return str(uuid.UUID(val)) == val - except (TypeError, ValueError, AttributeError): - return False - - -def is_valid_port(port): - """Verify that port represents a valid port number.""" - return str(port).isdigit() and int(port) > 0 and int(port) <= 65535 - - -def is_valid_ipv4(address): - """Verify that address represents a valid IPv4 address.""" - try: - return netaddr.valid_ipv4(address) - except Exception: - return False - - -def is_valid_ipv6(address): - """Verify that address represents a valid IPv6 address.""" - try: - return netaddr.valid_ipv6(address) - except Exception: - return False - - -def is_valid_hostname(hostname): - """Verify whether a hostname (not an FQDN) is valid.""" - return re.match('^[a-zA-Z0-9-]+$', hostname) is not None - - -def is_valid_fqdn(fqdn): - """Verify whether a host is a valid FQDN.""" - return re.match('^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', fqdn) is not None - - -def parse_valid_host_port(host_port): - """ - Given a "host:port" string, attempts to parse it as intelligently as - possible to determine if it is valid. This includes IPv6 [host]:port form, - IPv4 ip:port form, and hostname:port or fqdn:port form. - - Invalid inputs will raise a ValueError, while valid inputs will return - a (host, port) tuple where the port will always be of type int. - """ - - try: - try: - host, port = network_utils.parse_host_port(host_port) - except Exception: - raise ValueError(_('Host and port "%s" is not valid.') % host_port) - - if not is_valid_port(port): - raise ValueError(_('Port "%s" is not valid.') % port) - - # First check for valid IPv6 and IPv4 addresses, then a generic - # hostname. Failing those, if the host includes a period, then this - # should pass a very generic FQDN check. The FQDN check for letters at - # the tail end will weed out any hilariously absurd IPv4 addresses. - - if not (is_valid_ipv6(host) or is_valid_ipv4(host) or - is_valid_hostname(host) or is_valid_fqdn(host)): - raise ValueError(_('Host "%s" is not valid.') % host) - - except Exception as ex: - raise ValueError(_('%s ' - 'Please specify a host:port pair, where host is an ' - 'IPv4 address, IPv6 address, hostname, or FQDN. If ' - 'using an IPv6 address, enclose it in brackets ' - 'separately from the port (i.e., ' - '"[fe80::a:b:c]:9876").') % ex) - - return (host, int(port)) - - -def exception_to_str(exc): - try: - error = six.text_type(exc) - except UnicodeError: - try: - error = str(exc) - except UnicodeError: - error = ("Caught '%(exception)s' exception." 
% - {"exception": exc.__class__.__name__}) - return strutils.safe_encode(error, errors='ignore') diff --git a/juno-patches/glance/glance_location_patch/glance/gateway.py b/juno-patches/glance/glance_location_patch/glance/gateway.py deleted file mode 100644 index 724dc34b..00000000 --- a/juno-patches/glance/glance_location_patch/glance/gateway.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# Copyright 2013 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from glance.api import authorization -from glance.api import policy -from glance.api import property_protections -from glance.common import property_utils -from glance.common import store_utils -import glance.db -import glance.domain -import glance.location -import glance.notifier -import glance.quota -import glance_store -from glance.sync.client.v1 import api as syncapi - - -CONF = cfg.CONF -CONF.import_opt('sync_enabled', 'glance.common.config') - - -class Gateway(object): - def __init__(self, db_api=None, store_api=None, notifier=None, - policy_enforcer=None, sync_api=None): - self.db_api = db_api or glance.db.get_api() - self.store_api = store_api or glance_store - self.store_utils = store_utils - self.notifier = notifier or glance.notifier.Notifier() - self.policy = policy_enforcer or policy.Enforcer() - self.sync_api = sync_api or syncapi - - def get_image_factory(self, context): - image_factory = glance.domain.ImageFactory() - store_image_factory = glance.location.ImageFactoryProxy( - image_factory, context, self.store_api, self.store_utils) - quota_image_factory = glance.quota.ImageFactoryProxy( - store_image_factory, context, self.db_api, self.store_utils) - policy_image_factory = policy.ImageFactoryProxy( - quota_image_factory, context, self.policy) - notifier_image_factory = glance.notifier.ImageFactoryProxy( - policy_image_factory, context, self.notifier) - if property_utils.is_property_protection_enabled(): - property_rules = property_utils.PropertyRules(self.policy) - protected_image_factory = property_protections.\ - ProtectedImageFactoryProxy(notifier_image_factory, context, - property_rules) - authorized_image_factory = authorization.ImageFactoryProxy( - protected_image_factory, context) - else: - authorized_image_factory = authorization.ImageFactoryProxy( - notifier_image_factory, context) - if CONF.sync_enabled: - sync_image_factory = glance.sync.ImageFactoryProxy( - authorized_image_factory, context, self.sync_api) - return sync_image_factory - - return authorized_image_factory - - def get_image_member_factory(self, context): - image_factory = glance.domain.ImageMemberFactory() - quota_image_factory = glance.quota.ImageMemberFactoryProxy( - image_factory, context, self.db_api, self.store_utils) - policy_member_factory = policy.ImageMemberFactoryProxy( - quota_image_factory, context, self.policy) - authorized_image_factory = authorization.ImageMemberFactoryProxy( - policy_member_factory, context) - return authorized_image_factory - 
- def get_repo(self, context): - image_repo = glance.db.ImageRepo(context, self.db_api) - store_image_repo = glance.location.ImageRepoProxy( - image_repo, context, self.store_api, self.store_utils) - quota_image_repo = glance.quota.ImageRepoProxy( - store_image_repo, context, self.db_api, self.store_utils) - policy_image_repo = policy.ImageRepoProxy( - quota_image_repo, context, self.policy) - notifier_image_repo = glance.notifier.ImageRepoProxy( - policy_image_repo, context, self.notifier) - if property_utils.is_property_protection_enabled(): - property_rules = property_utils.PropertyRules(self.policy) - protected_image_repo = property_protections.\ - ProtectedImageRepoProxy(notifier_image_repo, context, - property_rules) - authorized_image_repo = authorization.ImageRepoProxy( - protected_image_repo, context) - else: - authorized_image_repo = authorization.ImageRepoProxy( - notifier_image_repo, context) - if CONF.sync_enabled: - sync_image_repo = glance.sync.ImageRepoProxy( - authorized_image_repo, context, self.sync_api) - return sync_image_repo - - return authorized_image_repo - - def get_task_factory(self, context): - task_factory = glance.domain.TaskFactory() - policy_task_factory = policy.TaskFactoryProxy( - task_factory, context, self.policy) - notifier_task_factory = glance.notifier.TaskFactoryProxy( - policy_task_factory, context, self.notifier) - authorized_task_factory = authorization.TaskFactoryProxy( - notifier_task_factory, context) - return authorized_task_factory - - def get_task_repo(self, context): - task_repo = glance.db.TaskRepo(context, self.db_api) - policy_task_repo = policy.TaskRepoProxy( - task_repo, context, self.policy) - notifier_task_repo = glance.notifier.TaskRepoProxy( - policy_task_repo, context, self.notifier) - authorized_task_repo = authorization.TaskRepoProxy( - notifier_task_repo, context) - return authorized_task_repo - - def get_task_stub_repo(self, context): - task_stub_repo = glance.db.TaskRepo(context, self.db_api) - policy_task_stub_repo = policy.TaskStubRepoProxy( - task_stub_repo, context, self.policy) - notifier_task_stub_repo = glance.notifier.TaskStubRepoProxy( - policy_task_stub_repo, context, self.notifier) - authorized_task_stub_repo = authorization.TaskStubRepoProxy( - notifier_task_stub_repo, context) - return authorized_task_stub_repo - - def get_task_executor_factory(self, context): - task_repo = self.get_task_repo(context) - image_repo = self.get_repo(context) - image_factory = self.get_image_factory(context) - return glance.domain.TaskExecutorFactory(task_repo, - image_repo, - image_factory) - - def get_metadef_namespace_factory(self, context): - ns_factory = glance.domain.MetadefNamespaceFactory() - policy_ns_factory = policy.MetadefNamespaceFactoryProxy( - ns_factory, context, self.policy) - authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy( - policy_ns_factory, context) - return authorized_ns_factory - - def get_metadef_namespace_repo(self, context): - ns_repo = glance.db.MetadefNamespaceRepo(context, self.db_api) - policy_ns_repo = policy.MetadefNamespaceRepoProxy( - ns_repo, context, self.policy) - authorized_ns_repo = authorization.MetadefNamespaceRepoProxy( - policy_ns_repo, context) - return authorized_ns_repo - - def get_metadef_object_factory(self, context): - object_factory = glance.domain.MetadefObjectFactory() - policy_object_factory = policy.MetadefObjectFactoryProxy( - object_factory, context, self.policy) - authorized_object_factory = authorization.MetadefObjectFactoryProxy( - policy_object_factory, 
context) - return authorized_object_factory - - def get_metadef_object_repo(self, context): - object_repo = glance.db.MetadefObjectRepo(context, self.db_api) - policy_object_repo = policy.MetadefObjectRepoProxy( - object_repo, context, self.policy) - authorized_object_repo = authorization.MetadefObjectRepoProxy( - policy_object_repo, context) - return authorized_object_repo - - def get_metadef_resource_type_factory(self, context): - resource_type_factory = glance.domain.MetadefResourceTypeFactory() - policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy( - resource_type_factory, context, self.policy) - authorized_resource_type_factory = \ - authorization.MetadefResourceTypeFactoryProxy( - policy_resource_type_factory, context) - return authorized_resource_type_factory - - def get_metadef_resource_type_repo(self, context): - resource_type_repo = glance.db.MetadefResourceTypeRepo( - context, self.db_api) - policy_object_repo = policy.MetadefResourceTypeRepoProxy( - resource_type_repo, context, self.policy) - authorized_resource_type_repo = \ - authorization.MetadefResourceTypeRepoProxy(policy_object_repo, - context) - return authorized_resource_type_repo - - def get_metadef_property_factory(self, context): - prop_factory = glance.domain.MetadefPropertyFactory() - policy_prop_factory = policy.MetadefPropertyFactoryProxy( - prop_factory, context, self.policy) - authorized_prop_factory = authorization.MetadefPropertyFactoryProxy( - policy_prop_factory, context) - return authorized_prop_factory - - def get_metadef_property_repo(self, context): - prop_repo = glance.db.MetadefPropertyRepo(context, self.db_api) - policy_prop_repo = policy.MetadefPropertyRepoProxy( - prop_repo, context, self.policy) - authorized_prop_repo = authorization.MetadefPropertyRepoProxy( - policy_prop_repo, context) - return authorized_prop_repo diff --git a/juno-patches/glance/glance_location_patch/glance/location.py b/juno-patches/glance/glance_location_patch/glance/location.py deleted file mode 100644 index 5adb4a84..00000000 --- a/juno-patches/glance/glance_location_patch/glance/location.py +++ /dev/null @@ -1,459 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import collections -import copy -import re - -import glance_store as store -from oslo.config import cfg - -from glance.common import exception -from glance.common import utils -import glance.domain.proxy -from glance.openstack.common import excutils -from glance.openstack.common import gettextutils -import glance.openstack.common.log as logging - - -_LE = gettextutils._LE - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class ImageRepoProxy(glance.domain.proxy.Repo): - - def __init__(self, image_repo, context, store_api, store_utils): - self.context = context - self.store_api = store_api - proxy_kwargs = {'context': context, 'store_api': store_api, - 'store_utils': store_utils} - super(ImageRepoProxy, self).__init__(image_repo, - item_proxy_class=ImageProxy, - item_proxy_kwargs=proxy_kwargs) - - def _set_acls(self, image): - public = image.visibility == 'public' - member_ids = [] - if image.locations and not public: - member_repo = image.get_member_repo() - member_ids = [m.member_id for m in member_repo.list()] - for location in image.locations: - self.store_api.set_acls(location['url'], public=public, - read_tenants=member_ids, - context=self.context) - - def add(self, image): - result = super(ImageRepoProxy, self).add(image) - self._set_acls(image) - return result - - def save(self, image): - result = super(ImageRepoProxy, self).save(image) - self._set_acls(image) - return result - - -def _check_location_uri(context, store_api, uri): - """Check if an image location is valid. - - :param context: Glance request context - :param store_api: store API module - :param uri: location's uri string - """ - is_ok = True - try: - size = store_api.get_size_from_backend(uri, context=context) - # NOTE(zhiyan): Some stores return zero when it catch exception - is_ok = size > 0 - except (store.UnknownScheme, store.NotFound): - is_ok = False - if not is_ok: - reason = _('Invalid location') - raise exception.BadStoreUri(message=reason) - - -pattern = re.compile(r'^https?://\S+/v2/images/\S+$') - -def is_glance_location(loc_url): - return pattern.match(loc_url) - - -def _check_glance_loc(context, location): - uri = location['url'] - if not is_glance_location(uri): - return False - if 'auth_token=' in uri: - return True - location['url'] = uri + ('?auth_token=' + context.auth_tok) - return True - - -def _check_image_location(context, store_api, location): - if not _check_glance_loc(context, location): - _check_location_uri(context, store_api, location['url']) - store_api.check_location_metadata(location['metadata']) - - -def _set_image_size(context, image, locations): - if not image.size: - for location in locations: - size_from_backend = store.get_size_from_backend( - location['url'], context=context) - - if size_from_backend: - # NOTE(flwang): This assumes all locations have the same size - image.size = size_from_backend - break - - -def _count_duplicated_locations(locations, new): - """ - To calculate the count of duplicated locations for new one. 
- - :param locations: The exiting image location set - :param new: The new image location - :returns: The count of duplicated locations - """ - - ret = 0 - for loc in locations: - if (loc['url'] == new['url'] and loc['metadata'] == new['metadata']): - ret += 1 - return ret - - -def _remove_extra_info(location): - url = location['url'] - if url.startswith('http'): - start = url.find('auth_token') - if start == -1: - return - end = url.find('&', start) - if end == -1: - if url[start - 1] == '?': - url = re.sub(r'\?auth_token=\S+', r'', url) - elif url[start - 1] == '&': - url = re.sub(r'&auth_token=\S+', r'', url) - else: - url = re.sub(r'auth_token=\S+&', r'', url) - - location['url'] = url - - -class ImageFactoryProxy(glance.domain.proxy.ImageFactory): - def __init__(self, factory, context, store_api, store_utils): - self.context = context - self.store_api = store_api - proxy_kwargs = {'context': context, 'store_api': store_api, - 'store_utils': store_utils} - super(ImageFactoryProxy, self).__init__(factory, - proxy_class=ImageProxy, - proxy_kwargs=proxy_kwargs) - - def new_image(self, **kwargs): - locations = kwargs.get('locations', []) - for loc in locations: - _check_image_location(self.context, self.store_api, loc) - loc['status'] = 'active' - if _count_duplicated_locations(locations, loc) > 1: - raise exception.DuplicateLocation(location=loc['url']) - return super(ImageFactoryProxy, self).new_image(**kwargs) - - -class StoreLocations(collections.MutableSequence): - """ - The proxy for store location property. It takes responsibility for: - 1. Location uri correctness checking when adding a new location. - 2. Remove the image data from the store when a location is removed - from an image. - """ - def __init__(self, image_proxy, value): - self.image_proxy = image_proxy - if isinstance(value, list): - self.value = value - else: - self.value = list(value) - - def append(self, location): - # NOTE(flaper87): Insert this - # location at the very end of - # the value list. 
- self.insert(len(self.value), location) - - def extend(self, other): - if isinstance(other, StoreLocations): - locations = other.value - else: - locations = list(other) - - for location in locations: - self.append(location) - - def insert(self, i, location): - _check_image_location(self.image_proxy.context, - self.image_proxy.store_api, location) - _remove_extra_info(location) - location['status'] = 'active' - if _count_duplicated_locations(self.value, location) > 0: - raise exception.DuplicateLocation(location=location['url']) - - self.value.insert(i, location) - _set_image_size(self.image_proxy.context, - self.image_proxy, - [location]) - - def pop(self, i=-1): - location = self.value.pop(i) - try: - self.image_proxy.store_utils.delete_image_location_from_backend( - self.image_proxy.context, - self.image_proxy.image.image_id, - location) - except Exception: - with excutils.save_and_reraise_exception(): - self.value.insert(i, location) - return location - - def count(self, location): - return self.value.count(location) - - def index(self, location, *args): - return self.value.index(location, *args) - - def remove(self, location): - if self.count(location): - self.pop(self.index(location)) - else: - self.value.remove(location) - - def reverse(self): - self.value.reverse() - - # Mutable sequence, so not hashable - __hash__ = None - - def __getitem__(self, i): - return self.value.__getitem__(i) - - def __setitem__(self, i, location): - _check_image_location(self.image_proxy.context, - self.image_proxy.store_api, location) - location['status'] = 'active' - self.value.__setitem__(i, location) - _set_image_size(self.image_proxy.context, - self.image_proxy, - [location]) - - def __delitem__(self, i): - location = None - try: - location = self.value.__getitem__(i) - except Exception: - return self.value.__delitem__(i) - self.image_proxy.store_utils.delete_image_location_from_backend( - self.image_proxy.context, - self.image_proxy.image.image_id, - location) - self.value.__delitem__(i) - - def __delslice__(self, i, j): - i = max(i, 0) - j = max(j, 0) - locations = [] - try: - locations = self.value.__getslice__(i, j) - except Exception: - return self.value.__delslice__(i, j) - for location in locations: - self.image_proxy.store_utils.delete_image_location_from_backend( - self.image_proxy.context, - self.image_proxy.image.image_id, - location) - self.value.__delitem__(i) - - def __iadd__(self, other): - self.extend(other) - return self - - def __contains__(self, location): - return location in self.value - - def __len__(self): - return len(self.value) - - def __cast(self, other): - if isinstance(other, StoreLocations): - return other.value - else: - return other - - def __cmp__(self, other): - return cmp(self.value, self.__cast(other)) - - def __iter__(self): - return iter(self.value) - - def __copy__(self): - return type(self)(self.image_proxy, self.value) - - def __deepcopy__(self, memo): - # NOTE(zhiyan): Only copy location entries, others can be reused. - value = copy.deepcopy(self.value, memo) - self.image_proxy.image.locations = value - return type(self)(self.image_proxy, value) - - -def _locations_proxy(target, attr): - """ - Make a location property proxy on the image object. 
- - :param target: the image object on which to add the proxy - :param attr: the property proxy we want to hook - """ - def get_attr(self): - value = getattr(getattr(self, target), attr) - return StoreLocations(self, value) - - def set_attr(self, value): - if not isinstance(value, (list, StoreLocations)): - reason = _('Invalid locations') - raise exception.BadStoreUri(message=reason) - ori_value = getattr(getattr(self, target), attr) - if ori_value != value: - # NOTE(zhiyan): Enforced locations list was previously empty list. - if len(ori_value) > 0: - raise exception.Invalid(_('Original locations is not empty: ' - '%s') % ori_value) - # NOTE(zhiyan): Check locations are all valid. - for location in value: - _check_image_location(self.context, self.store_api, - location) - location['status'] = 'active' - if _count_duplicated_locations(value, location) > 1: - raise exception.DuplicateLocation(location=location['url']) - _set_image_size(self.context, getattr(self, target), value) - return setattr(getattr(self, target), attr, list(value)) - - def del_attr(self): - value = getattr(getattr(self, target), attr) - while len(value): - self.store_utils.delete_image_location_from_backend( - self.context, - self.image.image_id, - value[0]) - del value[0] - setattr(getattr(self, target), attr, value) - return delattr(getattr(self, target), attr) - - return property(get_attr, set_attr, del_attr) - - -class ImageProxy(glance.domain.proxy.Image): - - locations = _locations_proxy('image', 'locations') - - def __init__(self, image, context, store_api, store_utils): - self.image = image - self.context = context - self.store_api = store_api - self.store_utils = store_utils - proxy_kwargs = { - 'context': context, - 'image': self, - 'store_api': store_api, - } - super(ImageProxy, self).__init__( - image, member_repo_proxy_class=ImageMemberRepoProxy, - member_repo_proxy_kwargs=proxy_kwargs) - - def delete(self): - self.image.delete() - if self.image.locations: - for location in self.image.locations: - self.store_utils.delete_image_location_from_backend( - self.context, - self.image.image_id, - location) - - def set_data(self, data, size=None): - if size is None: - size = 0 # NOTE(markwash): zero -> unknown size - location, size, checksum, loc_meta = self.store_api.add_to_backend( - CONF, - self.image.image_id, - utils.LimitingReader(utils.CooperativeReader(data), - CONF.image_size_cap), - size, - context=self.context) - loc_meta = loc_meta or {} - loc_meta['is_default'] = 'true' - self.image.locations = [{'url': location, 'metadata': loc_meta, - 'status': 'active'}] - self.image.size = size - self.image.checksum = checksum - self.image.status = 'active' - - def get_data(self, offset=0, chunk_size=None): - if not self.image.locations: - raise store.NotFound(_("No image data could be found")) - err = None - for loc in self.image.locations: - if is_glance_location(loc['url']): - continue - try: - data, size = self.store_api.get_from_backend( - loc['url'], - offset=offset, - chunk_size=chunk_size, - context=self.context) - - return data - except Exception as e: - LOG.warn(_('Get image %(id)s data failed: ' - '%(err)s.') % {'id': self.image.image_id, - 'err': utils.exception_to_str(e)}) - err = e - # tried all locations - LOG.error(_LE('Glance tried all active locations to get data for ' - 'image %s but all have failed.') % self.image.image_id) - raise err - - -class ImageMemberRepoProxy(glance.domain.proxy.Repo): - def __init__(self, repo, image, context, store_api): - self.repo = repo - self.image = image - 
self.context = context - self.store_api = store_api - super(ImageMemberRepoProxy, self).__init__(repo) - - def _set_acls(self): - public = self.image.visibility == 'public' - if self.image.locations and not public: - member_ids = [m.member_id for m in self.repo.list()] - for location in self.image.locations: - self.store_api.set_acls(location['url'], public=public, - read_tenants=member_ids, - context=self.context) - - def add(self, member): - super(ImageMemberRepoProxy, self).add(member) - self._set_acls() - - def remove(self, member): - super(ImageMemberRepoProxy, self).remove(member) - self._set_acls() diff --git a/juno-patches/glance/glance_location_patch/installation/install.sh b/juno-patches/glance/glance_location_patch/installation/install.sh deleted file mode 100644 index f7140674..00000000 --- a/juno-patches/glance/glance_location_patch/installation/install.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -CURPATH=$(cd "$(dirname "$0")"; pwd) -_PYTHON_INSTALL_DIR=${OPENSTACK_INSTALL_DIR} -if [ ! -n ${_PYTHON_INSTALL_DIR} ];then - _PYTHON_INSTALL_DIR="/usr/lib/python2.7/dist-packages" -fi -_GLANCE_DIR="${_PYTHON_INSTALL_DIR}/glance" - -# if you did not make changes to the installation files, -# please do not edit the following directories. -_PATCH_DIR="${CURPATH}/.." -_BACKUP_DIR="${_GLANCE_DIR}/glance-installation-backup" - -_SCRIPT_LOGFILE="/var/log/glance/installation/install.log" - -api_config_option_list="sync_enabled=True sync_server_port=9595 sync_server_host=127.0.0.1" - -export PS4='+{$LINENO:${FUNCNAME[0]}}' - -ERRTRAP() -{ - echo "[LINE:$1] Error: Command or function exited with status $?" -} - -function log() -{ - echo "$@" - echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE -} - -function process_stop -{ - PID=`ps -efw|grep "$1"|grep -v grep|awk '{print $2}'` - echo "PID is: $PID">>$_SCRIPT_LOGFILE - if [ "x${PID}" != "x" ]; then - for kill_id in $PID - do - kill -9 ${kill_id} - if [ $? -ne 0 ]; then - echo "[[stop glance-sync]]$1 stop failed.">>$_SCRIPT_LOGFILE - exit 1 - fi - done - echo "[[stop glance-sync]]$1 stop ok.">>$_SCRIPT_LOGFILE - fi -} - -function restart_services -{ - log "restarting glance ..." - service glance-api restart - service glance-registry restart - process_stop "glance-sync" - python /usr/bin/glance-sync --config-file=/etc/glance/glance-sync.conf & -} - -trap 'ERRTRAP $LINENO' ERR - -if [[ ${EUID} -ne 0 ]]; then - log "Please run as root." - exit 1 -fi - -if [ ! -d "/var/log/glance/installation" ]; then - mkdir /var/log/glance/installation - touch _SCRIPT_LOGFILE -fi - -cd `dirname $0` - - -log "checking previous installation..." -if [ -d "${_BACKUP_DIR}/glance" ] ; then - log "It seems glance cascading has already been installed!" - log "Please check README for solution if this is not true." - exit 1 -fi - -log "backing up current files that might be overwritten..." 
-mkdir -p "${_BACKUP_DIR}/glance" -mkdir -p "${_BACKUP_DIR}/etc" -mkdir -p "${_BACKUP_DIR}/etc/glance" - -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/glance" - rm -r "${_BACKUP_DIR}/etc" - log "Error in config backup, aborted." - exit 1 -fi - -log "copying in new files..." - -cp -r "${_PATCH_DIR}/glance" `dirname ${_GLANCE_DIR}` - -glanceEggDir=`ls ${_PYTHON_INSTALL_DIR} |grep -e glance- |grep -e egg-info ` -if [ ! -d ${_PYTHON_INSTALL_DIR}/${glanceEggDir} ]; then - log "glance install dir not exist. Pleas check manually." - exit 1 -fi -cp "${_PATCH_DIR}/glance-egg-info/entry_points.txt" "${_PYTHON_INSTALL_DIR}/${glanceEggDir}/" -if [ $? -ne 0 ] ; then - log "Error in copying, aborted. Please install manually." - exit 1 -fi - -#restart services -restart_services -if [ $? -ne 0 ] ; then - log "There was an error in restarting the service, please restart glance manually." - exit 1 -fi - -log "Completed." -log "See README to get started." - -exit 0 diff --git a/juno-patches/glance_store/glance_store_patch/README.md b/juno-patches/glance_store/glance_store_patch/README.md deleted file mode 100644 index 25c68c18..00000000 --- a/juno-patches/glance_store/glance_store_patch/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Glance Store Patch ------------- - we simple modified the following two python file for the cascading need: - - backend.py: add a little code for handling the glance-location; - - _drivers/http.py: alike backend.py, adding some handle logic for glance-location. diff --git a/juno-patches/glance_store/glance_store_patch/glance_store/_drivers/http.py b/juno-patches/glance_store/glance_store_patch/glance_store/_drivers/http.py deleted file mode 100644 index e5fb4f82..00000000 --- a/juno-patches/glance_store/glance_store_patch/glance_store/_drivers/http.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import httplib -import logging -import socket -import urlparse - -import glance_store.driver -from glance_store import exceptions -from glance_store.i18n import _ -from glance_store.openstack.common import jsonutils -import glance_store.location - -LOG = logging.getLogger(__name__) - - -MAX_REDIRECTS = 5 - - -class StoreLocation(glance_store.location.StoreLocation): - - """Class describing an HTTP(S) URI""" - - def process_specs(self): - self.scheme = self.specs.get('scheme', 'http') - self.netloc = self.specs['netloc'] - self.user = self.specs.get('user') - self.password = self.specs.get('password') - self.path = self.specs.get('path') - - def _get_credstring(self): - if self.user: - return '%s:%s@' % (self.user, self.password) - return '' - - def get_uri(self): - return "%s://%s%s%s" % ( - self.scheme, - self._get_credstring(), - self.netloc, - self.path) - - def parse_uri(self, uri): - """ - Parse URLs. This method fixes an issue where credentials specified - in the URL are interpreted differently in Python 2.6.1+ than prior - versions of Python. 
- """ - pieces = urlparse.urlparse(uri) - assert pieces.scheme in ('https', 'http') - self.scheme = pieces.scheme - netloc = pieces.netloc - path = pieces.path - try: - if '@' in netloc: - creds, netloc = netloc.split('@') - else: - creds = None - except ValueError: - # Python 2.6.1 compat - # see lp659445 and Python issue7904 - if '@' in path: - creds, path = path.split('@') - else: - creds = None - if creds: - try: - self.user, self.password = creds.split(':') - except ValueError: - reason = _("Credentials are not well-formatted.") - LOG.info(reason) - raise exceptions.BadStoreUri(message=reason) - else: - self.user = None - if netloc == '': - LOG.info(_("No address specified in HTTP URL")) - raise exceptions.BadStoreUri(uri=uri) - - self.netloc = netloc - self.path = path - self.token = None - if pieces.query: - params = pieces.query.split('&') - for param in params: - if 'auth_token' == param.split("=")[0].strip(): - self.token = param.split("=")[1] - break - - -def http_response_iterator(conn, response, size): - """ - Return an iterator for a file-like object. - - :param conn: HTTP(S) Connection - :param response: httplib.HTTPResponse object - :param size: Chunk size to iterate with - """ - chunk = response.read(size) - while chunk: - yield chunk - chunk = response.read(size) - conn.close() - - -class Store(glance_store.driver.Store): - - """An implementation of the HTTP(S) Backend Adapter""" - - def get(self, location, offset=0, chunk_size=None, context=None): - """ - Takes a `glance_store.location.Location` object that indicates - where to find the image file, and returns a tuple of generator - (for reading the image file) and image_size - - :param location `glance_store.location.Location` object, supplied - from glance_store.location.get_location_from_uri() - """ - conn, resp, content_length = self._query(location, 'GET') - - cs = chunk_size or self.READ_CHUNKSIZE - iterator = http_response_iterator(conn, resp, cs) - - class ResponseIndexable(glance_store.Indexable): - def another(self): - try: - return self.wrapped.next() - except StopIteration: - return '' - - return (ResponseIndexable(iterator, content_length), content_length) - - def get_schemes(self): - return ('http', 'https') - - def get_size(self, location, context=None): - """ - Takes a `glance_store.location.Location` object that indicates - where to find the image file, and returns the size - - :param location `glance_store.location.Location` object, supplied - from glance_store.location.get_location_from_uri() - """ - try: - size = self._query(location, 'HEAD')[2] - except socket.error: - reason = _("The HTTP URL is invalid.") - LOG.info(reason) - raise exceptions.BadStoreUri(message=reason) - except Exception: - # NOTE(flaper87): Catch more granular exceptions, - # keeping this branch for backwards compatibility. 
- return 0 - return size - - def _query(self, location, verb, depth=0): - if depth > MAX_REDIRECTS: - reason = (_("The HTTP URL exceeded %s maximum " - "redirects.") % MAX_REDIRECTS) - LOG.debug(reason) - raise exceptions.MaxRedirectsExceeded(message=reason) - loc = location.store_location - conn_class = self._get_conn_class(loc) - conn = conn_class(loc.netloc) - hearders = {} - if loc.token: - # hearders.setdefault('x-auth-token', loc.token) - # verb = 'GET' - # conn.request(verb, loc.path, "", hearders) - # resp = conn.getresponse() - # try: - # size = jsonutils.loads(resp.read())['size'] - # except Exception: - # size = 0 - # raise exception.BadStoreUri(loc.path, reason) - return (conn, None, 1) - - conn.request(verb, loc.path, "", {}) - resp = conn.getresponse() - - # Check for bad status codes - if resp.status >= 400: - if resp.status == httplib.NOT_FOUND: - reason = _("HTTP datastore could not find image at URI.") - LOG.debug(reason) - raise exceptions.NotFound(message=reason) - - reason = (_("HTTP URL %(url)s returned a " - "%(status)s status code.") % - dict(url=loc.path, status=resp.status)) - LOG.debug(reason) - raise exceptions.BadStoreUri(message=reason) - - location_header = resp.getheader("location") - if location_header: - if resp.status not in (301, 302): - reason = (_("The HTTP URL %(url)s attempted to redirect " - "with an invalid %(status)s status code.") % - dict(url=loc.path, status=resp.status)) - LOG.info(reason) - raise exceptions.BadStoreUri(message=reason) - location_class = glance_store.location.Location - new_loc = location_class(location.store_name, - location.store_location.__class__, - uri=location_header, - image_id=location.image_id, - store_specs=location.store_specs) - return self._query(new_loc, verb, depth + 1) - content_length = int(resp.getheader('content-length', 0)) - return (conn, resp, content_length) - - def _get_conn_class(self, loc): - """ - Returns connection class for accessing the resource. Useful - for dependency injection and stubouts in testing... - """ - return {'http': httplib.HTTPConnection, - 'https': httplib.HTTPSConnection}[loc.scheme] diff --git a/juno-patches/glance_store/glance_store_patch/glance_store/backend.py b/juno-patches/glance_store/glance_store_patch/glance_store/backend.py deleted file mode 100644 index bbe82a84..00000000 --- a/juno-patches/glance_store/glance_store_patch/glance_store/backend.py +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright 2010-2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
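Note: the deleted http.py driver and backend.py above carry the cascading-specific handling the README describes: a location whose URL points at another Glance's /v2/images endpoint is treated as a "glance location", and the auth token travels in the URL's query string. A small self-contained sketch of that URL handling, reusing the same regex as the deleted code; the helper names are illustrative, not part of glance_store:

    import re

    try:                                    # Python 2, as used in this patch
        from urlparse import urlparse
    except ImportError:                     # Python 3
        from urllib.parse import urlparse

    GLANCE_LOCATION = re.compile(r'^https?://\S+/v2/images/\S+$')


    def is_glance_location(url):
        # Same pattern the deleted backend.py/location.py use.
        return GLANCE_LOCATION.match(url) is not None


    def extract_auth_token(url):
        # Mirrors StoreLocation.parse_uri: pick auth_token out of the query string.
        for param in urlparse(url).query.split('&'):
            if param.split('=')[0].strip() == 'auth_token':
                return param.split('=')[1]
        return None


    url = 'http://cascading-glance:9292/v2/images/3b3f4a1e?auth_token=abc123'
    assert is_glance_location(url)
    assert extract_auth_token(url) == 'abc123'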
- -import logging -import re -import sys - -from oslo.config import cfg -from stevedore import driver - -from glance_store import exceptions -from glance_store.i18n import _ -from glance_store import location - - -LOG = logging.getLogger(__name__) - -_DEPRECATED_STORE_OPTS = [ - cfg.DeprecatedOpt('known_stores', group='DEFAULT'), - cfg.DeprecatedOpt('default_store', group='DEFAULT') -] - -_STORE_OPTS = [ - cfg.ListOpt('stores', default=['file', 'http'], - help=_('List of stores enabled'), - deprecated_opts=[_DEPRECATED_STORE_OPTS[0]]), - cfg.StrOpt('default_store', default='file', - help=_("Default scheme to use to store image data. The " - "scheme must be registered by one of the stores " - "defined by the 'stores' config option."), - deprecated_opts=[_DEPRECATED_STORE_OPTS[1]]) -] - -CONF = cfg.CONF -_STORE_CFG_GROUP = 'glance_store' - - -def _oslo_config_options(): - return ((opt, _STORE_CFG_GROUP) for opt in _STORE_OPTS) - - -def register_opts(conf): - for opt, group in _oslo_config_options(): - conf.register_opt(opt, group=group) - register_store_opts(conf) - - -def register_store_opts(conf): - for store_entry in set(conf.glance_store.stores): - LOG.debug("Registering options for %s" % store_entry) - store_cls = _load_store(conf, store_entry, False) - - if store_cls is None: - msg = _('Store %s not found') % store_entry - raise exceptions.GlanceStoreException(message=msg) - - if getattr(store_cls, 'OPTIONS', None) is not None: - # NOTE(flaper87): To be removed in k-2. This should - # give deployers enough time to migrate their systems - # and move configs under the new section. - for opt in store_cls.OPTIONS: - opt.deprecated_opts = [cfg.DeprecatedOpt(opt.name, - group='DEFAULT')] - conf.register_opt(opt, group=_STORE_CFG_GROUP) - - -class Indexable(object): - """Indexable for file-like objs iterators - - Wrapper that allows an iterator or filelike be treated as an indexable - data structure. This is required in the case where the return value from - Store.get() is passed to Store.add() when adding a Copy-From image to a - Store where the client library relies on eventlet GreenSockets, in which - case the data to be written is indexed over. - """ - - def __init__(self, wrapped, size): - """ - Initialize the object - - :param wrappped: the wrapped iterator or filelike. - :param size: the size of data available - """ - self.wrapped = wrapped - self.size = int(size) if size else (wrapped.len - if hasattr(wrapped, 'len') else 0) - self.cursor = 0 - self.chunk = None - - def __iter__(self): - """ - Delegate iteration to the wrapped instance. - """ - for self.chunk in self.wrapped: - yield self.chunk - - def __getitem__(self, i): - """ - Index into the next chunk (or previous chunk in the case where - the last data returned was not fully consumed). - - :param i: a slice-to-the-end - """ - start = i.start if isinstance(i, slice) else i - if start < self.cursor: - return self.chunk[(start - self.cursor):] - - self.chunk = self.another() - if self.chunk: - self.cursor += len(self.chunk) - - return self.chunk - - def another(self): - """Implemented by subclasses to return the next element""" - raise NotImplementedError - - def getvalue(self): - """ - Return entire string value... used in testing - """ - return self.wrapped.getvalue() - - def __len__(self): - """ - Length accessor. 
- """ - return self.size - - -def _load_store(conf, store_entry, invoke_load=True): - store_cls = None - try: - LOG.debug("Attempting to import store %s", store_entry) - mgr = driver.DriverManager('glance_store.drivers', - store_entry, - invoke_args=[conf], - invoke_on_load=invoke_load) - return mgr.driver - except RuntimeError as ex: - LOG.warn("Failed to load driver %(driver)s." - "The driver will be disabled" % dict(driver=driver)) - - -def _load_stores(conf): - for store_entry in set(conf.glance_store.stores): - try: - # FIXME(flaper87): Don't hide BadStoreConfiguration - # exceptions. These exceptions should be propagated - # to the user of the library. - store_instance = _load_store(conf, store_entry) - - if not store_instance: - continue - - yield (store_entry, store_instance) - - except exceptions.BadStoreConfiguration as e: - continue - - -pattern = re.compile(r'^https?://\S+/v2/images/\S+$') - -def is_glance_location(loc_url): - return pattern.match(loc_url) - - -def create_stores(conf=CONF): - """ - Registers all store modules and all schemes - from the given config. Duplicates are not re-registered. - """ - store_count = 0 - store_classes = set() - - for (store_entry, store_instance) in _load_stores(conf): - schemes = store_instance.get_schemes() - store_instance.configure() - if not schemes: - raise exceptions.BackendException('Unable to register store %s. ' - 'No schemes associated with it.' - % store_cls) - else: - LOG.debug("Registering store %s with schemes %s", - store_entry, schemes) - - scheme_map = {} - for scheme in schemes: - loc_cls = store_instance.get_store_location_class() - scheme_map[scheme] = { - 'store': store_instance, - 'location_class': loc_cls, - } - location.register_scheme_map(scheme_map) - store_count += 1 - - return store_count - - -def verify_default_store(): - scheme = cfg.CONF.glance_store.default_store - try: - get_store_from_scheme(scheme) - except exceptions.UnknownScheme: - msg = _("Store for scheme %s not found") % scheme - raise RuntimeError(msg) - - -def get_known_schemes(): - """Returns list of known schemes""" - return location.SCHEME_TO_CLS_MAP.keys() - - -def get_store_from_scheme(scheme): - """ - Given a scheme, return the appropriate store object - for handling that scheme. - """ - if scheme not in location.SCHEME_TO_CLS_MAP: - raise exceptions.UnknownScheme(scheme=scheme) - scheme_info = location.SCHEME_TO_CLS_MAP[scheme] - return scheme_info['store'] - - -def get_store_from_uri(uri): - """ - Given a URI, return the store object that would handle - operations on the URI. 
- - :param uri: URI to analyze - """ - scheme = uri[0:uri.find('/') - 1] - return get_store_from_scheme(scheme) - - -def get_from_backend(uri, offset=0, chunk_size=None, context=None): - """Yields chunks of data from backend specified by uri""" - - loc = location.get_location_from_uri(uri) - store = get_store_from_uri(uri) - - try: - return store.get(loc, offset=offset, - chunk_size=chunk_size, - context=context) - except NotImplementedError: - raise exceptions.StoreGetNotSupported - - -def get_size_from_backend(uri, context=None): - """Retrieves image size from backend specified by uri""" - if is_glance_location(uri): - uri += ('?auth_token=' + context.auth_tok) - loc = location.get_location_from_uri(uri) - store = get_store_from_uri(uri) - - return store.get_size(loc, context=context) - - -def delete_from_backend(uri, context=None): - """Removes chunks of data from backend specified by uri""" - loc = location.get_location_from_uri(uri) - store = get_store_from_uri(uri) - - try: - return store.delete(loc, context=context) - except NotImplementedError: - raise exceptions.StoreDeleteNotSupported - - -def get_store_from_location(uri): - """ - Given a location (assumed to be a URL), attempt to determine - the store from the location. We use here a simple guess that - the scheme of the parsed URL is the store... - - :param uri: Location to check for the store - """ - loc = location.get_location_from_uri(uri) - return loc.store_name - - -def safe_delete_from_backend(uri, image_id, context=None): - """Given a uri, delete an image from the store.""" - try: - return delete_from_backend(uri, context=context) - except exceptions.NotFound: - msg = _('Failed to delete image %s in store from URI') - LOG.warn(msg % image_id) - except exceptions.StoreDeleteNotSupported as e: - LOG.warn(str(e)) - except exceptions.UnsupportedBackend: - exc_type = sys.exc_info()[0].__name__ - msg = (_('Failed to delete image %(image_id)s ' - 'from store (%(exc_type)s)') % - dict(image_id=image_id, exc_type=exc_type)) - LOG.error(msg) - - -def _delete_image_from_backend(context, store_api, image_id, uri): - if CONF.delayed_delete: - store_api.schedule_delayed_delete_from_backend(context, uri, image_id) - else: - store_api.safe_delete_from_backend(context, uri, image_id) - - -def check_location_metadata(val, key=''): - if isinstance(val, dict): - for key in val: - check_location_metadata(val[key], key=key) - elif isinstance(val, list): - ndx = 0 - for v in val: - check_location_metadata(v, key='%s[%d]' % (key, ndx)) - ndx = ndx + 1 - elif not isinstance(val, unicode): - raise exceptions.BackendException(_("The image metadata key %(key)s " - "has an invalid type of %(type)s. " - "Only dict, list, and unicode are " - "supported.") - % dict(key=key, type=type(val))) - - -def store_add_to_backend(image_id, data, size, store, context=None): - """ - A wrapper around a call to each stores add() method. This gives glance - a common place to check the output - - :param image_id: The image add to which data is added - :param data: The data to be stored - :param size: The length of the data in bytes - :param store: The store to which the data is being added - :return: The url location of the file, - the size amount of data, - the checksum of the data - the storage systems metadata dictionary for the location - """ - (location, size, checksum, metadata) = store.add(image_id, data, size) - if metadata is not None: - if not isinstance(metadata, dict): - msg = (_("The storage driver %(driver)s returned invalid " - " metadata %(metadata)s. 
This must be a dictionary type") - % dict(driver=str(store), metadata=str(metadata))) - LOG.error(msg) - raise exceptions.BackendException(msg) - try: - check_location_metadata(metadata) - except exceptions.BackendException as e: - e_msg = (_("A bad metadata structure was returned from the " - "%(driver)s storage driver: %(metadata)s. %(e)s.") % - dict(driver=unicode(store), - metadata=unicode(metadata), - e=unicode(e))) - LOG.error(e_msg) - raise exceptions.BackendException(e_msg) - return (location, size, checksum, metadata) - - -def add_to_backend(conf, image_id, data, size, scheme=None, context=None): - if scheme is None: - scheme = conf['glance_store']['default_store'] - store = get_store_from_scheme(scheme) - try: - return store_add_to_backend(image_id, data, size, store, context) - except NotImplementedError: - raise exceptions.StoreAddNotSupported - - -def set_acls(location_uri, public=False, read_tenants=[], - write_tenants=None, context=None): - - if write_tenants is None: - write_tenants = [] - - loc = location.get_location_from_uri(location_uri) - scheme = get_store_from_location(location_uri) - store = get_store_from_scheme(scheme) - try: - store.set_acls(loc, public=public, - read_tenants=read_tenants, - write_tenants=write_tenants) - except NotImplementedError: - LOG.debug(_("Skipping store.set_acls... not implemented.")) - - -def validate_location(uri, context=None): - store = get_store_from_uri(uri) - store.validate_location(uri) diff --git a/juno-patches/glance_store/glance_store_patch/installation/install.sh b/juno-patches/glance_store/glance_store_patch/installation/install.sh deleted file mode 100644 index 0e73b4e0..00000000 --- a/juno-patches/glance_store/glance_store_patch/installation/install.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -CURPATH=$(cd "$(dirname "$0")"; pwd) -_PYTHON_INSTALL_DIR=${OPENSTACK_INSTALL_DIR} -if [ ! -n ${_PYTHON_INSTALL_DIR} ];then - _PYTHON_INSTALL_DIR="/usr/lib/python2.7/dist-packages" -fi -_GLANCE_STORE_DIR="${_PYTHON_INSTALL_DIR}/glance_store" - -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="${CURPATH}/../glance_store" - -_SCRIPT_LOGFILE="/var/log/glance/installation/install_store.log" - -export PS4='+{$LINENO:${FUNCNAME[0]}}' - -ERRTRAP() -{ - echo "[LINE:$1] Error: Command or function exited with status $?" -} - -function log() -{ - echo "$@" - echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE -} - -function process_stop -{ - PID=`ps -efw|grep "$1"|grep -v grep|awk '{print $2}'` - echo "PID is: $PID">>$_SCRIPT_LOGFILE - if [ "x${PID}" != "x" ]; then - for kill_id in $PID - do - kill -9 ${kill_id} - if [ $? -ne 0 ]; then - echo "[[stop glance-sync]]$1 stop failed.">>$_SCRIPT_LOGFILE - exit 1 - fi - done - echo "[[stop glance-sync]]$1 stop ok.">>$_SCRIPT_LOGFILE - fi -} - -function restart_services -{ - log "restarting glance ..." 
- service glance-api restart - service glance-registry restart - process_stop "glance-sync" - python /usr/bin/glance-sync --config-file=/etc/glance/glance-sync.conf & -} - -trap 'ERRTRAP $LINENO' ERR - -if [[ ${EUID} -ne 0 ]]; then - log "Please run as root." - exit 1 -fi - -if [ ! -d "/var/log/glance/installation" ]; then - mkdir -p /var/log/glance/installation - touch _SCRIPT_LOGFILE -fi - -cd `dirname $0` - -log "checking installation directories..." -if [ ! -d "${_GLANCE_STORE_DIR}" ] ; then - log "Could not find the glance installation. Please check the variables in the beginning of the script." - log "aborted." - exit 1 -fi - - -log "copying in new files..." - -cp -rf "${_CODE_DIR}" ${_PYTHON_INSTALL_DIR} - -restart_services -if [ $? -ne 0 ] ; then - log "There was an error in restarting the service, please restart glance manually." - exit 1 -fi - -log "Completed." -log "See README to get started." - -exit 0 diff --git a/juno-patches/neutron/neutron_cascaded_big2layer_patch/installation/install.sh b/juno-patches/neutron/neutron_cascaded_big2layer_patch/installation/install.sh deleted file mode 100644 index 58de0695..00000000 --- a/juno-patches/neutron/neutron_cascaded_big2layer_patch/installation/install.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. -_NEUTRON_CONF_DIR="/etc/neutron" -_NEUTRON_CONF_FILE='neutron.conf' -_NEUTRON_INSTALL="/usr/lib/python2.7/dist-packages" -_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="../neutron/" -_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascaded-server-big2layer-patch-installation-backup" -if [[ ${EUID} -ne 0 ]]; then - echo "Please run as root." - exit 1 -fi - -##Redirecting output to logfile as well as stdout -#exec > >(tee -a ${_SCRIPT_LOGFILE}) -#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) - -cd `dirname $0` - -echo "checking installation directories..." -if [ ! -d "${_NEUTRON_DIR}" ] ; then - echo "Could not find the neutron installation. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi -if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then - echo "Could not find neutron config file. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi - -echo "checking previous installation..." -if [ -d "${_BACKUP_DIR}/neutron" ] ; then - echo "It seems neutron-server-big2layer-cascaded-patch has already been installed!" - echo "Please check README for solution if this is not true." - exit 1 -fi - -echo "backing up current files that might be overwritten..." -mkdir -p "${_BACKUP_DIR}" -cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -echo "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` -if [ $? -ne 0 ] ; then - echo "Error in copying, aborted." 
- echo "Recovering original files..." - cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" - if [ $? -ne 0 ] ; then - echo "Recovering failed! Please install manually." - fi - exit 1 -fi - - -echo "restarting cascaded neutron server..." -service neutron-server restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron server manually." - exit 1 -fi - -echo "restarting cascaded neutron-plugin-openvswitch-agent..." -service neutron-plugin-openvswitch-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron-plugin-openvswitch-agent manually." - exit 1 -fi - -echo "restarting cascaded neutron-l3-agent..." -service neutron-l3-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron-l3-agent manually." - exit 1 -fi - -echo "Completed." -echo "See README to get started." -exit 0 diff --git a/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/config.py b/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/config.py deleted file mode 100644 index 95141761..00000000 --- a/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/config.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - - -l2_population_options = [ - cfg.IntOpt('agent_boot_time', default=180, - help=_('Delay within which agent is expected to update ' - 'existing ports whent it restarts')), - cfg.StrOpt('cascaded_gateway', default='no_gateway', - help=_('if not existing the gateway host Configure no_gateway' - 'else configure admin_gateway or population_opt')), -] - -cfg.CONF.register_opts(l2_population_options, "l2pop") diff --git a/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/db.py b/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/db.py deleted file mode 100644 index e1aa46a2..00000000 --- a/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/db.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import sql - -from neutron.common import constants as const -from neutron.db import agents_db -from neutron.db import common_db_mixin as base_db -from neutron.db import models_v2 -from neutron.openstack.common import jsonutils -from neutron.openstack.common import timeutils -from neutron.plugins.ml2.drivers.l2pop import constants as l2_const -from neutron.plugins.ml2 import models as ml2_models - - -class L2populationDbMixin(base_db.CommonDbMixin): - - def get_agent_ip_by_host(self, session, agent_host): - agent = self.get_agent_by_host(session, agent_host) - if agent: - return self.get_agent_ip(agent) - - def get_agent_ip(self, agent): - configuration = jsonutils.loads(agent.configurations) - return configuration.get('tunneling_ip') - - def get_agent_uptime(self, agent): - return timeutils.delta_seconds(agent.started_at, - agent.heartbeat_timestamp) - - def get_agent_tunnel_types(self, agent): - configuration = jsonutils.loads(agent.configurations) - return configuration.get('tunnel_types') - - def get_agent_l2pop_network_types(self, agent): - configuration = jsonutils.loads(agent.configurations) - return configuration.get('l2pop_network_types') - - def get_agent_by_host(self, session, agent_host): - with session.begin(subtransactions=True): - query = session.query(agents_db.Agent) - query = query.filter(agents_db.Agent.host == agent_host, - agents_db.Agent.agent_type.in_( - l2_const.SUPPORTED_AGENT_TYPES)) - return query.first() - - def get_network_ports(self, session, network_id): - with session.begin(subtransactions=True): - query = session.query(ml2_models.PortBinding, - agents_db.Agent) - query = query.join(agents_db.Agent, - agents_db.Agent.host == - ml2_models.PortBinding.host) - query = query.join(models_v2.Port) - query = query.filter(models_v2.Port.network_id == network_id, - models_v2.Port.admin_state_up == sql.true(), - agents_db.Agent.agent_type.in_( - l2_const.SUPPORTED_AGENT_TYPES)) - return query - - def get_nondvr_network_ports(self, session, network_id): - query = self.get_network_ports(session, network_id) - return query.filter(models_v2.Port.device_owner != - const.DEVICE_OWNER_DVR_INTERFACE) - - def get_dvr_network_ports(self, session, network_id): - with session.begin(subtransactions=True): - query = session.query(ml2_models.DVRPortBinding, - agents_db.Agent) - query = query.join(agents_db.Agent, - agents_db.Agent.host == - ml2_models.DVRPortBinding.host) - query = query.join(models_v2.Port) - query = query.filter(models_v2.Port.network_id == network_id, - models_v2.Port.admin_state_up == sql.true(), - models_v2.Port.device_owner == - const.DEVICE_OWNER_DVR_INTERFACE, - agents_db.Agent.agent_type.in_( - l2_const.SUPPORTED_AGENT_TYPES)) - return query - - def get_agent_network_active_port_count(self, session, agent_host, - network_id): - with session.begin(subtransactions=True): - query = session.query(models_v2.Port) - query1 = query.join(ml2_models.PortBinding) - query1 = query1.filter(models_v2.Port.network_id == network_id, - models_v2.Port.status == - const.PORT_STATUS_ACTIVE, - models_v2.Port.device_owner != - const.DEVICE_OWNER_DVR_INTERFACE, - ml2_models.PortBinding.host == agent_host) - query2 = query.join(ml2_models.DVRPortBinding) - query2 = query2.filter(models_v2.Port.network_id == network_id, - ml2_models.DVRPortBinding.status == - const.PORT_STATUS_ACTIVE, - models_v2.Port.device_owner == - const.DEVICE_OWNER_DVR_INTERFACE, - ml2_models.DVRPortBinding.host == - agent_host) - return (query1.count() + query2.count()) - - def 
get_host_ip_from_binding_profile(self, profile): - if(not profile): - return - profile = jsonutils.loads(profile) - return profile.get('host_ip') - - def get_segment_by_network_id(self, session, network_id): - with session.begin(subtransactions=True): - query = session.query(ml2_models.NetworkSegment) - query = query.filter( - ml2_models.NetworkSegment.network_id == network_id, - ml2_models.NetworkSegment.network_type == 'vxlan') - return query.first() - - def get_remote_ports(self, session, network_id): - with session.begin(subtransactions=True): - query = session.query(ml2_models.PortBinding) - query = query.join(models_v2.Port) - query = query.filter( - models_v2.Port.network_id == network_id, - ml2_models.PortBinding.profile.contains('"port_key": "remote_port"')) - return query diff --git a/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py deleted file mode 100644 index 292230a1..00000000 --- a/juno-patches/neutron/neutron_cascaded_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
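Before the driver code that follows, it helps to see the shape of the FDB payload it assembles (in get_remote_port_fdb() and _update_port_up()) and hands to L2populationAgentNotifyAPI.add_fdb_entries()/remove_fdb_entries(). This is a reading aid only; every concrete value below is invented, and the flooding-entry constant is shown under the assumption that it is the usual all-zero MAC/IP pair:

```python
# Illustrative only: nested structure of an l2pop FDB notification payload.
FLOODING_ENTRY = ['00:00:00:00:00:00', '0.0.0.0']   # assumed value of const.FLOODING_ENTRY

fdb_entries = {
    '2f0a5e7a-5e51-4a9c-9c1e-6a1d6bfa1a10': {        # network_id (example UUID)
        'segment_id': 1001,                           # VXLAN segmentation id of the network
        'network_type': 'vxlan',
        'ports': {
            '192.0.2.10': [                           # tunnel/host IP the entries point at
                FLOODING_ENTRY,                       # lets broadcast/unknown-unicast flood there
                ['fa:16:3e:01:02:03', '10.0.0.5'],    # [mac_address, fixed_ip] of the port
            ],
        },
    },
}
```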
- -from oslo.config import cfg - -from neutron.common import constants as const -from neutron import context as n_context -from neutron.db import api as db_api -from neutron.openstack.common import log as logging -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers.l2pop import config # noqa -from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db -from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc - -LOG = logging.getLogger(__name__) - - -class L2populationMechanismDriver(api.MechanismDriver, - l2pop_db.L2populationDbMixin): - - def __init__(self): - super(L2populationMechanismDriver, self).__init__() - self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() - - def initialize(self): - LOG.debug(_("Experimental L2 population driver")) - self.rpc_ctx = n_context.get_admin_context_without_session() - self.migrated_ports = {} - self.remove_fdb_entries = {} - self.remove_remote_ports_fdb = {} - - def _get_port_fdb_entries(self, port): - return [[port['mac_address'], - ip['ip_address']] for ip in port['fixed_ips']] - - def _is_remote_port(self, port): - return port['binding:profile'].get('port_key') == 'remote_port' - - def create_port_postcommit(self, context): - """ - if port is "remote_port", - then notify all l2-agent or only l2-gateway-agent - else do nothing - """ - port_context = context.current - if(self._is_remote_port(port_context)): - other_fdb_entries = self.get_remote_port_fdb(port_context) - if(not other_fdb_entries): - return - if(cfg.CONF.l2pop.cascaded_gateway == 'no_gateway'): - # notify all l2-agent - self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, - other_fdb_entries) - else: - # only notify to l2-gateway-agent - pass - - def get_remote_port_fdb(self, port_context): - port_id = port_context['id'] - network_id = port_context['network_id'] - - session = db_api.get_session() - segment = self.get_segment_by_network_id(session, network_id) - if not segment: - LOG.warning(_("Network %(network_id)s has no " - " vxlan provider, so cannot get segment"), - {'network_id': network_id}) - return - ip = port_context['binding:profile'].get('host_ip') - if not ip: - LOG.debug(_("Unable to retrieve the ip from remote port, " - "check the remote port %(port_id)."), - {'port_id': port_id}) - return - other_fdb_entries = {network_id: - {'segment_id': segment.segmentation_id, - 'network_type': segment.network_type, - 'ports': {}}} - ports = other_fdb_entries[network_id]['ports'] - agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) - agent_ports += self._get_port_fdb_entries(port_context) - ports[ip] = agent_ports - return other_fdb_entries - - def _get_agent_host(self, context, port): - if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - agent_host = context.binding.host - else: - agent_host = port['binding:host_id'] - return agent_host - - def delete_port_precommit(self, context): - # TODO(matrohon): revisit once the original bound segment will be - # available in delete_port_postcommit. 
in delete_port_postcommit - # agent_active_ports will be equal to 0, and the _update_port_down - # won't need agent_active_ports_count_for_flooding anymore - port = context.current - if(self._is_remote_port(port)): - fdb_entry = self.get_remote_port_fdb(port) - self.remove_remote_ports_fdb[port['id']] = fdb_entry - agent_host = context.host #self._get_agent_host(context, port) - - if port['id'] not in self.remove_fdb_entries: - self.remove_fdb_entries[port['id']] = {} - - self.remove_fdb_entries[port['id']][agent_host] = ( - self._update_port_down(context, port, agent_host)) - - def delete_port_postcommit(self, context): - port = context.current - agent_host = context.host #self._get_agent_host(context, port) - - if port['id'] in self.remove_fdb_entries: - for agent_host in list(self.remove_fdb_entries[port['id']]): - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, - self.remove_fdb_entries[port['id']][agent_host]) - self.remove_fdb_entries[port['id']].pop(agent_host, 0) - self.remove_fdb_entries.pop(port['id'], 0) - - remote_port_fdb = self.remove_remote_ports_fdb.pop( - context.current['id'], - None) - if(remote_port_fdb): - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, remote_port_fdb) - - def _get_diff_ips(self, orig, port): - orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) - port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) - - # check if an ip has been added or removed - orig_chg_ips = orig_ips.difference(port_ips) - port_chg_ips = port_ips.difference(orig_ips) - - if orig_chg_ips or port_chg_ips: - return orig_chg_ips, port_chg_ips - - def _fixed_ips_changed(self, context, orig, port, diff_ips): - orig_ips, port_ips = diff_ips - - if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): - agent_host = context.host - else: - agent_host = context.original_host - port_infos = self._get_port_infos( - context, orig, agent_host) - if not port_infos: - return - agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos - - orig_mac_ip = [[port['mac_address'], ip] for ip in orig_ips] - port_mac_ip = [[port['mac_address'], ip] for ip in port_ips] - - upd_fdb_entries = {port['network_id']: {agent_ip: {}}} - - ports = upd_fdb_entries[port['network_id']][agent_ip] - if orig_mac_ip: - ports['before'] = orig_mac_ip - - if port_mac_ip: - ports['after'] = port_mac_ip - - self.L2populationAgentNotify.update_fdb_entries( - self.rpc_ctx, {'chg_ip': upd_fdb_entries}) - - return True - - def update_port_postcommit(self, context): - port = context.current - orig = context.original - - diff_ips = self._get_diff_ips(orig, port) - if diff_ips: - self._fixed_ips_changed(context, orig, port, diff_ips) - if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - if context.status == const.PORT_STATUS_ACTIVE: - self._update_port_up(context) - if context.status == const.PORT_STATUS_DOWN: - agent_host = context.host - fdb_entries = self._update_port_down( - context, port, agent_host) - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fdb_entries) - elif (context.host != context.original_host - and context.status == const.PORT_STATUS_ACTIVE - and not self.migrated_ports.get(orig['id'])): - # The port has been migrated. 
We have to store the original - # binding to send appropriate fdb once the port will be set - # on the destination host - self.migrated_ports[orig['id']] = ( - (orig, context.original_host)) - elif context.status != context.original_status: - if context.status == const.PORT_STATUS_ACTIVE: - self._update_port_up(context) - elif context.status == const.PORT_STATUS_DOWN: - fdb_entries = self._update_port_down( - context, port, context.host) - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fdb_entries) - elif context.status == const.PORT_STATUS_BUILD: - orig = self.migrated_ports.pop(port['id'], None) - if orig: - original_port = orig[0] - original_host = orig[1] - # this port has been migrated: remove its entries from fdb - fdb_entries = self._update_port_down( - context, original_port, original_host) - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fdb_entries) - - def _get_port_infos(self, context, port, agent_host): - if not agent_host: - return - - session = db_api.get_session() - agent = self.get_agent_by_host(session, agent_host) - if not agent: - return - - agent_ip = self.get_agent_ip(agent) - if not agent_ip: - LOG.warning(_("Unable to retrieve the agent ip, check the agent " - "configuration.")) - return - - segment = context.bound_segment - if not segment: - LOG.warning(_("Port %(port)s updated by agent %(agent)s " - "isn't bound to any segment"), - {'port': port['id'], 'agent': agent}) - return - - network_types = self.get_agent_l2pop_network_types(agent) - if network_types is None: - network_types = self.get_agent_tunnel_types(agent) - if segment['network_type'] not in network_types: - return - - fdb_entries = self._get_port_fdb_entries(port) - - return agent, agent_host, agent_ip, segment, fdb_entries - - def _update_port_up(self, context): - port = context.current - agent_host = context.host - port_infos = self._get_port_infos(context, port, agent_host) - if not port_infos: - return - agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos - - network_id = port['network_id'] - - session = db_api.get_session() - agent_active_ports = self.get_agent_network_active_port_count( - session, agent_host, network_id) - - other_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {agent_ip: []}}} - - if agent_active_ports == 1 or ( - self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time): - # First port activated on current agent in this network, - # we have to provide it with the whole list of fdb entries - agent_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {}}} - ports = agent_fdb_entries[network_id]['ports'] - - nondvr_network_ports = self.get_nondvr_network_ports(session, - network_id) - for network_port in nondvr_network_ports: - binding, agent = network_port - if agent.host == agent_host: - continue - - ip = self.get_agent_ip(agent) - if not ip: - LOG.debug(_("Unable to retrieve the agent ip, check " - "the agent %(agent_host)s configuration."), - {'agent_host': agent.host}) - continue - - agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) - agent_ports += self._get_port_fdb_entries(binding.port) - ports[ip] = agent_ports - - if cfg.CONF.l2pop.cascaded_gateway == 'no_gateway': - remote_ports = self.get_remote_ports(session, network_id) - else: - remote_ports = {} -# elif cfg.CONF.cascaded_gateway == 'admin_gateway' or -# cfg.CONF.cascaded_gateway == 'population_opt': -# if 
self.is_proxy_port(port_context): -# remote_ports = self.get_remote_ports(session, network_id) -# else: - for binding in remote_ports: - profile = binding['profile'] - ip = self.get_host_ip_from_binding_profile(profile) - if not ip: - LOG.debug(_("Unable to retrieve the agent ip, check " - "the agent %(agent_host)s configuration."), - {'agent_host': agent.host}) - continue - - agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) - agent_ports += self._get_port_fdb_entries(binding.port) - ports[ip] = agent_ports - - dvr_network_ports = self.get_dvr_network_ports(session, network_id) - for network_port in dvr_network_ports: - binding, agent = network_port - if agent.host == agent_host: - continue - - ip = self.get_agent_ip(agent) - if not ip: - LOG.debug(_("Unable to retrieve the agent ip, check " - "the agent %(agent_host)s configuration."), - {'agent_host': agent.host}) - continue - - agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) - ports[ip] = agent_ports - - # And notify other agents to add flooding entry - other_fdb_entries[network_id]['ports'][agent_ip].append( - const.FLOODING_ENTRY) - - if ports.keys(): - self.L2populationAgentNotify.add_fdb_entries( - self.rpc_ctx, agent_fdb_entries, agent_host) - - # Notify other agents to add fdb rule for current port - if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: - other_fdb_entries[network_id]['ports'][agent_ip] += ( - port_fdb_entries) - - self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, - other_fdb_entries) - - def _update_port_down(self, context, port, agent_host): - port_infos = self._get_port_infos(context, port, agent_host) - if not port_infos: - return - agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos - - network_id = port['network_id'] - - session = db_api.get_session() - agent_active_ports = self.get_agent_network_active_port_count( - session, agent_host, network_id) - - other_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {agent_ip: []}}} - if agent_active_ports == 0: - # Agent is removing its last activated port in this network, - # other agents needs to be notified to delete their flooding entry. - other_fdb_entries[network_id]['ports'][agent_ip].append( - const.FLOODING_ENTRY) - # Notify other agents to remove fdb rules for current port - if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: - fdb_entries = port_fdb_entries - other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries - - return other_fdb_entries diff --git a/juno-patches/neutron/neutron_cascaded_l3_patch/README.md b/juno-patches/neutron/neutron_cascaded_l3_patch/README.md deleted file mode 100644 index 9a8c96cf..00000000 --- a/juno-patches/neutron/neutron_cascaded_l3_patch/README.md +++ /dev/null @@ -1,83 +0,0 @@ -Openstack Neutron cascaded_l3_patch -=============================== - - Neutron cascaded_l3_patch is mainly used to achieve L3 communications crossing OpenStack. To solve the problem, we add 'onlink' field for extra route of router based on the ip range in neutron-server, and add GRE Tunnel in l3-agent. This patch should be made to the Cascaded Neutron nodes. 
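As a concrete illustration of the paragraph above (a sketch, not code from the patch): the patched neutron-server marks extra routes whose nexthop falls in the configured external range with an 'onlink' hint, and the cascaded l3-agent then reaches that nexthop over a GRE tunnel device (see the GreTunnel class in the patched l3_agent.py later in this diff). The addresses and device name here are invented examples.

```python
# Hypothetical example of an extra route as delivered to the cascaded l3-agent.
extra_route = {
    'destination': '10.20.30.0/24',  # subnet hosted in another cascaded OpenStack
    'nexthop': '100.64.1.5',         # inside l3gw_extern_net_ip_range (default 100.64.0.0/16)
    'onlink': True,                  # hint added by this patch: nexthop is on-link via the tunnel
}

gre_device_name = 'gre-100-64-1-5'   # invented per-nexthop device naming, for illustration only
```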
- - -Key modules ------------ - -* We add GRE Tunnel in l3-agent by modifying some files: - neutron/agent/linux/ip_lib.py - neutron/agent/l3_agent.py - -* We add 'onlink' field for extra route of router based on the ip range in neutron-server by modifying some files: - neutron/common/config.py - neutron/db/extraroute_db.py - - -Requirements ------------- -* openstack neutron-2014.2 has been installed. - -Installation ------------- - -We provide two ways to install the Neutron cascaded_l3_patch. In this section, we will guide you through installing the Neutron cascaded_l3_patch with modifying the configuration. - -* **Note:** - - - Make sure you have an existing installation of **Openstack Neutron of Juno Version**. - - We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified: - $NEUTRON_PARENT_DIR/neutron - (replace the $... with actual directory names.) - -* **Manual Installation** - - - Navigate to the local repository and copy the contents in 'neutron' sub-directory to the corresponding places in existing neutron, e.g. - ```cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR``` - (replace the $... with actual directory name.) - ``` - - you can modify neutron config file - $CONFIG_FILE_PATH/plugins/ml2/ml2_conf.ini - Modify the value of firewall_driver option as: - [securitygroup] - firewall_driver=neutron.agent.firewall.NoopFirewallDriver - - $CONFIG_FILE_PATH/l3_agent.ini - Modify the value of agent_mode option as: - [DEFAULT] - agent_mode=dvr_snat - - $CONFIG_FILE_PATH/neutron.conf, you can also don't modify - Default value of 3gw_extern_net_ip_range option in config file, is - l3gw_extern_net_ip_range=100.64.0.0/16 - - - Restart the neutron-server and neutron-l3-agent. - ```service neutron-server restart``` - ```service neutron-l3-agent restart``` - - - Done. - -* **Automatic Installation** - - - Navigate to the installation directory and run installation script. - ``` - cd $LOCAL_REPOSITORY_DIR/installation - sudo bash ./install.sh - ``` - (replace the $... with actual directory name.) - - - Done. The installation script will automatically modify the neutron code and the configurations. - -* **Troubleshooting** - - In case the automatic installation process is not complete, please check the followings: - - - Make sure your OpenStack version is Juno. - - - Check the variables in the beginning of the install.sh scripts. Your installation directories may be different from the default values we provide. - - - The installation code will automatically modify the related codes to $NEUTRON_PARENT_DIR/neutron and the related configuration. - - - In case the automatic installation does not work, try to install manually. diff --git a/juno-patches/neutron/neutron_cascaded_l3_patch/installation/install.sh b/juno-patches/neutron/neutron_cascaded_l3_patch/installation/install.sh deleted file mode 100644 index de3049eb..00000000 --- a/juno-patches/neutron/neutron_cascaded_l3_patch/installation/install.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -_NEUTRON_CONF_DIR="/etc/neutron" -_NEUTRON_CONF_FILE='neutron.conf' -_NEUTRON_ML2_CONF_FILE='plugins/ml2/ml2_conf.ini' -_NEUTRON_L3_CONF_FILE='l3_agent.ini' -_NEUTRON_INSTALL="/usr/lib/python2.7/dist-packages" -_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" - -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="../neutron/" -_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascaded-server-installation-backup" - -#_SCRIPT_NAME="${0##*/}" -#_SCRIPT_LOGFILE="/var/log/neutron-cascaded-server/installation/${_SCRIPT_NAME}.log" - -if [[ ${EUID} -ne 0 ]]; then - echo "Please run as root." - exit 1 -fi - -##Redirecting output to logfile as well as stdout -#exec > >(tee -a ${_SCRIPT_LOGFILE}) -#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) - -cd `dirname $0` - -echo "checking installation directories..." -if [ ! -d "${_NEUTRON_DIR}" ] ; then - echo "Could not find the neutron installation. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi -if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then - echo "Could not find neutron config file. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi -if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_ML2_CONF_FILE}" ] ; then - echo "Could not find ml2 config file. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi -if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONF_FILE}" ] ; then - echo "Could not find l3_agent config file. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi - -echo "checking previous installation..." -if [ -d "${_BACKUP_DIR}/neutron" ] ; then - echo "It seems neutron-server-cascaded has already been installed!" - echo "Please check README for solution if this is not true." - exit 1 -fi - -echo "backing up current files that might be overwritten..." -mkdir -p "${_BACKUP_DIR}" -cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -echo "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` -if [ $? -ne 0 ] ; then - echo "Error in copying, aborted." - echo "Recovering original files..." - cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" - if [ $? -ne 0 ] ; then - echo "Recovering failed! Please install manually." - fi - exit 1 -fi - -echo "updating config file..." -cp "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONF_FILE}" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONF_FILE}.bk" -cp "${_NEUTRON_CONF_DIR}/${_NEUTRON_ML2_CONF_FILE}" "${_NEUTRON_CONF_DIR}/${_NEUTRON_ML2_CONF_FILE}.bk" - -sed -i '/^firewall_driver/d' "${_NEUTRON_CONF_DIR}/${_NEUTRON_ML2_CONF_FILE}" -sed -i '/^\[securitygroup\]/a\firewall_driver=neutron.agent.firewall.NoopFirewallDriver' "${_NEUTRON_CONF_DIR}/${_NEUTRON_ML2_CONF_FILE}" - -sed -i '/^agent_mode/d' "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONF_FILE}" -sed -i '/^\[DEFAULT\]/a\agent_mode=dvr_snat' "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONF_FILE}" - -echo "restarting cascaded neutron server..." -service neutron-server restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron server manually." - exit 1 -fi - -echo "restarting cascaded neutron-plugin-openvswitch-agent..." 
-service neutron-plugin-openvswitch-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron-plugin-openvswitch-agent manually." - exit 1 -fi - -echo "restarting cascaded neutron-l3-agent..." -service neutron-l3-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron-l3-agent manually." - exit 1 -fi - -echo "Completed." -echo "See README to get started." - -exit 0 - diff --git a/juno-patches/neutron/neutron_cascaded_l3_patch/installation/uninstall.sh b/juno-patches/neutron/neutron_cascaded_l3_patch/installation/uninstall.sh deleted file mode 100644 index 0254a6de..00000000 --- a/juno-patches/neutron/neutron_cascaded_l3_patch/installation/uninstall.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -_NEUTRON_CONF_DIR="/etc/neutron" -_NEUTRON_CONF_FILE='neutron.conf' -_NEUTRON_ML2_CONF_FILE='plugins/ml2/ml2_conf.ini' -_NEUTRON_L3_CONF_FILE='l3_agent.ini' -_NEUTRON_INSTALL="/usr/lib/python2.7/dist-packages" -_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" - - -_CODE_DIR="../neutron/" -_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascaded-server-installation-backup" - - -if [[ ${EUID} -ne 0 ]]; then - echo "Please run as root." - exit 1 -fi - -echo "checking previous installation..." -if [ ! -d "${_BACKUP_DIR}/neutron" ] ; then - echo "Could not find the neutron backup. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi - -if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_ML2_CONF_FILE}.bk" ] ; then - echo "Could not find bak for ml2 config file. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi - -if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONF_FILE}.bk" ] ; then - echo "Could not find bak for l3_agent config file. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi - -echo "starting uninstall cascaded ..." -rm -r "${_NEUTRON_INSTALL}/neutron/" -cp -r "${_BACKUP_DIR}/neutron/" "${_NEUTRON_INSTALL}" - -echo "updating config file..." -cp "${_NEUTRON_CONF_DIR}/${_NEUTRON_ML2_CONF_FILE}.bk" "${_NEUTRON_CONF_DIR}/${_NEUTRON_ML2_CONF_FILE}" -cp "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONF_FILE}.bk" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONF_FILE}" - - -echo "restarting cascaded neutron server..." -service neutron-server restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron server manually." - exit 1 -fi - -echo "restarting cascaded neutron-plugin-openvswitch-agent..." -service neutron-plugin-openvswitch-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron-plugin-openvswitch-agent manually." - exit 1 -fi - -echo "restarting cascaded neutron-l3-agent..." -service neutron-l3-agent restart -if [ $? 
-ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron-l3-agent manually." - exit 1 -fi -rm -rf $_BACKUP_DIR/* -echo "Completed." -echo "uninstall success." - -exit 0 - diff --git a/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/agent/l3_agent.py b/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/agent/l3_agent.py deleted file mode 100644 index 6aeaa039..00000000 --- a/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/agent/l3_agent.py +++ /dev/null @@ -1,2127 +0,0 @@ -# Copyright 2012 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import sys - -import datetime -import eventlet -eventlet.monkey_patch() - -import netaddr -import os -from oslo.config import cfg -from oslo import messaging -import Queue - -from neutron.agent.common import config -from neutron.agent import l3_ha_agent -from neutron.agent.linux import external_process -from neutron.agent.linux import interface -from neutron.agent.linux import ip_lib -from neutron.agent.linux import iptables_manager -from neutron.agent.linux import ra -from neutron.agent import rpc as agent_rpc -from neutron.common import config as common_config -from neutron.common import constants as l3_constants -from neutron.common import ipv6_utils -from neutron.common import rpc as n_rpc -from neutron.common import topics -from neutron.common import utils as common_utils -from neutron import context -from neutron import manager -from neutron.openstack.common import excutils -from neutron.openstack.common.gettextutils import _LW -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.openstack.common import periodic_task -from neutron.openstack.common import processutils -from neutron.openstack.common import service -from neutron.openstack.common import timeutils -from neutron import service as neutron_service -from neutron.services.firewall.agents.l3reference import firewall_l3_agent - -LOG = logging.getLogger(__name__) -NS_PREFIX = 'qrouter-' -INTERNAL_DEV_PREFIX = 'qr-' -EXTERNAL_DEV_PREFIX = 'qg-' -SNAT_INT_DEV_PREFIX = 'sg-' -FIP_NS_PREFIX = 'fip-' -SNAT_NS_PREFIX = 'snat-' -FIP_2_ROUTER_DEV_PREFIX = 'fpr-' -ROUTER_2_FIP_DEV_PREFIX = 'rfp-' -FIP_EXT_DEV_PREFIX = 'fg-' -FIP_LL_SUBNET = '169.254.30.0/23' -# Route Table index for FIPs -FIP_RT_TBL = 16 -# Rule priority range for FIPs -FIP_PR_START = 32768 -FIP_PR_END = FIP_PR_START + 40000 -RPC_LOOP_INTERVAL = 1 -FLOATING_IP_CIDR_SUFFIX = '/32' -# Lower value is higher priority -PRIORITY_RPC = 0 -PRIORITY_SYNC_ROUTERS_TASK = 1 -DELETE_ROUTER = 1 - - -class L3PluginApi(n_rpc.RpcProxy): - """Agent side of the l3 agent RPC API. - - API version history: - 1.0 - Initial version. - 1.1 - Floating IP operational status updates - 1.2 - DVR support: new L3 plugin methods added. 
- - get_ports_by_subnet - - get_agent_gateway_port - Needed by the agent when operating in DVR/DVR_SNAT mode - 1.3 - Get the list of activated services - - """ - - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic, host): - super(L3PluginApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.host = host - - def get_routers(self, context, router_ids=None): - """Make a remote process call to retrieve the sync data for routers.""" - return self.call(context, - self.make_msg('sync_routers', host=self.host, - router_ids=router_ids)) - - def get_external_network_id(self, context): - """Make a remote process call to retrieve the external network id. - - @raise n_rpc.RemoteError: with TooManyExternalNetworks as - exc_type if there are more than one - external network - """ - return self.call(context, - self.make_msg('get_external_network_id', - host=self.host)) - - def update_floatingip_statuses(self, context, router_id, fip_statuses): - """Call the plugin update floating IPs's operational status.""" - return self.call(context, - self.make_msg('update_floatingip_statuses', - router_id=router_id, - fip_statuses=fip_statuses), - version='1.1') - - def get_ports_by_subnet(self, context, subnet_id): - """Retrieve ports by subnet id.""" - return self.call(context, - self.make_msg('get_ports_by_subnet', host=self.host, - subnet_id=subnet_id), - topic=self.topic, - version='1.2') - - def get_agent_gateway_port(self, context, fip_net): - """Get or create an agent_gateway_port.""" - return self.call(context, - self.make_msg('get_agent_gateway_port', - network_id=fip_net, host=self.host), - topic=self.topic, - version='1.2') - - def get_service_plugin_list(self, context): - """Make a call to get the list of activated services.""" - return self.call(context, - self.make_msg('get_service_plugin_list'), - topic=self.topic, - version='1.3') - - -class LinkLocalAddressPair(netaddr.IPNetwork): - def __init__(self, addr): - super(LinkLocalAddressPair, self).__init__(addr) - - def get_pair(self): - """Builds an address pair from the first and last addresses. """ - return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)), - netaddr.IPNetwork("%s/%s" % (self.broadcast, self.prefixlen))) - - -class LinkLocalAllocator(object): - """Manages allocation of link local IP addresses. - - These link local addresses are used for routing inside the fip namespaces. - The associations need to persist across agent restarts to maintain - consistency. Without this, there is disruption in network connectivity - as the agent rewires the connections with the new IP address assocations. - - Persisting these in the database is unnecessary and would degrade - performance. - """ - def __init__(self, state_file, subnet): - """Read the file with previous allocations recorded. - - See the note in the allocate method for more detail. - """ - self.state_file = state_file - subnet = netaddr.IPNetwork(subnet) - - self.allocations = {} - - self.remembered = {} - for line in self._read(): - key, cidr = line.strip().split(',') - self.remembered[key] = LinkLocalAddressPair(cidr) - - self.pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31)) - self.pool.difference_update(self.remembered.values()) - - def allocate(self, key): - """Try to allocate a link local address pair. - - I expect this to work in all cases because I expect the pool size to be - large enough for any situation. Nonetheless, there is some defensive - programming in here. 
- - Since the allocations are persisted, there is the chance to leak - allocations which should have been released but were not. This leak - could eventually exhaust the pool. - - So, if a new allocation is needed, the code first checks to see if - there are any remembered allocations for the key. If not, it checks - the free pool. If the free pool is empty then it dumps the remembered - allocations to free the pool. This final desparate step will not - happen often in practice. - """ - if key in self.remembered: - self.allocations[key] = self.remembered.pop(key) - return self.allocations[key] - - if not self.pool: - # Desparate times. Try to get more in the pool. - self.pool.update(self.remembered.values()) - self.remembered.clear() - if not self.pool: - # More than 256 routers on a compute node! - raise RuntimeError(_("Cannot allocate link local address")) - - self.allocations[key] = self.pool.pop() - self._write_allocations() - return self.allocations[key] - - def release(self, key): - self.pool.add(self.allocations.pop(key)) - self._write_allocations() - - def _write_allocations(self): - current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()] - remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()] - current.extend(remembered) - self._write(current) - - def _write(self, lines): - with open(self.state_file, "w") as f: - f.writelines(lines) - - def _read(self): - if not os.path.exists(self.state_file): - return [] - with open(self.state_file) as f: - return f.readlines() - - -class GreTunnel(object): - - def __init__(self, nexthop, gre_device_name): - self.remote_ip = nexthop - self.gre_device_name = gre_device_name - self.extra_route_set = set() - - def add_extra_route(self, destination): - self.extra_route_set.add(destination) - - def remove_extra_route(self, destination): - if(destination in self.extra_route_set): - self.extra_route_set.remove(destination) - - -class RouterInfo(l3_ha_agent.RouterMixin): - - def __init__(self, router_id, root_helper, use_namespaces, router, - use_ipv6=False): - self.router_id = router_id - self.ex_gw_port = None - self._snat_enabled = None - self._snat_action = None - self.internal_ports = [] - self.snat_ports = [] - self.floating_ips = set() - self.floating_ips_dict = {} - self.root_helper = root_helper - self.use_namespaces = use_namespaces - # Invoke the setter for establishing initial SNAT action - self.router = router - self.ns_name = NS_PREFIX + router_id if use_namespaces else None - self.iptables_manager = iptables_manager.IptablesManager( - root_helper=root_helper, - use_ipv6=use_ipv6, - namespace=self.ns_name) - self.snat_iptables_manager = None - self.routes = [] - # DVR Data - # Linklocal subnet for router and floating IP namespace link - self.rtr_fip_subnet = None - self.dist_fip_count = 0 - - # added by jiahaojie 00209498 ----begin - self.next_hop_gre_tunnel = {} - self.route_cidrs = set() - # added by jiahaojie 00209498 ----end - - super(RouterInfo, self).__init__() - - @property - def router(self): - return self._router - - @router.setter - def router(self, value): - self._router = value - if not self._router: - return - # enable_snat by default if it wasn't specified by plugin - self._snat_enabled = self._router.get('enable_snat', True) - # Set a SNAT action for the router - if self._router.get('gw_port'): - self._snat_action = ('add_rules' if self._snat_enabled - else 'remove_rules') - elif self.ex_gw_port: - # Gateway port was removed, remove rules - self._snat_action = 'remove_rules' - - def 
perform_snat_action(self, snat_callback, *args): - # Process SNAT rules for attached subnets - if self._snat_action: - snat_callback(self, self._router.get('gw_port'), - *args, action=self._snat_action) - self._snat_action = None - - -class RouterUpdate(object): - """Encapsulates a router update - - An instance of this object carries the information necessary to prioritize - and process a request to update a router. - """ - def __init__(self, router_id, priority, - action=None, router=None, timestamp=None): - self.priority = priority - self.timestamp = timestamp - if not timestamp: - self.timestamp = timeutils.utcnow() - self.id = router_id - self.action = action - self.router = router - - def __lt__(self, other): - """Implements priority among updates - - Lower numerical priority always gets precedence. When comparing two - updates of the same priority then the one with the earlier timestamp - gets procedence. In the unlikely event that the timestamps are also - equal it falls back to a simple comparison of ids meaning the - precedence is essentially random. - """ - if self.priority != other.priority: - return self.priority < other.priority - if self.timestamp != other.timestamp: - return self.timestamp < other.timestamp - return self.id < other.id - - -class ExclusiveRouterProcessor(object): - """Manager for access to a router for processing - - This class controls access to a router in a non-blocking way. The first - instance to be created for a given router_id is granted exclusive access to - the router. - - Other instances may be created for the same router_id while the first - instance has exclusive access. If that happens then it doesn't block and - wait for access. Instead, it signals to the master instance that an update - came in with the timestamp. - - This way, a thread will not block to wait for access to a router. Instead - it effectively signals to the thread that is working on the router that - something has changed since it started working on it. That thread will - simply finish its current iteration and then repeat. - - This class keeps track of the last time that a router data was fetched and - processed. The timestamp that it keeps must be before when the data used - to process the router last was fetched from the database. But, as close as - possible. The timestamp should not be recorded, however, until the router - has been processed using the fetch data. - """ - _masters = {} - _router_timestamps = {} - - def __init__(self, router_id): - self._router_id = router_id - - if router_id not in self._masters: - self._masters[router_id] = self - self._queue = [] - - self._master = self._masters[router_id] - - def _i_am_master(self): - return self == self._master - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if self._i_am_master(): - del self._masters[self._router_id] - - def _get_router_data_timestamp(self): - return self._router_timestamps.get(self._router_id, - datetime.datetime.min) - - def fetched_and_processed(self, timestamp): - """Records the data timestamp after it is used to update the router""" - new_timestamp = max(timestamp, self._get_router_data_timestamp()) - self._router_timestamps[self._router_id] = new_timestamp - - def queue_update(self, update): - """Queues an update from a worker - - This is the queue used to keep new updates that come in while a router - is being processed. These updates have already bubbled to the front of - the RouterProcessingQueue. 
- """ - self._master._queue.append(update) - - def updates(self): - """Processes the router until updates stop coming - - Only the master instance will process the router. However, updates may - come in from other workers while it is in progress. This method loops - until they stop coming. - """ - if self._i_am_master(): - while self._queue: - # Remove the update from the queue even if it is old. - update = self._queue.pop(0) - # Process the update only if it is fresh. - if self._get_router_data_timestamp() < update.timestamp: - yield update - - -class RouterProcessingQueue(object): - """Manager of the queue of routers to process.""" - def __init__(self): - self._queue = Queue.PriorityQueue() - - def add(self, update): - self._queue.put(update) - - def each_update_to_next_router(self): - """Grabs the next router from the queue and processes - - This method uses a for loop to process the router repeatedly until - updates stop bubbling to the front of the queue. - """ - next_update = self._queue.get() - - with ExclusiveRouterProcessor(next_update.id) as rp: - # Queue the update whether this worker is the master or not. - rp.queue_update(next_update) - - # Here, if the current worker is not the master, the call to - # rp.updates() will not yield and so this will essentially be a - # noop. - for update in rp.updates(): - yield (rp, update) - - -class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, - l3_ha_agent.AgentMixin, - manager.Manager): - """Manager for L3NatAgent - - API version history: - 1.0 initial Version - 1.1 changed the type of the routers parameter - to the routers_updated method. - It was previously a list of routers in dict format. - It is now a list of router IDs only. - Per rpc versioning rules, it is backwards compatible. - 1.2 - DVR support: new L3 agent methods added. - - add_arp_entry - - del_arp_entry - Needed by the L3 service when dealing with DVR - """ - RPC_API_VERSION = '1.2' - - OPTS = [ - cfg.StrOpt('agent_mode', default='legacy', - help=_("The working mode for the agent. Allowed modes are: " - "'legacy' - this preserves the existing behavior " - "where the L3 agent is deployed on a centralized " - "networking node to provide L3 services like DNAT, " - "and SNAT. Use this mode if you do not want to " - "adopt DVR. 'dvr' - this mode enables DVR " - "functionality and must be used for an L3 agent " - "that runs on a compute host. 'dvr_snat' - this " - "enables centralized SNAT support in conjunction " - "with DVR. This mode must be used for an L3 agent " - "running on a centralized node (or in single-host " - "deployments, e.g. 
devstack)")), - cfg.StrOpt('external_network_bridge', default='br-ex', - help=_("Name of bridge used for external network " - "traffic.")), - cfg.IntOpt('metadata_port', - default=9697, - help=_("TCP Port used by Neutron metadata namespace " - "proxy.")), - cfg.IntOpt('send_arp_for_ha', - default=3, - help=_("Send this many gratuitous ARPs for HA setup, if " - "less than or equal to 0, the feature is disabled")), - cfg.StrOpt('router_id', default='', - help=_("If namespaces is disabled, the l3 agent can only" - " configure a router that has the matching router " - "ID.")), - cfg.BoolOpt('handle_internal_only_routers', - default=True, - help=_("Agent should implement routers with no gateway")), - cfg.StrOpt('gateway_external_network_id', default='', - help=_("UUID of external network for routers implemented " - "by the agents.")), - cfg.BoolOpt('enable_metadata_proxy', default=True, - help=_("Allow running metadata proxy.")), - cfg.BoolOpt('router_delete_namespaces', default=False, - help=_("Delete namespace after removing a router.")), - cfg.StrOpt('metadata_proxy_socket', - default='$state_path/metadata_proxy', - help=_('Location of Metadata Proxy UNIX domain ' - 'socket')), - ] - - def __init__(self, host, conf=None): - if conf: - self.conf = conf - else: - self.conf = cfg.CONF - self.root_helper = config.get_root_helper(self.conf) - self.router_info = {} - - self._check_config_params() - - try: - self.driver = importutils.import_object( - self.conf.interface_driver, - self.conf - ) - except Exception: - msg = _("Error importing interface driver " - "'%s'") % self.conf.interface_driver - LOG.error(msg) - raise SystemExit(1) - - self.context = context.get_admin_context_without_session() - self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) - self.fullsync = True - self.sync_progress = False - - # Get the list of service plugins from Neutron Server - # This is the first place where we contact neutron-server on startup - # so retry in case its not ready to respond. - retry_count = 5 - while True: - retry_count = retry_count - 1 - try: - self.neutron_service_plugins = ( - self.plugin_rpc.get_service_plugin_list(self.context)) - except n_rpc.RemoteError as e: - with excutils.save_and_reraise_exception() as ctx: - ctx.reraise = False - LOG.warning(_LW('l3-agent cannot check service plugins ' - 'enabled at the neutron server when ' - 'startup due to RPC error. It happens ' - 'when the server does not support this ' - 'RPC API. If the error is ' - 'UnsupportedVersion you can ignore this ' - 'warning. Detail message: %s'), e) - self.neutron_service_plugins = None - except messaging.MessagingTimeout as e: - with excutils.save_and_reraise_exception() as ctx: - if retry_count > 0: - ctx.reraise = False - LOG.warning(_LW('l3-agent cannot check service ' - 'plugins enabled on the neutron ' - 'server. Retrying. ' - 'Detail message: %s'), e) - continue - break - - self._clean_stale_namespaces = self.conf.use_namespaces - - # dvr data - self.agent_gateway_port = None - self.agent_fip_count = 0 - self.local_subnets = LinkLocalAllocator( - os.path.join(self.conf.state_path, 'fip-linklocal-networks'), - FIP_LL_SUBNET) - self.fip_priorities = set(range(FIP_PR_START, FIP_PR_END)) - - self._queue = RouterProcessingQueue() - super(L3NATAgent, self).__init__(conf=self.conf) - - self.target_ex_net_id = None - self.use_ipv6 = ipv6_utils.is_enabled() - - def _check_config_params(self): - """Check items in configuration files. - - Check for required and invalid configuration items. 
- The actual values are not verified for correctness. - """ - if not self.conf.interface_driver: - msg = _('An interface driver must be specified') - LOG.error(msg) - raise SystemExit(1) - - if not self.conf.use_namespaces and not self.conf.router_id: - msg = _('Router id is required if not using namespaces.') - LOG.error(msg) - raise SystemExit(1) - - def _list_namespaces(self): - """Get a set of all router namespaces on host - - The argument routers is the list of routers that are recorded in - the database as being hosted on this node. - """ - try: - root_ip = ip_lib.IPWrapper(self.root_helper) - - host_namespaces = root_ip.get_namespaces(self.root_helper) - return set(ns for ns in host_namespaces - if (ns.startswith(NS_PREFIX) - or ns.startswith(SNAT_NS_PREFIX))) - except RuntimeError: - LOG.exception(_('RuntimeError in obtaining router list ' - 'for namespace cleanup.')) - return set() - - def _cleanup_namespaces(self, router_namespaces, router_ids): - """Destroy stale router namespaces on host when L3 agent restarts - - This routine is called when self._clean_stale_namespaces is True. - - The argument router_namespaces is the list of all routers namespaces - The argument router_ids is the list of ids for known routers. - """ - ns_to_ignore = set(NS_PREFIX + id for id in router_ids) - ns_to_ignore.update(SNAT_NS_PREFIX + id for id in router_ids) - ns_to_destroy = router_namespaces - ns_to_ignore - self._destroy_stale_router_namespaces(ns_to_destroy) - - def _destroy_stale_router_namespaces(self, router_namespaces): - """Destroys the stale router namespaces - - The argumenet router_namespaces is a list of stale router namespaces - - As some stale router namespaces may not be able to be deleted, only - one attempt will be made to delete them. - """ - for ns in router_namespaces: - ra.disable_ipv6_ra(ns[len(NS_PREFIX):], ns, self.root_helper) - try: - self._destroy_namespace(ns) - except RuntimeError: - LOG.exception(_('Failed to destroy stale router namespace ' - '%s'), ns) - self._clean_stale_namespaces = False - - def _destroy_namespace(self, ns): - if ns.startswith(NS_PREFIX): - if self.conf.enable_metadata_proxy: - self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns) - self._destroy_router_namespace(ns) - elif ns.startswith(FIP_NS_PREFIX): - self._destroy_fip_namespace(ns) - elif ns.startswith(SNAT_NS_PREFIX): - self._destroy_snat_namespace(ns) - - def _delete_namespace(self, ns_ip, ns): - try: - ns_ip.netns.delete(ns) - except RuntimeError: - msg = _('Failed trying to delete namespace: %s') % ns - LOG.exception(msg) - - def _destroy_snat_namespace(self, ns): - ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns) - # delete internal interfaces - for d in ns_ip.get_devices(exclude_loopback=True): - if d.name.startswith(SNAT_INT_DEV_PREFIX): - LOG.debug('Unplugging DVR device %s', d.name) - self.driver.unplug(d.name, namespace=ns, - prefix=SNAT_INT_DEV_PREFIX) - - # TODO(mrsmith): delete ext-gw-port - LOG.debug('DVR: destroy snat ns: %s', ns) - if self.conf.router_delete_namespaces: - self._delete_namespace(ns_ip, ns) - - def _destroy_fip_namespace(self, ns): - ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns) - for d in ns_ip.get_devices(exclude_loopback=True): - if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX): - # internal link between IRs and FIP NS - ns_ip.del_veth(d.name) - elif d.name.startswith(FIP_EXT_DEV_PREFIX): - # single port from FIP NS to br-ext - # TODO(carl) Where does the port get deleted? 
- LOG.debug('DVR: unplug: %s', d.name) - self.driver.unplug(d.name, - bridge=self.conf.external_network_bridge, - namespace=ns, - prefix=FIP_EXT_DEV_PREFIX) - LOG.debug('DVR: destroy fip ns: %s', ns) - # TODO(mrsmith): add LOG warn if fip count != 0 - if self.conf.router_delete_namespaces: - self._delete_namespace(ns_ip, ns) - self.agent_gateway_port = None - - def _destroy_router_namespace(self, ns): - ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns) - for d in ns_ip.get_devices(exclude_loopback=True): - if d.name.startswith(INTERNAL_DEV_PREFIX): - # device is on default bridge - self.driver.unplug(d.name, namespace=ns, - prefix=INTERNAL_DEV_PREFIX) - elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX): - ns_ip.del_veth(d.name) - elif d.name.startswith(EXTERNAL_DEV_PREFIX): - self.driver.unplug(d.name, - bridge=self.conf.external_network_bridge, - namespace=ns, - prefix=EXTERNAL_DEV_PREFIX) - - if self.conf.router_delete_namespaces: - self._delete_namespace(ns_ip, ns) - - def _create_namespace(self, name): - ip_wrapper_root = ip_lib.IPWrapper(self.root_helper) - ip_wrapper = ip_wrapper_root.ensure_namespace(name) - ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) - if self.use_ipv6: - ip_wrapper.netns.execute(['sysctl', '-w', - 'net.ipv6.conf.all.forwarding=1']) - - def _create_router_namespace(self, ri): - self._create_namespace(ri.ns_name) - - def _fetch_external_net_id(self, force=False): - """Find UUID of single external network for this agent.""" - if self.conf.gateway_external_network_id: - return self.conf.gateway_external_network_id - - # L3 agent doesn't use external_network_bridge to handle external - # networks, so bridge_mappings with provider networks will be used - # and the L3 agent is able to handle any external networks. - if not self.conf.external_network_bridge: - return - - if not force and self.target_ex_net_id: - return self.target_ex_net_id - - try: - self.target_ex_net_id = self.plugin_rpc.get_external_network_id( - self.context) - return self.target_ex_net_id - except n_rpc.RemoteError as e: - with excutils.save_and_reraise_exception() as ctx: - if e.exc_type == 'TooManyExternalNetworks': - ctx.reraise = False - msg = _( - "The 'gateway_external_network_id' option must be " - "configured for this agent as Neutron has more than " - "one external network.") - raise Exception(msg) - - def _router_added(self, router_id, router): - ri = RouterInfo(router_id, self.root_helper, - self.conf.use_namespaces, router, - use_ipv6=self.use_ipv6) - self.router_info[router_id] = ri - if self.conf.use_namespaces: - self._create_router_namespace(ri) - for c, r in self.metadata_filter_rules(): - ri.iptables_manager.ipv4['filter'].add_rule(c, r) - for c, r in self.metadata_nat_rules(): - ri.iptables_manager.ipv4['nat'].add_rule(c, r) - ri.iptables_manager.apply() - self.process_router_add(ri) - - if ri.is_ha: - self.process_ha_router_added(ri) - - if self.conf.enable_metadata_proxy: - if ri.is_ha: - self._add_keepalived_notifiers(ri) - else: - self._spawn_metadata_proxy(ri.router_id, ri.ns_name) - - def _router_removed(self, router_id): - ri = self.router_info.get(router_id) - if ri is None: - LOG.warn(_("Info for router %s were not found. 
" - "Skipping router removal"), router_id) - return - - if ri.is_ha: - self.process_ha_router_removed(ri) - - ri.router['gw_port'] = None - ri.router[l3_constants.INTERFACE_KEY] = [] - ri.router[l3_constants.FLOATINGIP_KEY] = [] - self.process_router(ri) - for c, r in self.metadata_filter_rules(): - ri.iptables_manager.ipv4['filter'].remove_rule(c, r) - for c, r in self.metadata_nat_rules(): - ri.iptables_manager.ipv4['nat'].remove_rule(c, r) - ri.iptables_manager.apply() - if self.conf.enable_metadata_proxy: - self._destroy_metadata_proxy(ri.router_id, ri.ns_name) - del self.router_info[router_id] - self._destroy_router_namespace(ri.ns_name) - - def _get_metadata_proxy_callback(self, router_id): - - def callback(pid_file): - metadata_proxy_socket = cfg.CONF.metadata_proxy_socket - proxy_cmd = ['neutron-ns-metadata-proxy', - '--pid_file=%s' % pid_file, - '--metadata_proxy_socket=%s' % metadata_proxy_socket, - '--router_id=%s' % router_id, - '--state_path=%s' % self.conf.state_path, - '--metadata_port=%s' % self.conf.metadata_port] - proxy_cmd.extend(config.get_log_args( - cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % - router_id)) - return proxy_cmd - - return callback - - def _get_metadata_proxy_process_manager(self, router_id, ns_name): - return external_process.ProcessManager( - self.conf, - router_id, - self.root_helper, - ns_name) - - def _spawn_metadata_proxy(self, router_id, ns_name): - callback = self._get_metadata_proxy_callback(router_id) - pm = self._get_metadata_proxy_process_manager(router_id, ns_name) - pm.enable(callback) - - def _destroy_metadata_proxy(self, router_id, ns_name): - pm = self._get_metadata_proxy_process_manager(router_id, ns_name) - pm.disable() - - def _set_subnet_arp_info(self, ri, port): - """Set ARP info retrieved from Plugin for existing ports.""" - if 'id' not in port['subnet'] or not ri.router['distributed']: - return - subnet_id = port['subnet']['id'] - subnet_ports = ( - self.plugin_rpc.get_ports_by_subnet(self.context, - subnet_id)) - - for p in subnet_ports: - if (p['device_owner'] not in ( - l3_constants.DEVICE_OWNER_ROUTER_INTF, - l3_constants.DEVICE_OWNER_DVR_INTERFACE)): - for fixed_ip in p['fixed_ips']: - self._update_arp_entry(ri, fixed_ip['ip_address'], - p['mac_address'], - subnet_id, 'add') - - def _set_subnet_info(self, port): - ips = port['fixed_ips'] - if not ips: - raise Exception(_("Router port %s has no IP address") % port['id']) - if len(ips) > 1: - LOG.error(_("Ignoring multiple IPs on router port %s"), - port['id']) - prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen - port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) - - def _get_existing_devices(self, ri): - ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper, - namespace=ri.ns_name) - ip_devs = ip_wrapper.get_devices(exclude_loopback=True) - return [ip_dev.name for ip_dev in ip_devs] - - @common_utils.exception_logger() - def process_router(self, ri): - # TODO(mrsmith) - we shouldn't need to check here - if 'distributed' not in ri.router: - ri.router['distributed'] = False - ri.iptables_manager.defer_apply_on() - ex_gw_port = self._get_ex_gw_port(ri) - internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) - snat_ports = ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) - existing_port_ids = set([p['id'] for p in ri.internal_ports]) - current_port_ids = set([p['id'] for p in internal_ports - if p['admin_state_up']]) - new_ports = [p for p in internal_ports if - p['id'] in current_port_ids and - p['id'] not in existing_port_ids] - 
old_ports = [p for p in ri.internal_ports if - p['id'] not in current_port_ids] - - new_ipv6_port = False - old_ipv6_port = False - for p in new_ports: - self._set_subnet_info(p) - self.internal_network_added(ri, p) - ri.internal_ports.append(p) - self._set_subnet_arp_info(ri, p) - if (not new_ipv6_port and - netaddr.IPNetwork(p['subnet']['cidr']).version == 6): - new_ipv6_port = True - - for p in old_ports: - self.internal_network_removed(ri, p) - ri.internal_ports.remove(p) - if (not old_ipv6_port and - netaddr.IPNetwork(p['subnet']['cidr']).version == 6): - old_ipv6_port = True - - if new_ipv6_port or old_ipv6_port: - ra.enable_ipv6_ra(ri.router_id, - ri.ns_name, - internal_ports, - self.get_internal_device_name, - self.root_helper) - - existing_devices = self._get_existing_devices(ri) - current_internal_devs = set([n for n in existing_devices - if n.startswith(INTERNAL_DEV_PREFIX)]) - current_port_devs = set([self.get_internal_device_name(id) for - id in current_port_ids]) - stale_devs = current_internal_devs - current_port_devs - for stale_dev in stale_devs: - LOG.debug(_('Deleting stale internal router device: %s'), - stale_dev) - self.driver.unplug(stale_dev, - namespace=ri.ns_name, - prefix=INTERNAL_DEV_PREFIX) - - # TODO(salv-orlando): RouterInfo would be a better place for - # this logic too - ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or - ri.ex_gw_port and ri.ex_gw_port['id']) - - interface_name = None - if ex_gw_port_id: - interface_name = self.get_external_device_name(ex_gw_port_id) - if ex_gw_port: - def _gateway_ports_equal(port1, port2): - def _get_filtered_dict(d, ignore): - return dict((k, v) for k, v in d.iteritems() - if k not in ignore) - - keys_to_ignore = set(['binding:host_id']) - port1_filtered = _get_filtered_dict(port1, keys_to_ignore) - port2_filtered = _get_filtered_dict(port2, keys_to_ignore) - return port1_filtered == port2_filtered - - self._set_subnet_info(ex_gw_port) - if not ri.ex_gw_port: - self.external_gateway_added(ri, ex_gw_port, interface_name) - elif not _gateway_ports_equal(ex_gw_port, ri.ex_gw_port): - self.external_gateway_updated(ri, ex_gw_port, interface_name) - elif not ex_gw_port and ri.ex_gw_port: - self.external_gateway_removed(ri, ri.ex_gw_port, interface_name) - - stale_devs = [dev for dev in existing_devices - if dev.startswith(EXTERNAL_DEV_PREFIX) - and dev != interface_name] - for stale_dev in stale_devs: - LOG.debug(_('Deleting stale external router device: %s'), - stale_dev) - self.driver.unplug(stale_dev, - bridge=self.conf.external_network_bridge, - namespace=ri.ns_name, - prefix=EXTERNAL_DEV_PREFIX) - - # Process static routes for router - self.routes_updated(ri) - # Process SNAT rules for external gateway - if (not ri.router['distributed'] or - ex_gw_port and ri.router['gw_port_host'] == self.host): - # Get IPv4 only internal CIDRs - internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports - if netaddr.IPNetwork(p['ip_cidr']).version == 4] -#commented by jiahaojie---begin - ri.perform_snat_action(self._handle_router_snat_rules, - internal_cidrs, interface_name) -#commented by jiahaojie---end - - # Process SNAT/DNAT rules for floating IPs - fip_statuses = {} - try: - if ex_gw_port: - existing_floating_ips = ri.floating_ips - self.process_router_floating_ip_nat_rules(ri) - ri.iptables_manager.defer_apply_off() - # Once NAT rules for floating IPs are safely in place - # configure their addresses on the external gateway port - fip_statuses = self.process_router_floating_ip_addresses( - ri, ex_gw_port) - except Exception: - # 
TODO(salv-orlando): Less broad catching - # All floating IPs must be put in error state - for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): - fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR - - if ex_gw_port: - # Identify floating IPs which were disabled - ri.floating_ips = set(fip_statuses.keys()) - for fip_id in existing_floating_ips - ri.floating_ips: - fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN - # Update floating IP status on the neutron server - self.plugin_rpc.update_floatingip_statuses( - self.context, ri.router_id, fip_statuses) - - # Update ex_gw_port and enable_snat on the router info cache - ri.ex_gw_port = ex_gw_port - ri.snat_ports = snat_ports - ri.enable_snat = ri.router.get('enable_snat') - - if ri.is_ha: - if ri.ha_port: - ri.spawn_keepalived() - else: - ri.disable_keepalived() - - def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, - interface_name, action): - # Remove all the rules - # This is safe because if use_namespaces is set as False - # then the agent can only configure one router, otherwise - # each router's SNAT rules will be in their own namespace - if not ri.router['distributed']: - iptables_manager = ri.iptables_manager - elif ri.snat_iptables_manager: - iptables_manager = ri.snat_iptables_manager - else: - LOG.debug("DVR router: no snat rules to be handled") - return - - iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') - iptables_manager.ipv4['nat'].empty_chain('snat') - - if not ri.router['distributed']: - # Add back the jump to float-snat - iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') - - # And add them back if the action is add_rules - if action == 'add_rules' and ex_gw_port: - # ex_gw_port should not be None in this case - # NAT rules are added only if ex_gw_port has an IPv4 address - for ip_addr in ex_gw_port['fixed_ips']: - ex_gw_ip = ip_addr['ip_address'] - if netaddr.IPAddress(ex_gw_ip).version == 4: - rules = self.external_gateway_nat_rules(ex_gw_ip, - internal_cidrs, - interface_name) - for rule in rules: - iptables_manager.ipv4['nat'].add_rule(*rule) - break - iptables_manager.apply() - - def _handle_router_fip_nat_rules(self, ri, interface_name, action): - """Configures NAT rules for Floating IPs for DVR. - - Remove all the rules. This is safe because if - use_namespaces is set as False then the agent can - only configure one router, otherwise each router's - NAT rules will be in their own namespace. - """ - ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') - ri.iptables_manager.ipv4['nat'].empty_chain('snat') - - # Add back the jump to float-snat - ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') - - # And add them back if the action is add_rules - if action == 'add_rules' and interface_name: - rule = ('POSTROUTING', '! -i %(interface_name)s ' - '! -o %(interface_name)s -m conntrack ! ' - '--ctstate DNAT -j ACCEPT' % - {'interface_name': interface_name}) - ri.iptables_manager.ipv4['nat'].add_rule(*rule) - ri.iptables_manager.apply() - - def process_router_floating_ip_nat_rules(self, ri): - """Configure NAT rules for the router's floating IPs. - - Configures iptables rules for the floating ips of the given router - """ - # Clear out all iptables rules for floating ips - ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') - - floating_ips = self.get_floating_ips(ri) - # Loop once to ensure that floating ips are configured. - for fip in floating_ips: - # Rebuild iptables rules for the floating ip. 
- fixed = fip['fixed_ip_address'] - fip_ip = fip['floating_ip_address'] - for chain, rule in self.floating_forward_rules(fip_ip, fixed): - ri.iptables_manager.ipv4['nat'].add_rule(chain, rule, - tag='floating_ip') - - ri.iptables_manager.apply() - - def _get_external_device_interface_name(self, ri, ex_gw_port, - floating_ips): - if ri.router['distributed']: - # filter out only FIPs for this host/agent - floating_ips = [i for i in floating_ips if i['host'] == self.host] - if floating_ips and self.agent_gateway_port is None: - self._create_agent_gateway_port(ri, floating_ips[0] - ['floating_network_id']) - - if self.agent_gateway_port: - if floating_ips and ri.dist_fip_count == 0: - self.create_rtr_2_fip_link(ri, floating_ips[0] - ['floating_network_id']) - return self.get_rtr_int_device_name(ri.router_id) - else: - # there are no fips or agent port, no work to do - return None - - return self.get_external_device_name(ex_gw_port['id']) - - def _add_floating_ip(self, ri, fip, interface_name, device): - fip_ip = fip['floating_ip_address'] - ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX - - if ri.is_ha: - self._add_vip(ri, ip_cidr, interface_name) - else: - net = netaddr.IPNetwork(ip_cidr) - try: - device.addr.add(net.version, ip_cidr, str(net.broadcast)) - except (processutils.UnknownArgumentError, - processutils.ProcessExecutionError): - # any exception occurred here should cause the floating IP - # to be set in error state - LOG.warn(_("Unable to configure IP address for " - "floating IP: %s"), fip['id']) - return l3_constants.FLOATINGIP_STATUS_ERROR - if ri.router['distributed']: - # Special Handling for DVR - update FIP namespace - # and ri.namespace to handle DVR based FIP - self.floating_ip_added_dist(ri, fip) - else: - # As GARP is processed in a distinct thread the call below - # won't raise an exception to be handled. - self._send_gratuitous_arp_packet( - ri.ns_name, interface_name, fip_ip) - return l3_constants.FLOATINGIP_STATUS_ACTIVE - - def _remove_floating_ip(self, ri, device, ip_cidr): - if ri.is_ha: - self._remove_vip(ri, ip_cidr) - else: - net = netaddr.IPNetwork(ip_cidr) - device.addr.delete(net.version, ip_cidr) - self.driver.delete_conntrack_state(root_helper=self.root_helper, - namespace=ri.ns_name, - ip=ip_cidr) - if ri.router['distributed']: - self.floating_ip_removed_dist(ri, ip_cidr) - - def process_router_floating_ip_addresses(self, ri, ex_gw_port): - """Configure IP addresses on router's external gateway interface. - - Ensures addresses for existing floating IPs and cleans up - those that should not longer be configured. - """ - - fip_statuses = {} - floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) - interface_name = self._get_external_device_interface_name( - ri, ex_gw_port, floating_ips) - if interface_name is None: - return fip_statuses - - device = ip_lib.IPDevice(interface_name, self.root_helper, - namespace=ri.ns_name) - existing_cidrs = set([addr['cidr'] for addr in device.addr.list()]) - new_cidrs = set() - - # Loop once to ensure that floating ips are configured. 
- for fip in floating_ips: - fip_ip = fip['floating_ip_address'] - ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX - new_cidrs.add(ip_cidr) - fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE - if ip_cidr not in existing_cidrs: - fip_statuses[fip['id']] = self._add_floating_ip( - ri, fip, interface_name, device) - - fips_to_remove = ( - ip_cidr for ip_cidr in existing_cidrs - new_cidrs if - ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX)) - for ip_cidr in fips_to_remove: - self._remove_floating_ip(ri, device, ip_cidr) - - return fip_statuses - - def _get_ex_gw_port(self, ri): - return ri.router.get('gw_port') - - def _arping(self, ns_name, interface_name, ip_address, distributed=False): - if distributed: - device = ip_lib.IPDevice(interface_name, self.root_helper, - namespace=ns_name) - ip_cidr = str(ip_address) + FLOATING_IP_CIDR_SUFFIX - net = netaddr.IPNetwork(ip_cidr) - device.addr.add(net.version, ip_cidr, str(net.broadcast)) - - arping_cmd = ['arping', '-A', - '-I', interface_name, - '-c', self.conf.send_arp_for_ha, - ip_address] - try: - ip_wrapper = ip_lib.IPWrapper(self.root_helper, - namespace=ns_name) - ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) - except Exception as e: - LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) - if distributed: - device.addr.delete(net.version, ip_cidr) - - def _send_gratuitous_arp_packet(self, ns_name, interface_name, ip_address, - distributed=False): - if self.conf.send_arp_for_ha > 0: - eventlet.spawn_n(self._arping, ns_name, interface_name, ip_address, - distributed) - - def get_internal_port(self, ri, subnet_id): - """Return internal router port based on subnet_id.""" - router_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) - for port in router_ports: - fips = port['fixed_ips'] - for f in fips: - if f['subnet_id'] == subnet_id: - return port - - def get_internal_device_name(self, port_id): - return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_external_device_name(self, port_id): - return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_fip_ext_device_name(self, port_id): - return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_rtr_int_device_name(self, router_id): - return (ROUTER_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] - - def get_fip_int_device_name(self, router_id): - return (FIP_2_ROUTER_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] - - def get_snat_int_device_name(self, port_id): - return (SNAT_INT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_fip_ns_name(self, ext_net_id): - return (FIP_NS_PREFIX + ext_net_id) - - def get_snat_ns_name(self, router_id): - return (SNAT_NS_PREFIX + router_id) - - def get_snat_interfaces(self, ri): - return ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) - - def get_floating_ips(self, ri): - """Filter Floating IPs to be hosted on this agent.""" - floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) - if ri.router['distributed']: - floating_ips = [i for i in floating_ips if i['host'] == self.host] - return floating_ips - - def _map_internal_interfaces(self, ri, int_port, snat_ports): - """Return the SNAT port for the given internal interface port.""" - fixed_ip = int_port['fixed_ips'][0] - subnet_id = fixed_ip['subnet_id'] - match_port = [p for p in snat_ports if - p['fixed_ips'][0]['subnet_id'] == subnet_id] - if match_port: - return match_port[0] - else: - LOG.error(_('DVR: no map match_port found!')) - - def _create_dvr_gateway(self, ri, ex_gw_port, 
gw_interface_name, - snat_ports): - """Create SNAT namespace.""" - snat_ns_name = self.get_snat_ns_name(ri.router['id']) - self._create_namespace(snat_ns_name) - # connect snat_ports to br_int from SNAT namespace - for port in snat_ports: - # create interface_name - self._set_subnet_info(port) - interface_name = self.get_snat_int_device_name(port['id']) - self._internal_network_added(snat_ns_name, port['network_id'], - port['id'], port['ip_cidr'], - port['mac_address'], interface_name, - SNAT_INT_DEV_PREFIX) - self._external_gateway_added(ri, ex_gw_port, gw_interface_name, - snat_ns_name, preserve_ips=[]) - ri.snat_iptables_manager = iptables_manager.IptablesManager( - root_helper=self.root_helper, - namespace=snat_ns_name, - use_ipv6=self.use_ipv6) - # kicks the FW Agent to add rules for the snat namespace - self.process_router_add(ri) - - def external_gateway_added(self, ri, ex_gw_port, interface_name): - if ri.router['distributed']: - ip_wrapr = ip_lib.IPWrapper(self.root_helper, namespace=ri.ns_name) - ip_wrapr.netns.execute(['sysctl', '-w', - 'net.ipv4.conf.all.send_redirects=0']) - snat_ports = self.get_snat_interfaces(ri) - for p in ri.internal_ports: - gateway = self._map_internal_interfaces(ri, p, snat_ports) - id_name = self.get_internal_device_name(p['id']) - if gateway: - self._snat_redirect_add(ri, gateway['fixed_ips'][0] - ['ip_address'], p, id_name) - - if (self.conf.agent_mode == 'dvr_snat' and - ri.router['gw_port_host'] == self.host): - self._create_dvr_gateway(ri, ex_gw_port, interface_name, - snat_ports) - for port in snat_ports: - for ip in port['fixed_ips']: - self._update_arp_entry(ri, ip['ip_address'], - port['mac_address'], - ip['subnet_id'], 'add') - return - - # Compute a list of addresses this router is supposed to have. - # This avoids unnecessarily removing those addresses and - # causing a momentarily network outage. 
- floating_ips = self.get_floating_ips(ri) - preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX - for ip in floating_ips] - - self._external_gateway_added(ri, ex_gw_port, interface_name, - ri.ns_name, preserve_ips) - - if ri.is_ha: - self._ha_external_gateway_added(ri, ex_gw_port, interface_name) - - def external_gateway_updated(self, ri, ex_gw_port, interface_name): - preserve_ips = [] - if ri.router['distributed']: - if (self.conf.agent_mode == 'dvr_snat' and - ri.router['gw_port_host'] == self.host): - ns_name = self.get_snat_ns_name(ri.router['id']) - else: - # no centralized SNAT gateway for this node/agent - LOG.debug("not hosting snat for router: %s", ri.router['id']) - return - else: - ns_name = ri.ns_name - floating_ips = self.get_floating_ips(ri) - preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX - for ip in floating_ips] - - self._external_gateway_added(ri, ex_gw_port, interface_name, - ns_name, preserve_ips) - - if ri.is_ha: - self._ha_external_gateway_updated(ri, ex_gw_port, interface_name) - - def _external_gateway_added(self, ri, ex_gw_port, interface_name, - ns_name, preserve_ips): - if not ip_lib.device_exists(interface_name, - root_helper=self.root_helper, - namespace=ns_name): - self.driver.plug(ex_gw_port['network_id'], - ex_gw_port['id'], interface_name, - ex_gw_port['mac_address'], - bridge=self.conf.external_network_bridge, - namespace=ns_name, - prefix=EXTERNAL_DEV_PREFIX) - - if not ri.is_ha: - self.driver.init_l3( - interface_name, [ex_gw_port['ip_cidr']], namespace=ns_name, - gateway=ex_gw_port['subnet'].get('gateway_ip'), - extra_subnets=ex_gw_port.get('extra_subnets', []), - preserve_ips=preserve_ips) - ip_address = ex_gw_port['ip_cidr'].split('/')[0] - self._send_gratuitous_arp_packet(ns_name, - interface_name, ip_address) - - def agent_gateway_added(self, ns_name, ex_gw_port, - interface_name): - """Add Floating IP gateway port to FIP namespace.""" - if not ip_lib.device_exists(interface_name, - root_helper=self.root_helper, - namespace=ns_name): - self.driver.plug(ex_gw_port['network_id'], - ex_gw_port['id'], interface_name, - ex_gw_port['mac_address'], - bridge=self.conf.external_network_bridge, - namespace=ns_name, - prefix=FIP_EXT_DEV_PREFIX) - - self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], - namespace=ns_name) - ip_address = ex_gw_port['ip_cidr'].split('/')[0] - self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) - - gw_ip = ex_gw_port['subnet']['gateway_ip'] - if gw_ip: - ipd = ip_lib.IPDevice(interface_name, self.root_helper, - namespace=ns_name) - ipd.route.add_gateway(gw_ip) - - cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] - ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) - ip_wrapper.netns.execute(cmd, check_exit_code=False) - - def internal_ns_interface_added(self, ip_cidr, - interface_name, ns_name): - ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) - ip_wrapper.netns.execute(['ip', 'addr', 'add', - ip_cidr, 'dev', interface_name]) - - def external_gateway_removed(self, ri, ex_gw_port, interface_name): - if ri.router['distributed']: - for p in ri.internal_ports: - internal_interface = self.get_internal_device_name(p['id']) - self._snat_redirect_remove(ri, p, internal_interface) - - if self.conf.agent_mode == 'dvr_snat' and ( - ri.router['gw_port_host'] == self.host): - ns_name = self.get_snat_ns_name(ri.router['id']) - else: - # not hosting agent - no work to do - LOG.debug('DVR: CSNAT not hosted: %s', ex_gw_port) 
- return - else: - ns_name = ri.ns_name - - if ri.is_ha: - self._ha_external_gateway_removed(ri, interface_name) - - self.driver.unplug(interface_name, - bridge=self.conf.external_network_bridge, - namespace=ns_name, - prefix=EXTERNAL_DEV_PREFIX) - if ri.router['distributed']: - self._destroy_snat_namespace(ns_name) - - def metadata_filter_rules(self): - rules = [] - if self.conf.enable_metadata_proxy: - rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' - '-p tcp -m tcp --dport %s ' - '-j ACCEPT' % self.conf.metadata_port)) - return rules - - def metadata_nat_rules(self): - rules = [] - if self.conf.enable_metadata_proxy: - rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' - '-p tcp -m tcp --dport 80 -j REDIRECT ' - '--to-port %s' % self.conf.metadata_port)) - return rules - - def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs, - interface_name): - rules = [('POSTROUTING', '! -i %(interface_name)s ' - '! -o %(interface_name)s -m conntrack ! ' - '--ctstate DNAT -j ACCEPT' % - {'interface_name': interface_name})] - for cidr in internal_cidrs: - rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr)) - return rules - - def _snat_redirect_add(self, ri, gateway, sn_port, sn_int): - """Adds rules and routes for SNAT redirection.""" - try: - snat_idx = netaddr.IPNetwork(sn_port['ip_cidr']).value - ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) - ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, - namespace=ri.ns_name) - ns_ipd.route.add_gateway(gateway, table=snat_idx) - ns_ipr.add_rule_from(sn_port['ip_cidr'], snat_idx, snat_idx) - ns_ipr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' - 'send_redirects=0' % sn_int]) - except Exception: - LOG.exception(_('DVR: error adding redirection logic')) - - def _snat_redirect_remove(self, ri, sn_port, sn_int): - """Removes rules and routes for SNAT redirection.""" - try: - snat_idx = netaddr.IPNetwork(sn_port['ip_cidr']).value - ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) - ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, - namespace=ri.ns_name) - ns_ipd.route.delete_gateway(table=snat_idx) - ns_ipr.delete_rule_priority(snat_idx) - except Exception: - LOG.exception(_('DVR: removed snat failed')) - - def _internal_network_added(self, ns_name, network_id, port_id, - internal_cidr, mac_address, - interface_name, prefix, is_ha=False): - if not ip_lib.device_exists(interface_name, - root_helper=self.root_helper, - namespace=ns_name): - self.driver.plug(network_id, port_id, interface_name, mac_address, - namespace=ns_name, - prefix=prefix) - - if not is_ha: - self.driver.init_l3(interface_name, [internal_cidr], - namespace=ns_name) - ip_address = internal_cidr.split('/')[0] - self._send_gratuitous_arp_packet(ns_name, interface_name, - ip_address) - - def internal_network_added(self, ri, port): - network_id = port['network_id'] - port_id = port['id'] - internal_cidr = port['ip_cidr'] - mac_address = port['mac_address'] - - interface_name = self.get_internal_device_name(port_id) - - self._internal_network_added(ri.ns_name, network_id, port_id, - internal_cidr, mac_address, - interface_name, INTERNAL_DEV_PREFIX, - ri.is_ha) - - if ri.is_ha: - self._add_vip(ri, internal_cidr, interface_name) - - ex_gw_port = self._get_ex_gw_port(ri) - if ri.router['distributed'] and ex_gw_port: - snat_ports = self.get_snat_interfaces(ri) - sn_port = self._map_internal_interfaces(ri, port, snat_ports) - if sn_port: - self._snat_redirect_add(ri, sn_port['fixed_ips'][0] - ['ip_address'], port, interface_name) 
- if (self.conf.agent_mode == 'dvr_snat' and - ri.router['gw_port_host'] == self.host): - ns_name = self.get_snat_ns_name(ri.router['id']) - self._set_subnet_info(sn_port) - interface_name = ( - self.get_snat_int_device_name(sn_port['id'])) - self._internal_network_added(ns_name, - sn_port['network_id'], - sn_port['id'], - sn_port['ip_cidr'], - sn_port['mac_address'], - interface_name, - SNAT_INT_DEV_PREFIX) - - def internal_network_removed(self, ri, port): - port_id = port['id'] - interface_name = self.get_internal_device_name(port_id) - if ri.router['distributed'] and ri.ex_gw_port: - # DVR handling code for SNAT - self._snat_redirect_remove(ri, port, interface_name) - if self.conf.agent_mode == 'dvr_snat' and ( - ri.ex_gw_port['binding:host_id'] == self.host): - snat_port = self._map_internal_interfaces(ri, port, - ri.snat_ports) - if snat_port: - snat_interface = ( - self.get_snat_int_device_name(snat_port['id']) - ) - ns_name = self.get_snat_ns_name(ri.router['id']) - prefix = SNAT_INT_DEV_PREFIX - if ip_lib.device_exists(snat_interface, - root_helper=self.root_helper, - namespace=ns_name): - self.driver.unplug(snat_interface, namespace=ns_name, - prefix=prefix) - - if ip_lib.device_exists(interface_name, - root_helper=self.root_helper, - namespace=ri.ns_name): - if ri.is_ha: - self._clear_vips(ri, interface_name) - self.driver.unplug(interface_name, namespace=ri.ns_name, - prefix=INTERNAL_DEV_PREFIX) - - def internal_network_nat_rules(self, ex_gw_ip, internal_cidr): - rules = [('snat', '-s %s -j SNAT --to-source %s' % - (internal_cidr, ex_gw_ip))] - return rules - - def _create_agent_gateway_port(self, ri, network_id): - """Create Floating IP gateway port. - - Request port creation from Plugin then creates - Floating IP namespace and adds gateway port. 
- """ - self.agent_gateway_port = ( - self.plugin_rpc.get_agent_gateway_port( - self.context, network_id)) - if 'subnet' not in self.agent_gateway_port: - LOG.error(_('Missing subnet/agent_gateway_port')) - return - self._set_subnet_info(self.agent_gateway_port) - - # add fip-namespace and agent_gateway_port - fip_ns_name = ( - self.get_fip_ns_name(str(network_id))) - self._create_namespace(fip_ns_name) - ri.fip_iptables_manager = iptables_manager.IptablesManager( - root_helper=self.root_helper, namespace=fip_ns_name, - use_ipv6=self.use_ipv6) - # no connection tracking needed in fip namespace - ri.fip_iptables_manager.ipv4['raw'].add_rule('PREROUTING', - '-j CT --notrack') - ri.fip_iptables_manager.apply() - interface_name = ( - self.get_fip_ext_device_name(self.agent_gateway_port['id'])) - self.agent_gateway_added(fip_ns_name, self.agent_gateway_port, - interface_name) - - def create_rtr_2_fip_link(self, ri, network_id): - """Create interface between router and Floating IP namespace.""" - rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) - fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) - fip_ns_name = self.get_fip_ns_name(str(network_id)) - - # add link local IP to interface - if ri.rtr_fip_subnet is None: - ri.rtr_fip_subnet = self.local_subnets.allocate(ri.router_id) - rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair() - ip_wrapper = ip_lib.IPWrapper(self.root_helper, - namespace=ri.ns_name) - int_dev = ip_wrapper.add_veth(rtr_2_fip_name, - fip_2_rtr_name, fip_ns_name) - self.internal_ns_interface_added(str(rtr_2_fip), - rtr_2_fip_name, ri.ns_name) - self.internal_ns_interface_added(str(fip_2_rtr), - fip_2_rtr_name, fip_ns_name) - int_dev[0].link.set_up() - int_dev[1].link.set_up() - # add default route for the link local interface - device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, - namespace=ri.ns_name) - device.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL) - #setup the NAT rules and chains - self._handle_router_fip_nat_rules(ri, rtr_2_fip_name, 'add_rules') - # kicks the FW Agent to add rules for the IR namespace if configured - self.process_router_add(ri) - - def floating_ip_added_dist(self, ri, fip): - """Add floating IP to FIP namespace.""" - floating_ip = fip['floating_ip_address'] - fixed_ip = fip['fixed_ip_address'] - rule_pr = self.fip_priorities.pop() - ri.floating_ips_dict[floating_ip] = rule_pr - fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) - ip_rule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) - ip_rule.add_rule_from(fixed_ip, FIP_RT_TBL, rule_pr) - - #Add routing rule in fip namespace - fip_cidr = str(floating_ip) + FLOATING_IP_CIDR_SUFFIX - fip_ns_name = self.get_fip_ns_name(str(fip['floating_network_id'])) - rtr_2_fip, _ = ri.rtr_fip_subnet.get_pair() - device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, - namespace=fip_ns_name) - device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) - interface_name = ( - self.get_fip_ext_device_name(self.agent_gateway_port['id'])) - self._send_gratuitous_arp_packet(fip_ns_name, - interface_name, floating_ip, - distributed=True) - # update internal structures - self.agent_fip_count = self.agent_fip_count + 1 - ri.dist_fip_count = ri.dist_fip_count + 1 - - def floating_ip_removed_dist(self, ri, fip_cidr): - """Remove floating IP from FIP namespace.""" - floating_ip = fip_cidr.split('/')[0] - rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) - fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) - rtr_2_fip, fip_2_rtr = 
ri.rtr_fip_subnet.get_pair() - fip_ns_name = self.get_fip_ns_name(str(self._fetch_external_net_id())) - ip_rule_rtr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) - if floating_ip in ri.floating_ips_dict: - rule_pr = ri.floating_ips_dict[floating_ip] - #TODO(rajeev): Handle else case - exception/log? - else: - rule_pr = None - - ip_rule_rtr.delete_rule_priority(rule_pr) - self.fip_priorities.add(rule_pr) - device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, - namespace=fip_ns_name) - - device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) - # check if this is the last FIP for this router - ri.dist_fip_count = ri.dist_fip_count - 1 - if ri.dist_fip_count == 0: - #remove default route entry - device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, - namespace=ri.ns_name) - ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=fip_ns_name) - device.route.delete_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL) - self.local_subnets.release(ri.router_id) - ri.rtr_fip_subnet = None - ns_ip.del_veth(fip_2_rtr_name) - # clean up fip-namespace if this is the last FIP - self.agent_fip_count = self.agent_fip_count - 1 - if self.agent_fip_count == 0: - self._destroy_fip_namespace(fip_ns_name) - - def floating_forward_rules(self, floating_ip, fixed_ip): - return [('PREROUTING', '-d %s -j DNAT --to %s' % - (floating_ip, fixed_ip)), - ('OUTPUT', '-d %s -j DNAT --to %s' % - (floating_ip, fixed_ip)), - ('float-snat', '-s %s -j SNAT --to %s' % - (fixed_ip, floating_ip))] - - def router_deleted(self, context, router_id): - """Deal with router deletion RPC message.""" - LOG.debug(_('Got router deleted notification for %s'), router_id) - update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER) - self._queue.add(update) - - def _update_arp_entry(self, ri, ip, mac, subnet_id, operation): - """Add or delete arp entry into router namespace for the subnet.""" - port = self.get_internal_port(ri, subnet_id) - # update arp entry only if the subnet is attached to the router - if port: - ip_cidr = str(ip) + '/32' - try: - # TODO(mrsmith): optimize the calls below for bulk calls - net = netaddr.IPNetwork(ip_cidr) - interface_name = self.get_internal_device_name(port['id']) - device = ip_lib.IPDevice(interface_name, self.root_helper, - namespace=ri.ns_name) - if operation == 'add': - device.neigh.add(net.version, ip, mac) - elif operation == 'delete': - device.neigh.delete(net.version, ip, mac) - except Exception: - LOG.exception(_("DVR: Failed updating arp entry")) - self.fullsync = True - - def add_arp_entry(self, context, payload): - """Add arp entry into router namespace. Called from RPC.""" - arp_table = payload['arp_table'] - router_id = payload['router_id'] - ip = arp_table['ip_address'] - mac = arp_table['mac_address'] - subnet_id = arp_table['subnet_id'] - ri = self.router_info.get(router_id) - if ri: - self._update_arp_entry(ri, ip, mac, subnet_id, 'add') - - def del_arp_entry(self, context, payload): - """Delete arp entry from router namespace. 
Called from RPC.""" - arp_table = payload['arp_table'] - router_id = payload['router_id'] - ip = arp_table['ip_address'] - mac = arp_table['mac_address'] - subnet_id = arp_table['subnet_id'] - ri = self.router_info.get(router_id) - if ri: - self._update_arp_entry(ri, ip, mac, subnet_id, 'delete') - - def routers_updated(self, context, routers): - """Deal with routers modification and creation RPC message.""" - LOG.debug(_('Got routers updated notification :%s'), routers) - if routers: - # This is needed for backward compatibility - if isinstance(routers[0], dict): - routers = [router['id'] for router in routers] - for id in routers: - update = RouterUpdate(id, PRIORITY_RPC) - self._queue.add(update) - - def router_removed_from_agent(self, context, payload): - LOG.debug(_('Got router removed from agent :%r'), payload) - router_id = payload['router_id'] - update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER) - self._queue.add(update) - - def router_added_to_agent(self, context, payload): - LOG.debug(_('Got router added to agent :%r'), payload) - self.routers_updated(context, payload) - - def _process_routers(self, routers, all_routers=False): - pool = eventlet.GreenPool() - if (self.conf.external_network_bridge and - not ip_lib.device_exists(self.conf.external_network_bridge)): - LOG.error(_("The external network bridge '%s' does not exist"), - self.conf.external_network_bridge) - return - - target_ex_net_id = self._fetch_external_net_id() - # if routers are all the routers we have (They are from router sync on - # starting or when error occurs during running), we seek the - # routers which should be removed. - # If routers are from server side notification, we seek them - # from subset of incoming routers and ones we have now. - if all_routers: - prev_router_ids = set(self.router_info) - else: - prev_router_ids = set(self.router_info) & set( - [router['id'] for router in routers]) - cur_router_ids = set() - for r in routers: - # If namespaces are disabled, only process the router associated - # with the configured agent id. - if (not self.conf.use_namespaces and - r['id'] != self.conf.router_id): - continue - ex_net_id = (r['external_gateway_info'] or {}).get('network_id') - if not ex_net_id and not self.conf.handle_internal_only_routers: - continue - if (target_ex_net_id and ex_net_id and - ex_net_id != target_ex_net_id): - # Double check that our single external_net_id has not changed - # by forcing a check by RPC. 
- if (ex_net_id != self._fetch_external_net_id(force=True)): - continue - cur_router_ids.add(r['id']) - if r['id'] not in self.router_info: - self._router_added(r['id'], r) - ri = self.router_info[r['id']] - ri.router = r - pool.spawn_n(self.process_router, ri) - # identify and remove routers that no longer exist - for router_id in prev_router_ids - cur_router_ids: - pool.spawn_n(self._router_removed, router_id) - pool.waitall() - - def _process_router_update(self): - for rp, update in self._queue.each_update_to_next_router(): - LOG.debug("Starting router update for %s", update.id) - router = update.router - if update.action != DELETE_ROUTER and not router: - try: - update.timestamp = timeutils.utcnow() - routers = self.plugin_rpc.get_routers(self.context, - [update.id]) - except Exception: - msg = _("Failed to fetch router information for '%s'") - LOG.exception(msg, update.id) - self.fullsync = True - continue - - if routers: - router = routers[0] - - if not router: - self._router_removed(update.id) - continue - - self._process_routers([router]) - LOG.debug("Finished a router update for %s", update.id) - rp.fetched_and_processed(update.timestamp) - - def _process_routers_loop(self): - LOG.debug("Starting _process_routers_loop") - pool = eventlet.GreenPool(size=8) - while True: - pool.spawn_n(self._process_router_update) - - def _router_ids(self): - if not self.conf.use_namespaces: - return [self.conf.router_id] - - @periodic_task.periodic_task - def periodic_sync_routers_task(self, context): - self._sync_routers_task(context) - - def _sync_routers_task(self, context): - if self.services_sync: - super(L3NATAgent, self).process_services_sync(context) - LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), - self.fullsync) - if not self.fullsync: - return - - # Capture a picture of namespaces *before* fetching the full list from - # the database. This is important to correctly identify stale ones. - namespaces = set() - if self._clean_stale_namespaces: - namespaces = self._list_namespaces() - prev_router_ids = set(self.router_info) - - try: - router_ids = self._router_ids() - timestamp = timeutils.utcnow() - routers = self.plugin_rpc.get_routers( - context, router_ids) - - LOG.debug(_('Processing :%r'), routers) - for r in routers: - update = RouterUpdate(r['id'], - PRIORITY_SYNC_ROUTERS_TASK, - router=r, - timestamp=timestamp) - self._queue.add(update) - self.fullsync = False - LOG.debug(_("_sync_routers_task successfully completed")) - except n_rpc.RPCException: - LOG.exception(_("Failed synchronizing routers due to RPC error")) - self.fullsync = True - except Exception: - LOG.exception(_("Failed synchronizing routers")) - self.fullsync = True - else: - # Resync is not necessary for the cleanup of stale namespaces - curr_router_ids = set([r['id'] for r in routers]) - - # Two kinds of stale routers: Routers for which info is cached in - # self.router_info and the others. First, handle the former. - for router_id in prev_router_ids - curr_router_ids: - update = RouterUpdate(router_id, - PRIORITY_SYNC_ROUTERS_TASK, - timestamp=timestamp, - action=DELETE_ROUTER) - self._queue.add(update) - - # Next, one effort to clean out namespaces for which we don't have - # a record. (i.e. 
_clean_stale_namespaces=False after one pass) - if self._clean_stale_namespaces: - ids_to_keep = curr_router_ids | prev_router_ids - self._cleanup_namespaces(namespaces, ids_to_keep) - - def after_start(self): - eventlet.spawn_n(self._process_routers_loop) - LOG.info(_("L3 agent started")) - - def _update_routing_table(self, ri, operation, route): - cmd = ['ip', 'route', operation, 'to', route['destination'], - 'via', route['nexthop']] - ip_wrapper = ip_lib.IPWrapper(self.root_helper, - namespace=ri.ns_name) - ip_wrapper.netns.execute(cmd, check_exit_code=False) - - def get_ip_in_hex(self, ip_address): - try: - return '%08x' % netaddr.IPAddress(ip_address, version=4) - except Exception: - LOG.warn(_("Unable to create gre tunnel. Invalid remote IP: %s"), - ip_address) - return - - def _update_ip_route(self, ri, operation, dest_cidr, gre_interface): - snat_ns_name = self.get_snat_ns_name(ri.router['id']) - device = ip_lib.IPDevice(gre_interface, - self.root_helper, - namespace=snat_ns_name) - if(operation == 'add'): - device.route.add_onlink_route(dest_cidr) - if(operation == 'delete'): - device.route.delete_onlink_route(dest_cidr) - - def _create_gre_tunnel(self, ri, gre_device, remote_ip, local_ip): - snat_ns_name = self.get_snat_ns_name(ri.router['id']) - device = ip_lib.IPDevice(gre_device, - self.root_helper, - namespace=snat_ns_name) - device.tunnel.add('gre', remote_ip, local_ip) - device.link.set_up() - - def _delete_gre_tunnel(self, ri, gre_device): - snat_ns_name = self.get_snat_ns_name(ri.router['id']) - if ip_lib.device_exists(gre_device, - root_helper=self.root_helper, - namespace=snat_ns_name): - device = ip_lib.IPDevice(gre_device, - self.root_helper, - namespace=snat_ns_name) - device.tunnel.delete() - - def process_route_by_gre_tunnel(self, ri, operation, route): - next_hop = route['nexthop'] - dest_cidr = route['destination'] - ngt = ri.next_hop_gre_tunnel.get(next_hop) - if(operation == 'add'): - if(ngt and (dest_cidr not in ngt.extra_route_set)): - gre_interface = ngt.gre_device_name - self._update_ip_route(ri, 'add', dest_cidr, gre_interface) - ngt.add_extra_route(dest_cidr) - else: - gre_interface = 'gre_' + self.get_ip_in_hex(next_hop) - ngt = GreTunnel(next_hop, gre_interface) - ri.next_hop_gre_tunnel[next_hop] = ngt - if not ri.router['gw_port']: - return - self._create_gre_tunnel(ri, gre_interface, next_hop, - ri.router['gw_port']['fixed_ips'][0].get('ip_address')) - self._update_ip_route(ri, 'add', dest_cidr, gre_interface) - ngt.add_extra_route(dest_cidr) - if(operation == 'delete'): - if(ngt): - gre_interface = ngt.gre_device_name - self._update_ip_route(ri, 'delete', dest_cidr, gre_interface) - ngt.remove_extra_route(dest_cidr) - if(len(ngt.extra_route_set) == 0): - self._delete_gre_tunnel(ri, gre_interface) - ri.next_hop_gre_tunnel.pop(next_hop) - - def routes_updated(self, ri): - new_routes = ri.router['routes'] - if ri.is_ha: - self._process_virtual_routes(ri, new_routes) - return - - old_routes = ri.routes - adds, removes = common_utils.diff_list_of_dict(old_routes, - new_routes) - for route in adds: - LOG.debug(_("Added route entry is '%s'"), route) - # remove replaced route from deleted route - for del_route in removes: - if route['destination'] == del_route['destination']: - removes.remove(del_route) - if(route.get('onlink')): - if(self.conf.agent_mode == 'dvr_snat'): - self.process_route_by_gre_tunnel(ri, 'add', route) - else: - continue - else: - #replace success even if there is no existing route - self._update_routing_table(ri, 'replace', route) - for 
route in removes: - LOG.debug(_("Removed route entry is '%s'"), route) - if(route.get('onlink')): - if(self.conf.agent_mode == 'dvr_snat'): - self.process_route_by_gre_tunnel(ri, 'delete', route) - else: - continue - self._update_routing_table(ri, 'delete', route) - ri.routes = new_routes - - -class L3NATAgentWithStateReport(L3NATAgent): - - def __init__(self, host, conf=None): - super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - self.agent_state = { - 'binary': 'neutron-l3-agent', - 'host': host, - 'topic': topics.L3_AGENT, - 'configurations': { - 'agent_mode': self.conf.agent_mode, - 'use_namespaces': self.conf.use_namespaces, - 'router_id': self.conf.router_id, - 'handle_internal_only_routers': - self.conf.handle_internal_only_routers, - 'external_network_bridge': self.conf.external_network_bridge, - 'gateway_external_network_id': - self.conf.gateway_external_network_id, - 'interface_driver': self.conf.interface_driver}, - 'start_flag': True, - 'agent_type': l3_constants.AGENT_TYPE_L3} - report_interval = cfg.CONF.AGENT.report_interval - self.use_call = True - if report_interval: - self.heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - self.heartbeat.start(interval=report_interval) - - def _report_state(self): - LOG.debug(_("Report state task started")) - num_ex_gw_ports = 0 - num_interfaces = 0 - num_floating_ips = 0 - router_infos = self.router_info.values() - num_routers = len(router_infos) - for ri in router_infos: - ex_gw_port = self._get_ex_gw_port(ri) - if ex_gw_port: - num_ex_gw_ports += 1 - num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, - [])) - num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, - [])) - configurations = self.agent_state['configurations'] - configurations['routers'] = num_routers - configurations['ex_gw_ports'] = num_ex_gw_ports - configurations['interfaces'] = num_interfaces - configurations['floating_ips'] = num_floating_ips - try: - self.state_rpc.report_state(self.context, self.agent_state, - self.use_call) - self.agent_state.pop('start_flag', None) - self.use_call = False - LOG.debug(_("Report state task successfully completed")) - except AttributeError: - # This means the server does not support report_state - LOG.warn(_("Neutron server does not support state report." 
- " State report for this agent will be disabled.")) - self.heartbeat.stop() - return - except Exception: - LOG.exception(_("Failed reporting state!")) - - def agent_updated(self, context, payload): - """Handle the agent_updated notification event.""" - self.fullsync = True - LOG.info(_("agent_updated by server side %s!"), payload) - - -def _register_opts(conf): - conf.register_opts(L3NATAgent.OPTS) - conf.register_opts(l3_ha_agent.OPTS) - config.register_interface_driver_opts_helper(conf) - config.register_use_namespaces_opts_helper(conf) - config.register_agent_state_opts_helper(conf) - config.register_root_helper(conf) - conf.register_opts(interface.OPTS) - conf.register_opts(external_process.OPTS) - - -def main(manager='neutron.agent.l3_agent.L3NATAgentWithStateReport'): - _register_opts(cfg.CONF) - common_config.init(sys.argv[1:]) - config.setup_logging() - server = neutron_service.Service.create( - binary='neutron-l3-agent', - topic=topics.L3_AGENT, - report_interval=cfg.CONF.AGENT.report_interval, - manager=manager) - service.launch(server).wait() diff --git a/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/agent/linux/ip_lib.py b/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/agent/linux/ip_lib.py deleted file mode 100644 index b04951e6..00000000 --- a/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/agent/linux/ip_lib.py +++ /dev/null @@ -1,625 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import netaddr -from oslo.config import cfg - -from neutron.agent.linux import utils -from neutron.common import exceptions - - -OPTS = [ - cfg.BoolOpt('ip_lib_force_root', - default=False, - help=_('Force ip_lib calls to use the root helper')), -] - - -LOOPBACK_DEVNAME = 'lo' -# NOTE(ethuleau): depend of the version of iproute2, the vlan -# interface details vary. -VLAN_INTERFACE_DETAIL = ['vlan protocol 802.1q', - 'vlan protocol 802.1Q', - 'vlan id'] - - -class SubProcessBase(object): - def __init__(self, root_helper=None, namespace=None, - log_fail_as_error=True): - self.root_helper = root_helper - self.namespace = namespace - self.log_fail_as_error = log_fail_as_error - try: - self.force_root = cfg.CONF.ip_lib_force_root - except cfg.NoSuchOptError: - # Only callers that need to force use of the root helper - # need to register the option. - self.force_root = False - - def _run(self, options, command, args): - if self.namespace: - return self._as_root(options, command, args) - elif self.force_root: - # Force use of the root helper to ensure that commands - # will execute in dom0 when running under XenServer/XCP. 
- return self._execute(options, command, args, self.root_helper, - log_fail_as_error=self.log_fail_as_error) - else: - return self._execute(options, command, args, - log_fail_as_error=self.log_fail_as_error) - - def _as_root(self, options, command, args, use_root_namespace=False): - if not self.root_helper: - raise exceptions.SudoRequired() - - namespace = self.namespace if not use_root_namespace else None - - return self._execute(options, - command, - args, - self.root_helper, - namespace, - log_fail_as_error=self.log_fail_as_error) - - @classmethod - def _execute(cls, options, command, args, root_helper=None, - namespace=None, log_fail_as_error=True): - opt_list = ['-%s' % o for o in options] - if namespace: - ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip'] - else: - ip_cmd = ['ip'] - return utils.execute(ip_cmd + opt_list + [command] + list(args), - root_helper=root_helper, - log_fail_as_error=log_fail_as_error) - - def set_log_fail_as_error(self, fail_with_error): - self.log_fail_as_error = fail_with_error - - -class IPWrapper(SubProcessBase): - def __init__(self, root_helper=None, namespace=None): - super(IPWrapper, self).__init__(root_helper=root_helper, - namespace=namespace) - self.netns = IpNetnsCommand(self) - - def device(self, name): - return IPDevice(name, self.root_helper, self.namespace) - - def get_devices(self, exclude_loopback=False): - retval = [] - output = self._execute(['o', 'd'], 'link', ('list',), - self.root_helper, self.namespace) - for line in output.split('\n'): - if '<' not in line: - continue - tokens = line.split(' ', 2) - if len(tokens) == 3: - if any(v in tokens[2] for v in VLAN_INTERFACE_DETAIL): - delimiter = '@' - else: - delimiter = ':' - name = tokens[1].rpartition(delimiter)[0].strip() - - if exclude_loopback and name == LOOPBACK_DEVNAME: - continue - - retval.append(IPDevice(name, - self.root_helper, - self.namespace)) - return retval - - def add_tuntap(self, name, mode='tap'): - self._as_root('', 'tuntap', ('add', name, 'mode', mode)) - return IPDevice(name, self.root_helper, self.namespace) - - def add_veth(self, name1, name2, namespace2=None): - args = ['add', name1, 'type', 'veth', 'peer', 'name', name2] - - if namespace2 is None: - namespace2 = self.namespace - else: - self.ensure_namespace(namespace2) - args += ['netns', namespace2] - - self._as_root('', 'link', tuple(args)) - - return (IPDevice(name1, self.root_helper, self.namespace), - IPDevice(name2, self.root_helper, namespace2)) - - def del_veth(self, name): - """Delete a virtual interface between two namespaces.""" - self._as_root('', 'link', ('del', name)) - - def ensure_namespace(self, name): - if not self.netns.exists(name): - ip = self.netns.add(name) - lo = ip.device(LOOPBACK_DEVNAME) - lo.link.set_up() - else: - ip = IPWrapper(self.root_helper, name) - return ip - - def namespace_is_empty(self): - return not self.get_devices(exclude_loopback=True) - - def garbage_collect_namespace(self): - """Conditionally destroy the namespace if it is empty.""" - if self.namespace and self.netns.exists(self.namespace): - if self.namespace_is_empty(): - self.netns.delete(self.namespace) - return True - return False - - def add_device_to_namespace(self, device): - if self.namespace: - device.link.set_netns(self.namespace) - - def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None, - local=None, port=None, proxy=False): - cmd = ['add', name, 'type', 'vxlan', 'id', vni] - if group: - cmd.extend(['group', group]) - if dev: - cmd.extend(['dev', dev]) - if ttl: - cmd.extend(['ttl', 
ttl]) - if tos: - cmd.extend(['tos', tos]) - if local: - cmd.extend(['local', local]) - if proxy: - cmd.append('proxy') - # tuple: min,max - if port and len(port) == 2: - cmd.extend(['port', port[0], port[1]]) - elif port: - raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port) - self._as_root('', 'link', cmd) - return (IPDevice(name, self.root_helper, self.namespace)) - - @classmethod - def get_namespaces(cls, root_helper): - output = cls._execute('', 'netns', ('list',), root_helper=root_helper) - return [l.strip() for l in output.split('\n')] - - -class IpRule(IPWrapper): - def add_rule_from(self, ip, table, rule_pr): - args = ['add', 'from', ip, 'lookup', table, 'priority', rule_pr] - ip = self._as_root('', 'rule', tuple(args)) - return ip - - def delete_rule_priority(self, rule_pr): - args = ['del', 'priority', rule_pr] - ip = self._as_root('', 'rule', tuple(args)) - return ip - - -class IPDevice(SubProcessBase): - def __init__(self, name, root_helper=None, namespace=None): - super(IPDevice, self).__init__(root_helper=root_helper, - namespace=namespace) - self.name = name - self.link = IpLinkCommand(self) - self.addr = IpAddrCommand(self) - self.route = IpRouteCommand(self) - self.neigh = IpNeighCommand(self) - self.tunnel = IpTunnelCommand(self) - - def __eq__(self, other): - return (other is not None and self.name == other.name - and self.namespace == other.namespace) - - def __str__(self): - return self.name - - -class IpCommandBase(object): - COMMAND = '' - - def __init__(self, parent): - self._parent = parent - - def _run(self, *args, **kwargs): - return self._parent._run(kwargs.get('options', []), self.COMMAND, args) - - def _as_root(self, *args, **kwargs): - return self._parent._as_root(kwargs.get('options', []), - self.COMMAND, - args, - kwargs.get('use_root_namespace', False)) - - -class IpDeviceCommandBase(IpCommandBase): - @property - def name(self): - return self._parent.name - - -class IpLinkCommand(IpDeviceCommandBase): - COMMAND = 'link' - - def set_address(self, mac_address): - self._as_root('set', self.name, 'address', mac_address) - - def set_mtu(self, mtu_size): - self._as_root('set', self.name, 'mtu', mtu_size) - - def set_up(self): - self._as_root('set', self.name, 'up') - - def set_down(self): - self._as_root('set', self.name, 'down') - - def set_netns(self, namespace): - self._as_root('set', self.name, 'netns', namespace) - self._parent.namespace = namespace - - def set_name(self, name): - self._as_root('set', self.name, 'name', name) - self._parent.name = name - - def set_alias(self, alias_name): - self._as_root('set', self.name, 'alias', alias_name) - - def delete(self): - self._as_root('delete', self.name) - - @property - def address(self): - return self.attributes.get('link/ether') - - @property - def state(self): - return self.attributes.get('state') - - @property - def mtu(self): - return self.attributes.get('mtu') - - @property - def qdisc(self): - return self.attributes.get('qdisc') - - @property - def qlen(self): - return self.attributes.get('qlen') - - @property - def alias(self): - return self.attributes.get('alias') - - @property - def attributes(self): - return self._parse_line(self._run('show', self.name, options='o')) - - def _parse_line(self, value): - if not value: - return {} - - device_name, settings = value.replace("\\", '').split('>', 1) - tokens = settings.split() - keys = tokens[::2] - values = [int(v) if v.isdigit() else v for v in tokens[1::2]] - - retval = dict(zip(keys, values)) - return retval - - -class 
IpTunnelCommand(IpDeviceCommandBase): - COMMAND = 'tunnel' - - def add(self, mode, remote_ip, local_ip): - self._as_root('add', - self.name, - 'mode', - mode, - 'remote', - remote_ip, - 'local', - local_ip) - - def delete(self): - self._as_root('delete', - self.name) - - - -class IpAddrCommand(IpDeviceCommandBase): - COMMAND = 'addr' - - def add(self, ip_version, cidr, broadcast, scope='global'): - self._as_root('add', - cidr, - 'brd', - broadcast, - 'scope', - scope, - 'dev', - self.name, - options=[ip_version]) - - def delete(self, ip_version, cidr): - self._as_root('del', - cidr, - 'dev', - self.name, - options=[ip_version]) - - def flush(self): - self._as_root('flush', self.name) - - def list(self, scope=None, to=None, filters=None): - if filters is None: - filters = [] - - retval = [] - - if scope: - filters += ['scope', scope] - if to: - filters += ['to', to] - - for line in self._run('show', self.name, *filters).split('\n'): - line = line.strip() - if not line.startswith('inet'): - continue - parts = line.split() - if parts[0] == 'inet6': - version = 6 - scope = parts[3] - broadcast = '::' - else: - version = 4 - if parts[2] == 'brd': - broadcast = parts[3] - scope = parts[5] - else: - # sometimes output of 'ip a' might look like: - # inet 192.168.100.100/24 scope global eth0 - # and broadcast needs to be calculated from CIDR - broadcast = str(netaddr.IPNetwork(parts[1]).broadcast) - scope = parts[3] - - retval.append(dict(cidr=parts[1], - broadcast=broadcast, - scope=scope, - ip_version=version, - dynamic=('dynamic' == parts[-1]))) - return retval - - -class IpRouteCommand(IpDeviceCommandBase): - COMMAND = 'route' - - def add_gateway(self, gateway, metric=None, table=None): - args = ['replace', 'default', 'via', gateway] - if metric: - args += ['metric', metric] - args += ['dev', self.name] - if table: - args += ['table', table] - self._as_root(*args) - - def delete_gateway(self, gateway=None, table=None): - args = ['del', 'default'] - if gateway: - args += ['via', gateway] - args += ['dev', self.name] - if table: - args += ['table', table] - self._as_root(*args) - - def list_onlink_routes(self): - def iterate_routes(): - output = self._run('list', 'dev', self.name, 'scope', 'link') - for line in output.split('\n'): - line = line.strip() - if line and not line.count('src'): - yield line - - return [x for x in iterate_routes()] - - def add_onlink_route(self, cidr): - self._as_root('replace', cidr, 'dev', self.name, 'scope', 'link') - - def delete_onlink_route(self, cidr): - self._as_root('del', cidr, 'dev', self.name, 'scope', 'link') - - def get_gateway(self, scope=None, filters=None): - if filters is None: - filters = [] - - retval = None - - if scope: - filters += ['scope', scope] - - route_list_lines = self._run('list', 'dev', self.name, - *filters).split('\n') - default_route_line = next((x.strip() for x in - route_list_lines if - x.strip().startswith('default')), None) - if default_route_line: - gateway_index = 2 - parts = default_route_line.split() - retval = dict(gateway=parts[gateway_index]) - if 'metric' in parts: - metric_index = parts.index('metric') + 1 - retval.update(metric=int(parts[metric_index])) - - return retval - - def pullup_route(self, interface_name): - """Ensures that the route entry for the interface is before all - others on the same subnet. 
- """ - device_list = [] - device_route_list_lines = self._run('list', 'proto', 'kernel', - 'dev', interface_name).split('\n') - for device_route_line in device_route_list_lines: - try: - subnet = device_route_line.split()[0] - except Exception: - continue - subnet_route_list_lines = self._run('list', 'proto', 'kernel', - 'match', subnet).split('\n') - for subnet_route_line in subnet_route_list_lines: - i = iter(subnet_route_line.split()) - while(i.next() != 'dev'): - pass - device = i.next() - try: - while(i.next() != 'src'): - pass - src = i.next() - except Exception: - src = '' - if device != interface_name: - device_list.append((device, src)) - else: - break - - for (device, src) in device_list: - self._as_root('del', subnet, 'dev', device) - if (src != ''): - self._as_root('append', subnet, 'proto', 'kernel', - 'src', src, 'dev', device) - else: - self._as_root('append', subnet, 'proto', 'kernel', - 'dev', device) - - def add_route(self, cidr, ip, table=None): - args = ['replace', cidr, 'via', ip, 'dev', self.name] - if table: - args += ['table', table] - self._as_root(*args) - - def delete_route(self, cidr, ip, table=None): - args = ['del', cidr, 'via', ip, 'dev', self.name] - if table: - args += ['table', table] - self._as_root(*args) - - -class IpNeighCommand(IpDeviceCommandBase): - COMMAND = 'neigh' - - def add(self, ip_version, ip_address, mac_address): - self._as_root('replace', - ip_address, - 'lladdr', - mac_address, - 'nud', - 'permanent', - 'dev', - self.name, - options=[ip_version]) - - def delete(self, ip_version, ip_address, mac_address): - self._as_root('del', - ip_address, - 'lladdr', - mac_address, - 'dev', - self.name, - options=[ip_version]) - - -class IpNetnsCommand(IpCommandBase): - COMMAND = 'netns' - - def add(self, name): - self._as_root('add', name, use_root_namespace=True) - wrapper = IPWrapper(self._parent.root_helper, name) - wrapper.netns.execute(['sysctl', '-w', - 'net.ipv4.conf.all.promote_secondaries=1']) - return wrapper - - def delete(self, name): - self._as_root('delete', name, use_root_namespace=True) - - def execute(self, cmds, addl_env={}, check_exit_code=True, - extra_ok_codes=None): - ns_params = [] - if self._parent.namespace: - if not self._parent.root_helper: - raise exceptions.SudoRequired() - ns_params = ['ip', 'netns', 'exec', self._parent.namespace] - - env_params = [] - if addl_env: - env_params = (['env'] + - ['%s=%s' % pair for pair in addl_env.items()]) - return utils.execute( - ns_params + env_params + list(cmds), - root_helper=self._parent.root_helper, - check_exit_code=check_exit_code, extra_ok_codes=extra_ok_codes) - - def exists(self, name): - output = self._parent._execute('o', 'netns', ['list']) - - for line in output.split('\n'): - if name == line.strip(): - return True - return False - - -def device_exists(device_name, root_helper=None, namespace=None): - """Return True if the device exists in the namespace.""" - try: - dev = IPDevice(device_name, root_helper, namespace) - dev.set_log_fail_as_error(False) - address = dev.link.address - except RuntimeError: - return False - return bool(address) - - -def device_exists_with_ip_mac(device_name, ip_cidr, mac, namespace=None, - root_helper=None): - """Return True if the device with the given IP and MAC addresses - exists in the namespace. 
- """ - try: - device = IPDevice(device_name, root_helper, namespace) - if mac != device.link.address: - return False - if ip_cidr not in (ip['cidr'] for ip in device.addr.list()): - return False - except RuntimeError: - return False - else: - return True - - -def ensure_device_is_ready(device_name, root_helper=None, namespace=None): - dev = IPDevice(device_name, root_helper, namespace) - dev.set_log_fail_as_error(False) - try: - # Ensure the device is up, even if it is already up. If the device - # doesn't exist, a RuntimeError will be raised. - dev.link.set_up() - except RuntimeError: - return False - return True - - -def iproute_arg_supported(command, arg, root_helper=None): - command += ['help'] - stdout, stderr = utils.execute(command, root_helper=root_helper, - check_exit_code=False, return_stderr=True) - return any(arg in line for line in stderr.split('\n')) diff --git a/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/common/config.py b/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/common/config.py deleted file mode 100644 index ea586c7a..00000000 --- a/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/common/config.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Routines for configuring Neutron -""" - -import os - -from oslo.config import cfg -from oslo.db import options as db_options -from oslo import messaging -from paste import deploy - -from neutron.api.v2 import attributes -from neutron.common import utils -from neutron.openstack.common import log as logging -from neutron import version - - -LOG = logging.getLogger(__name__) - -core_opts = [ - cfg.StrOpt('bind_host', default='0.0.0.0', - help=_("The host IP to bind to")), - cfg.IntOpt('bind_port', default=9696, - help=_("The port to bind to")), - cfg.StrOpt('api_paste_config', default="api-paste.ini", - help=_("The API paste config file to use")), - cfg.StrOpt('api_extensions_path', default="", - help=_("The path for API extensions")), - cfg.StrOpt('policy_file', default="policy.json", - help=_("The policy file to use")), - cfg.StrOpt('auth_strategy', default='keystone', - help=_("The type of authentication to use")), - cfg.StrOpt('core_plugin', - help=_("The core plugin Neutron will use")), - cfg.ListOpt('service_plugins', default=[], - help=_("The service plugins Neutron will use")), - cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", - help=_("The base MAC address Neutron will use for VIFs")), - cfg.IntOpt('mac_generation_retries', default=16, - help=_("How many times Neutron will retry MAC generation")), - cfg.BoolOpt('allow_bulk', default=True, - help=_("Allow the usage of the bulk API")), - cfg.BoolOpt('allow_pagination', default=False, - help=_("Allow the usage of the pagination")), - cfg.BoolOpt('allow_sorting', default=False, - help=_("Allow the usage of the sorting")), - cfg.StrOpt('pagination_max_limit', default="-1", - help=_("The maximum number of items returned in a single " - "response, value was 'infinite' or negative integer " - "means no limit")), - cfg.IntOpt('max_dns_nameservers', default=5, - help=_("Maximum number of DNS nameservers")), - cfg.IntOpt('max_subnet_host_routes', default=20, - help=_("Maximum number of host routes per subnet")), - cfg.IntOpt('max_fixed_ips_per_port', default=5, - help=_("Maximum number of fixed ips per port")), - cfg.IntOpt('dhcp_lease_duration', default=86400, - deprecated_name='dhcp_lease_time', - help=_("DHCP lease duration (in seconds). Use -1 to tell " - "dnsmasq to use infinite lease times.")), - cfg.BoolOpt('dhcp_agent_notification', default=True, - help=_("Allow sending resource operation" - " notification to DHCP agent")), - cfg.BoolOpt('allow_overlapping_ips', default=False, - help=_("Allow overlapping IP support in Neutron")), - cfg.StrOpt('host', default=utils.get_hostname(), - help=_("The hostname Neutron is running on")), - cfg.BoolOpt('force_gateway_on_subnet', default=True, - help=_("Ensure that configured gateway is on subnet. " - "For IPv6, validate only if gateway is not a link " - "local address. 
Deprecated, to be removed during the " - "K release, at which point the check will be " - "mandatory.")), - cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, - help=_("Send notification to nova when port status changes")), - cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, - help=_("Send notification to nova when port data (fixed_ips/" - "floatingip) changes so nova can update its cache.")), - cfg.StrOpt('nova_url', - default='http://127.0.0.1:8774/v2', - help=_('URL for connection to nova')), - cfg.StrOpt('nova_admin_username', - help=_('Username for connecting to nova in admin context')), - cfg.StrOpt('nova_admin_password', - help=_('Password for connection to nova in admin context'), - secret=True), - cfg.StrOpt('nova_admin_tenant_id', - help=_('The uuid of the admin nova tenant')), - cfg.StrOpt('nova_admin_auth_url', - default='http://localhost:5000/v2.0', - help=_('Authorization URL for connecting to nova in admin ' - 'context')), - cfg.StrOpt('nova_ca_certificates_file', - help=_('CA file for novaclient to verify server certificates')), - cfg.BoolOpt('nova_api_insecure', default=False, - help=_("If True, ignore any SSL validation issues")), - cfg.StrOpt('nova_region_name', - help=_('Name of nova region to use. Useful if keystone manages' - ' more than one region.')), - cfg.IntOpt('send_events_interval', default=2, - help=_('Number of seconds between sending events to nova if ' - 'there are any events to send.')), - - # add by j00209498 - cfg.StrOpt('cascade_str', default='cascaded', - help=_('cascade_str identity cascading openstack or cascaded' - 'openstack, value = cascaded or cascading.')), -] - -core_cli_opts = [ - cfg.StrOpt('state_path', - default='/var/lib/neutron', - help=_("Where to store Neutron state files. " - "This directory must be writable by the agent.")), -] - -# Register the configuration options -cfg.CONF.register_opts(core_opts) -cfg.CONF.register_cli_opts(core_cli_opts) - -# Ensure that the control exchange is set correctly -messaging.set_transport_defaults(control_exchange='neutron') -_SQL_CONNECTION_DEFAULT = 'sqlite://' -# Update the default QueuePool parameters. These can be tweaked by the -# configuration variables - max_pool_size, max_overflow and pool_timeout -db_options.set_defaults(cfg.CONF, - connection=_SQL_CONNECTION_DEFAULT, - sqlite_db='', max_pool_size=10, - max_overflow=20, pool_timeout=10) - - -def init(args, **kwargs): - cfg.CONF(args=args, project='neutron', - version='%%prog %s' % version.version_info.release_string(), - **kwargs) - - # FIXME(ihrachys): if import is put in global, circular import - # failure occurs - from neutron.common import rpc as n_rpc - n_rpc.init(cfg.CONF) - - # Validate that the base_mac is of the correct format - msg = attributes._validate_regex(cfg.CONF.base_mac, - attributes.MAC_PATTERN) - if msg: - msg = _("Base MAC: %s") % msg - raise Exception(msg) - - -def setup_logging(): - """Sets up the logging options for a log with supplied name.""" - product_name = "neutron" - logging.setup(product_name) - LOG.info(_("Logging enabled!")) - - -def load_paste_app(app_name): - """Builds and returns a WSGI app from a paste config file. 
- - :param app_name: Name of the application to load - :raises ConfigFilesNotFoundError when config file cannot be located - :raises RuntimeError when application cannot be loaded from config file - """ - - config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config) - if not config_path: - raise cfg.ConfigFilesNotFoundError( - config_files=[cfg.CONF.api_paste_config]) - config_path = os.path.abspath(config_path) - LOG.info(_("Config paste file: %s"), config_path) - - try: - app = deploy.loadapp("config:%s" % config_path, name=app_name) - except (LookupError, ImportError): - msg = (_("Unable to load %(app_name)s from " - "configuration file %(config_path)s.") % - {'app_name': app_name, - 'config_path': config_path}) - LOG.exception(msg) - raise RuntimeError(msg) - return app diff --git a/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/db/extraroute_db.py b/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/db/extraroute_db.py deleted file mode 100644 index cc8c7831..00000000 --- a/juno-patches/neutron/neutron_cascaded_l3_patch/neutron/db/extraroute_db.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright 2013, Nachi Ueno, NTT MCL, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import netaddr -from oslo.config import cfg -import sqlalchemy as sa -from sqlalchemy import orm - -from neutron.common import utils -from neutron.db import db_base_plugin_v2 -from neutron.db import l3_db -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.extensions import extraroute -from neutron.extensions import l3 -from neutron.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - -extra_route_opts = [ - #TODO(nati): use quota framework when it support quota for attributes - cfg.IntOpt('max_routes', default=30, - help=_("Maximum number of routes")), - # add by j00209498 ---begin - cfg.StrOpt('l3gw_extern_net_ip_range', - default="100.64.0.0/16", - help=_('The l3gw external ip range(cidr) used for unique ' - 'like 100.64.0.0/16')), - # add by j00209498 ---end -] - -cfg.CONF.register_opts(extra_route_opts) - - -class RouterRoute(model_base.BASEV2, models_v2.Route): - router_id = sa.Column(sa.String(36), - sa.ForeignKey('routers.id', - ondelete="CASCADE"), - primary_key=True) - - router = orm.relationship(l3_db.Router, - backref=orm.backref("route_list", - lazy='joined', - cascade='delete')) - - -class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): - """Mixin class to support extra route configuration on router.""" - - def _extend_router_dict_extraroute(self, router_res, router_db): - router_res['routes'] = (ExtraRoute_dbonly_mixin. 
- _make_extra_route_list( - router_db['route_list'] - )) - - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - l3.ROUTERS, ['_extend_router_dict_extraroute']) - - def update_router(self, context, id, router): - r = router['router'] - with context.session.begin(subtransactions=True): - #check if route exists and have permission to access - router_db = self._get_router(context, id) - if 'routes' in r: - self._update_extra_routes(context, router_db, r['routes']) - routes = self._get_extra_routes_by_router_id(context, id) - router_updated = super(ExtraRoute_dbonly_mixin, self).update_router( - context, id, router) - router_updated['routes'] = routes - - return router_updated - - def _get_subnets_by_cidr(self, context, cidr): - query_subnets = context.session.query(models_v2.Subnet) - return query_subnets.filter_by(cidr=cidr).all() - - def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop): - #Note(nati): Nexthop should be connected, - # so we need to check - # nexthop belongs to one of cidrs of the router ports - extern_relay_cidr = cfg.CONF.l3gw_extern_net_ip_range - if not netaddr.all_matching_cidrs(nexthop, cidrs): - if(cfg.CONF.cascade_str == 'cascaded' - and extern_relay_cidr - and netaddr.all_matching_cidrs(nexthop, - [extern_relay_cidr])): - LOG.debug(_('nexthop(%s) is in extern_relay_cidr,' - 'so not raise InvalidRoutes exception'), nexthop) - return - raise extraroute.InvalidRoutes( - routes=routes, - reason=_('the nexthop is not connected with router')) - #Note(nati) nexthop should not be same as fixed_ips - if nexthop in ips: - raise extraroute.InvalidRoutes( - routes=routes, - reason=_('the nexthop is used by router')) - - def _validate_routes(self, context, - router_id, routes): - if len(routes) > cfg.CONF.max_routes: - raise extraroute.RoutesExhausted( - router_id=router_id, - quota=cfg.CONF.max_routes) - - filters = {'device_id': [router_id]} - ports = self._core_plugin.get_ports(context, filters) - cidrs = [] - ips = [] - for port in ports: - for ip in port['fixed_ips']: - cidrs.append(self._core_plugin._get_subnet( - context, ip['subnet_id'])['cidr']) - ips.append(ip['ip_address']) - for route in routes: - self._validate_routes_nexthop( - cidrs, ips, routes, route['nexthop']) - - def _update_extra_routes(self, context, router, routes): - self._validate_routes(context, router['id'], - routes) - old_routes, routes_dict = self._get_extra_routes_dict_by_router_id( - context, router['id']) - added, removed = utils.diff_list_of_dict(old_routes, - routes) - LOG.debug(_('Added routes are %s'), added) - for route in added: - router_routes = RouterRoute( - router_id=router['id'], - destination=route['destination'], - nexthop=route['nexthop']) - context.session.add(router_routes) - - LOG.debug(_('Removed routes are %s'), removed) - for route in removed: - context.session.delete( - routes_dict[(route['destination'], route['nexthop'])]) - - @staticmethod - def _make_extra_route_list(extra_routes): - # added by j00209498 ----begin - extern_relay_cidr = cfg.CONF.l3gw_extern_net_ip_range - if(cfg.CONF.cascade_str == 'cascaded' and extern_relay_cidr): - routes_list = [] - for route in extra_routes: - if(netaddr.all_matching_cidrs(route['nexthop'], - [extern_relay_cidr])): - routes_list.append({'destination': route['destination'], - 'nexthop': route['nexthop'], - 'onlink': True}) - else: - routes_list.append({'destination': route['destination'], - 'nexthop': route['nexthop']}) - return routes_list - # added by j00209498 ----end - return [{'destination': 
route['destination'], - 'nexthop': route['nexthop']} - for route in extra_routes] - - def _get_extra_routes_by_router_id(self, context, id): - query = context.session.query(RouterRoute) - query = query.filter_by(router_id=id) - return self._make_extra_route_list(query) - - def _get_extra_routes_dict_by_router_id(self, context, id): - query = context.session.query(RouterRoute) - query = query.filter_by(router_id=id) - routes = [] - routes_dict = {} - for route in query: - routes.append({'destination': route['destination'], - 'nexthop': route['nexthop']}) - routes_dict[(route['destination'], route['nexthop'])] = route - return routes, routes_dict - - def get_router(self, context, id, fields=None): - with context.session.begin(subtransactions=True): - router = super(ExtraRoute_dbonly_mixin, self).get_router( - context, id, fields) - return router - - def get_routers(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - with context.session.begin(subtransactions=True): - routers = super(ExtraRoute_dbonly_mixin, self).get_routers( - context, filters, fields, sorts=sorts, limit=limit, - marker=marker, page_reverse=page_reverse) - return routers - - def _confirm_router_interface_not_in_use(self, context, router_id, - subnet_id): - super(ExtraRoute_dbonly_mixin, - self)._confirm_router_interface_not_in_use( - context, router_id, subnet_id) - subnet_db = self._core_plugin._get_subnet(context, subnet_id) - subnet_cidr = netaddr.IPNetwork(subnet_db['cidr']) - extra_routes = self._get_extra_routes_by_router_id(context, router_id) - for route in extra_routes: - if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]): - raise extraroute.RouterInterfaceInUseByRoute( - router_id=router_id, subnet_id=subnet_id) - - -class ExtraRoute_db_mixin(ExtraRoute_dbonly_mixin, l3_db.L3_NAT_db_mixin): - """Mixin class to support extra route configuration on router with rpc.""" - pass diff --git a/juno-patches/neutron/neutron_cascading_big2layer_patch/installation/install.sh b/juno-patches/neutron/neutron_cascading_big2layer_patch/installation/install.sh deleted file mode 100644 index db1c0a83..00000000 --- a/juno-patches/neutron/neutron_cascading_big2layer_patch/installation/install.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. -_NEUTRON_CONF_DIR="/etc/neutron" -_NEUTRON_CONF_FILE='neutron.conf' -_NEUTRON_INSTALL="/usr/lib/python2.7/dist-packages" -_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="../neutron/" -_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascading-server-big2layer-patch-installation-backup" -if [[ ${EUID} -ne 0 ]]; then - echo "Please run as root." 
- exit 1 -fi - -##Redirecting output to logfile as well as stdout -#exec > >(tee -a ${_SCRIPT_LOGFILE}) -#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) - -cd `dirname $0` - -echo "checking installation directories..." -if [ ! -d "${_NEUTRON_DIR}" ] ; then - echo "Could not find the neutron installation. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi -if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then - echo "Could not find neutron config file. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi - -echo "checking previous installation..." -if [ -d "${_BACKUP_DIR}/neutron" ] ; then - echo "It seems neutron-server-big2layer-cascading-patch has already been installed!" - echo "Please check README for solution if this is not true." - exit 1 -fi - -echo "backing up current files that might be overwritten..." -mkdir -p "${_BACKUP_DIR}" -cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -echo "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` -if [ $? -ne 0 ] ; then - echo "Error in copying, aborted." - echo "Recovering original files..." - cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" - if [ $? -ne 0 ] ; then - echo "Recovering failed! Please install manually." - fi - exit 1 -fi - - -echo "restarting cascading neutron server..." -service neutron-server restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascading neutron server manually." - exit 1 -fi - -echo "restarting cascading neutron-plugin-openvswitch-agent..." -service neutron-plugin-openvswitch-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascading neutron-plugin-openvswitch-agent manually." - exit 1 -fi - -echo "restarting cascading neutron-l3-agent..." -service neutron-l3-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascading neutron-l3-agent manually." - exit 1 -fi - -echo "Completed." -echo "See README to get started." -exit 0 diff --git a/juno-patches/neutron/neutron_cascading_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/db.py b/juno-patches/neutron/neutron_cascading_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/db.py deleted file mode 100644 index 92dec9c1..00000000 --- a/juno-patches/neutron/neutron_cascading_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/db.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import sql - -from neutron.common import constants as const -from neutron.db import agents_db -from neutron.db import common_db_mixin as base_db -from neutron.db import models_v2 -from neutron.openstack.common import jsonutils -from neutron.openstack.common import timeutils -from neutron.plugins.ml2.drivers.l2pop import constants as l2_const -from neutron.plugins.ml2 import models as ml2_models - - -class L2populationDbMixin(base_db.CommonDbMixin): - - def get_agent_ip_by_host(self, session, agent_host): - agent = self.get_agent_by_host(session, agent_host) - if agent: - return self.get_agent_ip(agent) - - def get_agent_ip(self, agent): - configuration = jsonutils.loads(agent.configurations) - return configuration.get('tunneling_ip') - - def get_host_ip_from_binding_profile(self, port): - ip = port['binding:profile'].get('host_ip') - return ip - - def get_host_ip_from_binding_profile_str(self, profile): - if(not profile): - return - profile = jsonutils.loads(profile) - return profile.get('host_ip') - - def get_agent_uptime(self, agent): - return timeutils.delta_seconds(agent.started_at, - agent.heartbeat_timestamp) - - def get_agent_tunnel_types(self, agent): - configuration = jsonutils.loads(agent.configurations) - return configuration.get('tunnel_types') - - def get_agent_l2pop_network_types(self, agent): - configuration = jsonutils.loads(agent.configurations) - return configuration.get('l2pop_network_types') - - def get_agent_by_host(self, session, agent_host): - with session.begin(subtransactions=True): - query = session.query(agents_db.Agent) - query = query.filter(agents_db.Agent.host == agent_host, - agents_db.Agent.agent_type.in_( - l2_const.SUPPORTED_AGENT_TYPES)) - return query.first() - - def get_network_ports(self, session, network_id): - with session.begin(subtransactions=True): - query = session.query(ml2_models.PortBinding, - agents_db.Agent) - query = query.join(agents_db.Agent, - agents_db.Agent.host == - ml2_models.PortBinding.host) - query = query.join(models_v2.Port) - query = query.filter(models_v2.Port.network_id == network_id, - models_v2.Port.admin_state_up == sql.true(), - agents_db.Agent.agent_type.in_( - l2_const.SUPPORTED_AGENT_TYPES)) - return query - - def get_nondvr_network_ports(self, session, network_id): - query = self.get_network_ports(session, network_id) - return query.filter(models_v2.Port.device_owner != - const.DEVICE_OWNER_DVR_INTERFACE) - - def get_dvr_network_ports(self, session, network_id): - with session.begin(subtransactions=True): - query = session.query(ml2_models.DVRPortBinding, - agents_db.Agent) - query = query.join(agents_db.Agent, - agents_db.Agent.host == - ml2_models.DVRPortBinding.host) - query = query.join(models_v2.Port) - query = query.filter(models_v2.Port.network_id == network_id, - models_v2.Port.admin_state_up == sql.true(), - models_v2.Port.device_owner == - const.DEVICE_OWNER_DVR_INTERFACE, - agents_db.Agent.agent_type.in_( - l2_const.SUPPORTED_AGENT_TYPES)) - return query - - def get_agent_network_active_port_count(self, session, agent_host, - network_id): - with session.begin(subtransactions=True): - query = session.query(models_v2.Port) - query1 = query.join(ml2_models.PortBinding) - query1 = query1.filter(models_v2.Port.network_id == network_id, - models_v2.Port.status == - const.PORT_STATUS_ACTIVE, - models_v2.Port.device_owner != - const.DEVICE_OWNER_DVR_INTERFACE, - ml2_models.PortBinding.host == agent_host) - query2 = query.join(ml2_models.DVRPortBinding) - query2 = 
query2.filter(models_v2.Port.network_id == network_id, - ml2_models.DVRPortBinding.status == - const.PORT_STATUS_ACTIVE, - models_v2.Port.device_owner == - const.DEVICE_OWNER_DVR_INTERFACE, - ml2_models.DVRPortBinding.host == - agent_host) - return (query1.count() + query2.count()) diff --git a/juno-patches/neutron/neutron_cascading_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/juno-patches/neutron/neutron_cascading_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py deleted file mode 100644 index 5f9fb65c..00000000 --- a/juno-patches/neutron/neutron_cascading_big2layer_patch/neutron/plugins/ml2/drivers/l2pop/mech_driver.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from neutron.common import constants as const -from neutron import context as n_context -from neutron.db import api as db_api -from neutron.openstack.common import log as logging -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers.l2pop import config # noqa -from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db -from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc - -LOG = logging.getLogger(__name__) - - -class L2populationMechanismDriver(api.MechanismDriver, - l2pop_db.L2populationDbMixin): - - def __init__(self): - super(L2populationMechanismDriver, self).__init__() - self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() - - def initialize(self): - LOG.debug(_("Experimental L2 population driver")) - self.rpc_ctx = n_context.get_admin_context_without_session() - self.migrated_ports = {} - self.remove_fdb_entries = {} - - def _get_port_fdb_entries(self, port): - return [[port['mac_address'], port['device_owner'], - ip['ip_address']] for ip in port['fixed_ips']] - - def _get_agent_host(self, context, port): - if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - agent_host = context.binding.host - else: - agent_host = port['binding:host_id'] - return agent_host - - def delete_port_precommit(self, context): - # TODO(matrohon): revisit once the original bound segment will be - # available in delete_port_postcommit. 
in delete_port_postcommit - # agent_active_ports will be equal to 0, and the _update_port_down - # won't need agent_active_ports_count_for_flooding anymore - port = context.current - agent_host = context.host #self._get_agent_host(context, port) - - if port['id'] not in self.remove_fdb_entries: - self.remove_fdb_entries[port['id']] = {} - - self.remove_fdb_entries[port['id']][agent_host] = ( - self._update_port_down(context, port, 1)) - - def delete_port_postcommit(self, context): - port = context.current - agent_host = context.host - - fdb_entries = self._update_port_down(context, port, agent_host) - self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx, - fdb_entries) - - def _get_diff_ips(self, orig, port): - orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) - port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) - - # check if an ip has been added or removed - orig_chg_ips = orig_ips.difference(port_ips) - port_chg_ips = port_ips.difference(orig_ips) - - if orig_chg_ips or port_chg_ips: - return orig_chg_ips, port_chg_ips - - def _fixed_ips_changed(self, context, orig, port, diff_ips): - orig_ips, port_ips = diff_ips - - if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): - agent_host = context.host - else: - agent_host = context.original_host - port_infos = self._get_port_infos( - context, orig, agent_host) - if not port_infos: - return - agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos - - orig_mac_ip = [[port['mac_address'], port['device_owner'], ip] - for ip in orig_ips] - port_mac_ip = [[port['mac_address'], port['device_owner'], ip] - for ip in port_ips] - - upd_fdb_entries = {port['network_id']: {agent_ip: {}}} - - ports = upd_fdb_entries[port['network_id']][agent_ip] - if orig_mac_ip: - ports['before'] = orig_mac_ip - - if port_mac_ip: - ports['after'] = port_mac_ip - - self.L2populationAgentNotify.update_fdb_entries( - self.rpc_ctx, {'chg_ip': upd_fdb_entries}) - - return True - - def update_port_postcommit(self, context): - port = context.current - orig = context.original - - diff_ips = self._get_diff_ips(orig, port) - if diff_ips: - self._fixed_ips_changed(context, orig, port, diff_ips) - if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - if context.status == const.PORT_STATUS_ACTIVE: - self._update_port_up(context) - if context.status == const.PORT_STATUS_DOWN: - agent_host = context.host - fdb_entries = self._update_port_down( - context, port, agent_host) - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fdb_entries) - elif (context.host != context.original_host - and context.status == const.PORT_STATUS_ACTIVE - and not self.migrated_ports.get(orig['id'])): - # The port has been migrated. 
We have to store the original - # binding to send appropriate fdb once the port will be set - # on the destination host - self.migrated_ports[orig['id']] = ( - (orig, context.original_host)) - elif context.status != context.original_status: - if context.status == const.PORT_STATUS_ACTIVE: - self._update_port_up(context) - elif context.status == const.PORT_STATUS_DOWN: - fdb_entries = self._update_port_down( - context, port, context.host) - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fdb_entries) - elif context.status == const.PORT_STATUS_BUILD: - orig = self.migrated_ports.pop(port['id'], None) - if orig: - original_port = orig[0] - original_host = orig[1] - # this port has been migrated: remove its entries from fdb - fdb_entries = self._update_port_down( - context, original_port, original_host) - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fdb_entries) - - def _get_port_infos(self, context, port, agent_host): - if not agent_host: - return - - session = db_api.get_session() - agent = self.get_agent_by_host(session, agent_host) - if not agent: - return - - if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - agent_ip = self.get_agent_ip(agent) - else: - agent_ip = self.get_host_ip_from_binding_profile(port) - if not agent_ip: - LOG.warning(_("Unable to retrieve the agent ip, check the agent " - "configuration.")) - return - - segment = context.bound_segment - if not segment: - LOG.warning(_("Port %(port)s updated by agent %(agent)s " - "isn't bound to any segment"), - {'port': port['id'], 'agent': agent}) - return - - network_types = self.get_agent_l2pop_network_types(agent) - if network_types is None: - network_types = self.get_agent_tunnel_types(agent) - if segment['network_type'] not in network_types: - return - - fdb_entries = self._get_port_fdb_entries(port) - - return agent, agent_host, agent_ip, segment, fdb_entries - - def _update_port_up(self, context): - port = context.current - agent_host = context.host - port_infos = self._get_port_infos(context, port, agent_host) - if not port_infos: - return - agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos - - network_id = port['network_id'] - - session = db_api.get_session() - agent_active_ports = self.get_agent_network_active_port_count( - session, agent_host, network_id) - - other_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {agent_ip: []}}} - - if agent_active_ports == 1 or ( - self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time): - # First port activated on current agent in this network, - # we have to provide it with the whole list of fdb entries - agent_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {}}} - ports = agent_fdb_entries[network_id]['ports'] - #import pdb;pdb.set_trace() - nondvr_network_ports = self.get_nondvr_network_ports(session, - network_id) - for network_port in nondvr_network_ports: - binding, agent = network_port - if agent.host == agent_host: - continue - - #ip = self.get_agent_ip(agent) - profile = binding['profile'] - ip = self.get_host_ip_from_binding_profile_str(profile) - if not ip: - LOG.debug(_("Unable to retrieve the agent ip, check " - "the agent %(agent_host)s configuration."), - {'agent_host': agent.host}) - continue - - agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) - agent_ports += self._get_port_fdb_entries(binding.port) - ports[ip] = agent_ports - # comment 
by j00209498 -# dvr_network_ports = self.get_dvr_network_ports(session, network_id) -# for network_port in dvr_network_ports: -# binding, agent = network_port -# if agent.host == agent_host: -# continue -# -# ip = self.get_agent_ip(agent) -# if not ip: -# LOG.debug(_("Unable to retrieve the agent ip, check " -# "the agent %(agent_host)s configuration."), -# {'agent_host': agent.host}) -# continue -# -# agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) -# ports[ip] = agent_ports - - # And notify other agents to add flooding entry - other_fdb_entries[network_id]['ports'][agent_ip].append( - const.FLOODING_ENTRY) - - if ports.keys(): - self.L2populationAgentNotify.add_fdb_entries( - self.rpc_ctx, agent_fdb_entries, agent_host) - - # Notify other agents to add fdb rule for current port - if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: - other_fdb_entries[network_id]['ports'][agent_ip] += ( - port_fdb_entries) - - self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, - other_fdb_entries) - - def _update_port_down(self, context, port, agent_host): - port_infos = self._get_port_infos(context, port, agent_host) - if not port_infos: - return - agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos - - network_id = port['network_id'] - - session = db_api.get_session() - agent_active_ports = self.get_agent_network_active_port_count( - session, agent_host, network_id) - - other_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {agent_ip: []}}} - if agent_active_ports == 0: - # Agent is removing its last activated port in this network, - # other agents needs to be notified to delete their flooding entry. - other_fdb_entries[network_id]['ports'][agent_ip].append( - const.FLOODING_ENTRY) - # Notify other agents to remove fdb rules for current port - if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE: - fdb_entries = port_fdb_entries - other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries - - return other_fdb_entries diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/etc/neutron/neutron.conf b/juno-patches/neutron/neutron_cascading_l3_patch/etc/neutron/neutron.conf deleted file mode 100644 index 09ab7852..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/etc/neutron/neutron.conf +++ /dev/null @@ -1,39 +0,0 @@ -[DEFAULT] -cascade_str = cascading -debug=true -verbose=true -core_plugin = ml2 -service_plugins = router -allow_overlapping_ips = True -rpc_backend=rabbit -rabbit_host = CASCADING_CONTROL_IP -rabbit_password = USER_PWD -notify_nova_on_port_status_changes = True -notify_nova_on_port_data_changes = True -nova_url = http://CASCADING_CONTROL_IP:8774/v2 -nova_admin_username = nova -nova_admin_tenant_id = -nova_admin_password = openstack -nova_admin_auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 -lock_path = $state_path/lock -core_plugin = ml2 -auth_strategy = keystone -nova_region_name = CASCADING_REGION_NAME - -[agent] -root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf -[keystone_authtoken] -identity_uri = http://CASCADING_CONTROL_IP:5000 -auth_host = CASCADING_CONTROL_IP -auth_port = 35357 -auth_protocol = http -admin_tenant_name = TENANT_NAME -admin_user = USER_NAME -admin_password = USER_PWD - -[database] -connection = mysql://neutron:openstack@CASCADING_CONTROL_IP/neutron - -[service_providers] -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default 
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/etc/neutron/plugins/ml2/ml2_conf.ini b/juno-patches/neutron/neutron_cascading_l3_patch/etc/neutron/plugins/ml2/ml2_conf.ini deleted file mode 100644 index 77d1adb9..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/etc/neutron/plugins/ml2/ml2_conf.ini +++ /dev/null @@ -1,107 +0,0 @@ -[ovs] -bridge_mappings = default:br-eth1,external:br-ex -integration_bridge = br-int -network_vlan_ranges = default:1:4094 -tunnel_type = vxlan,gre -enable_tunneling = True -local_ip = LOCAL_IP - - -[ml2] -type_drivers = local,flat,vlan,gre,vxlan -tenant_network_types = local,flat,vlan,gre,vxlan -mechanism_drivers = openvswitch,l2population - -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. -# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. -# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade - -# (ListOpt) Ordered list of extension driver entrypoints -# to be loaded from the neutron.ml2.extension_drivers namespace. -# extension_drivers = -# Example: extension_drivers = anewextensiondriver - -[ml2_type_flat] -flat_networks = external - -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. -# -# flat_networks = -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * - -[ml2_type_vlan] -# (ListOpt) List of [::] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks. -# -# network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 -network_vlan_ranges = default:1:4094 - -[ml2_type_gre] - -tunnel_id_ranges = 1:1000 -# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -# tunnel_id_ranges = - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of : tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -vni_ranges = 4097:200000 - -# (StrOpt) Multicast group for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -# vxlan_group = -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -#firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -firewall_driver=neutron.agent.firewall.NoopFirewallDriver -enable_security_group = True -enable_ipset = True -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. 
-# enable_security_group = True -[agent] -tunnel_types = vxlan, gre -l2_population = True -arp_responder = True -enable_distributed_routing = True - -#configure added by j00209498 -keystone_auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 -neutron_user_name = USER_NAME -neutron_password = USER_PWD -neutron_tenant_name = TENANT_NAME -os_region_name = CASCADED_REGION_NAME - -cascading_os_region_name = CASCADING_REGION_NAME -cascading_auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 -cascading_user_name = USER_NAME -cascading_password = USER_PWD -cascading_tenant_name = TENANT_NAME diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/installation/install.sh b/juno-patches/neutron/neutron_cascading_l3_patch/installation/install.sh deleted file mode 100644 index 61dd453d..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/installation/install.sh +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - - -CASCADING_CONTROL_IP=127.0.0.1 -CASCADING_REGION_NAME=Cascading_Openstack -CASCADED_REGION_NAME=AZ1 -USER_NAME=neutron -USER_PWD=openstack -TENANT_NAME=service - -#For test path or the path is not standard -_PREFIX_DIR="" - -_NEUTRON_CONF_DIR="${_PREFIX_DIR}/etc/neutron" -_NEUTRON_CONF_FILE='neutron.conf' -_NEUTRON_INSTALL="${_PREFIX_DIR}/usr/lib/python2.7/dist-packages" -_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" -_NEUTRON_CONF="${_NEUTRON_CONF_DIR}/neutron.conf" -_NEUTRON_L2_PROXY_FILE="plugins/ml2/ml2_conf.ini" -_NEUTRON_L2_PROXY_CONF="${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" - -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CONF_DIR="../etc/neutron/" -_CONF_BACKUP_DIR="`dirname ${_NEUTRON_CONF_DIR}`/.neutron-cascading-server-installation-backup" -_CODE_DIR="../neutron/" -_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascading-server-installation-backup" - -#for test begin -#rm -rf "${_CONF_BACKUP_DIR}/neutron" -#rm -rf "${_BACKUP_DIR}/neutron" -#for test end - - -#_SCRIPT_NAME="${0##*/}" -#_SCRIPT_LOGFILE="/var/log/neutron-server-cascading/installation/${_SCRIPT_NAME}.log" - -if [ "$EUID" != "0" ]; then - echo "Please run as root." - exit 1 -fi - -##Redirecting output to logfile as well as stdout -#exec > >(tee -a ${_SCRIPT_LOGFILE}) -#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) - -cd `dirname $0` - -echo "checking installation directories..." -if [ ! -d "${_NEUTRON_DIR}" ] ; then - echo "Could not find the neutron installation. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi -if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then - echo "Could not find neutron config file. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi - -echo "checking previous installation..." -if [ -d "${_BACKUP_DIR}/neutron" -o -d "${_CONF_BACKUP_DIR}/neutron" ] ; then - echo "It seems neutron-server-cascading has already been installed!" 
- echo "Please check README for solution if this is not true." - exit 1 -fi - -echo "backing up current files that might be overwritten..." -mkdir -p "${_CONF_BACKUP_DIR}" -cp -r "${_NEUTRON_CONF_DIR}/" "${_CONF_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_CONF_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -mkdir -p "${_BACKUP_DIR}" -cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -echo "copying in config files..." -cp -r "${_CONF_DIR}" `dirname ${_NEUTRON_CONF_DIR}` -if [ $? -ne 0 ] ; then - echo "Error in copying, aborted." - echo "Recovering original files..." - cp -r "${_CONF_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_CONF_DIR}` && rm -r "${_CONF_BACKUP_DIR}/neutron" - if [ $? -ne 0 ] ; then - echo "Recovering failed! Please install manually." - fi - exit 1 -fi - -echo "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` -if [ $? -ne 0 ] ; then - echo "Error in copying, aborted." - echo "Recovering original files..." - cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" - if [ $? -ne 0 ] ; then - echo "Recovering failed! Please install manually." - fi - exit 1 -fi - -echo "updating config file..." -sed -i "s/CASCADING_CONTROL_IP/$CASCADING_CONTROL_IP/g" "${_NEUTRON_CONF}" -sed -i "s/CASCADING_REGION_NAME/$CASCADING_REGION_NAME/g" "${_NEUTRON_CONF}" -sed -i "s/USER_NAME/$USER_NAME/g" "${_NEUTRON_CONF}" -sed -i "s/USER_PWD/$USER_PWD/g" "${_NEUTRON_CONF}" -sed -i "s/TENANT_NAME/$TENANT_NAME/g" "${_NEUTRON_CONF}" - -sed -i "s/CASCADING_CONTROL_IP/$CASCADING_CONTROL_IP/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/CASCADING_REGION_NAME/$CASCADING_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/CASCADED_REGION_NAME/$CASCADED_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/USER_NAME/$USER_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/USER_PWD/$USER_PWD/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/TENANT_NAME/$TENANT_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" - -echo "upgrade and syc neutron DB for cascading-server-l3-patch..." -_MYSQL_PASS='openstack' -exec_sql_str="DROP DATABASE if exists neutron;CREATE DATABASE neutron;GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY \"$_MYSQL_PASS\";GRANT ALL PRIVILEGES ON *.* TO 'neutron'@'%'IDENTIFIED BY \"$_MYSQL_PASS\";" -mysql -u root -p$_MYSQL_PASS -e "$exec_sql_str" -neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head -if [ $? -ne 0 ] ; then - echo "There was an error in upgrading DB for cascading-server-l3-patch, please check cascacaded neutron server code manually." - exit 1 -fi - -echo "restarting cascading neutron server..." -service neutron-server restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascading neutron server manually." - exit 1 -fi - -echo "Completed." -echo "See README to get started." 
- -exit 0 - - - diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/api/rpc/handlers/l3_rpc.py b/juno-patches/neutron/neutron_cascading_l3_patch/neutron/api/rpc/handlers/l3_rpc.py deleted file mode 100644 index a4da5921..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/api/rpc/handlers/l3_rpc.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo.config import cfg - -from neutron.common import constants -from neutron.common import exceptions -from neutron.common import rpc as n_rpc -from neutron.common import utils -from neutron import context as neutron_context -from neutron.extensions import l3 -from neutron.extensions import portbindings -from neutron import manager -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as plugin_constants - - -LOG = logging.getLogger(__name__) - - -class L3RpcCallback(n_rpc.RpcCallback): - """L3 agent RPC callback in plugin implementations.""" - - # 1.0 L3PluginApi BASE_RPC_API_VERSION - # 1.1 Support update_floatingip_statuses - # 1.2 Added methods for DVR support - # 1.3 Added a method that returns the list of activated services - # 1.4 Added L3 HA update_router_state - RPC_API_VERSION = '1.4' - - @property - def plugin(self): - if not hasattr(self, '_plugin'): - self._plugin = manager.NeutronManager.get_plugin() - return self._plugin - - @property - def l3plugin(self): - if not hasattr(self, '_l3plugin'): - self._l3plugin = manager.NeutronManager.get_service_plugins()[ - plugin_constants.L3_ROUTER_NAT] - return self._l3plugin - - def sync_routers(self, context, **kwargs): - """Sync routers according to filters to a specific agent. - - @param context: contain user information - @param kwargs: host, router_ids - @return: a list of routers - with their interfaces and floating_ips - """ - router_ids = kwargs.get('router_ids') - host = kwargs.get('host') - context = neutron_context.get_admin_context() - if not self.l3plugin: - routers = {} - LOG.error(_('No plugin for L3 routing registered! 
Will reply ' - 'to l3 agent with empty router dictionary.')) - elif utils.is_extension_supported( - self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): - if cfg.CONF.router_auto_schedule: - self.l3plugin.auto_schedule_routers(context, host, router_ids) - routers = ( - self.l3plugin.list_active_sync_routers_on_active_l3_agent( - context, host, router_ids)) - else: - routers = self.l3plugin.get_sync_data(context, router_ids) - if utils.is_extension_supported( - self.plugin, constants.PORT_BINDING_EXT_ALIAS): - self._ensure_host_set_on_ports(context, host, routers) - LOG.debug(_("Routers returned to l3 agent:\n %s"), - jsonutils.dumps(routers, indent=5)) - return routers - - def _ensure_host_set_on_ports(self, context, host, routers): - for router in routers: - LOG.debug(_("Checking router: %(id)s for host: %(host)s"), - {'id': router['id'], 'host': host}) - if router.get('gw_port') and router.get('distributed'): - self._ensure_host_set_on_port(context, - router.get('gw_port_host'), - router.get('gw_port'), - router['id']) - for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []): - self._ensure_host_set_on_port(context, - router.get('gw_port_host'), - p, router['id']) - else: - self._ensure_host_set_on_port(context, host, - router.get('gw_port'), - router['id']) - for interface in router.get(constants.INTERFACE_KEY, []): - self._ensure_host_set_on_port(context, host, - interface, router['id']) - interface = router.get(constants.HA_INTERFACE_KEY) - if interface: - self._ensure_host_set_on_port(context, host, interface, - router['id']) - - def _ensure_host_set_on_port(self, context, host, port, router_id=None): - if (port and - (port.get('device_owner') != - constants.DEVICE_OWNER_DVR_INTERFACE and - port.get(portbindings.HOST_ID) != host or - port.get(portbindings.VIF_TYPE) == - portbindings.VIF_TYPE_BINDING_FAILED)): - # All ports, including ports created for SNAT'ing for - # DVR are handled here - try: - self.plugin.update_port(context, port['id'], - {'port': {portbindings.HOST_ID: host}}) - except exceptions.PortNotFound: - LOG.debug("Port %(port)s not found while updating " - "agent binding for router %(router)s." - % {"port": port['id'], "router": router_id}) - elif (port and - port.get('device_owner') == - constants.DEVICE_OWNER_DVR_INTERFACE): - # Ports that are DVR interfaces have multiple bindings (based on - # of hosts on which DVR router interfaces are spawned). Such - # bindings are created/updated here by invoking - # update_dvr_port_binding - self.plugin.update_dvr_port_binding(context, port['id'], - {'port': - {portbindings.HOST_ID: host, - 'device_id': router_id} - }) - - def get_external_network_id(self, context, **kwargs): - """Get one external network id for l3 agent. - - l3 agent expects only on external network when it performs - this query. 
- """ - context = neutron_context.get_admin_context() - net_id = self.plugin.get_external_network_id(context) - LOG.debug(_("External network ID returned to l3 agent: %s"), - net_id) - return net_id - - def get_service_plugin_list(self, context, **kwargs): - plugins = manager.NeutronManager.get_service_plugins() - return plugins.keys() - - def update_floatingip_statuses(self, context, router_id, fip_statuses): - """Update operational status for a floating IP.""" - with context.session.begin(subtransactions=True): - for (floatingip_id, status) in fip_statuses.iteritems(): - LOG.debug(_("New status for floating IP %(floatingip_id)s: " - "%(status)s"), {'floatingip_id': floatingip_id, - 'status': status}) - try: - self.l3plugin.update_floatingip_status(context, - floatingip_id, - status) - except l3.FloatingIPNotFound: - LOG.debug(_("Floating IP: %s no longer present."), - floatingip_id) - # Find all floating IPs known to have been the given router - # for which an update was not received. Set them DOWN mercilessly - # This situation might occur for some asynchronous backends if - # notifications were missed - known_router_fips = self.l3plugin.get_floatingips( - context, {'last_known_router_id': [router_id]}) - # Consider only floating ips which were disassociated in the API - # FIXME(salv-orlando): Filtering in code should be avoided. - # the plugin should offer a way to specify a null filter - fips_to_disable = (fip['id'] for fip in known_router_fips - if not fip['router_id']) - for fip_id in fips_to_disable: - self.l3plugin.update_floatingip_status( - context, fip_id, constants.FLOATINGIP_STATUS_DOWN) - - def get_ports_by_subnet(self, context, **kwargs): - """DVR: RPC called by dvr-agent to get all ports for subnet.""" - subnet_id = kwargs.get('subnet_id') - LOG.debug("DVR: subnet_id: %s", subnet_id) - filters = {'fixed_ips': {'subnet_id': [subnet_id]}} - return self.plugin.get_ports(context, filters=filters) - - def get_agent_gateway_port(self, context, **kwargs): - """Get Agent Gateway port for FIP. - - l3 agent expects an Agent Gateway Port to be returned - for this query. 
- """ - network_id = kwargs.get('network_id') - host = kwargs.get('host') - admin_ctx = neutron_context.get_admin_context() - agent_port = self.l3plugin.create_fip_agent_gw_port_if_not_exists( - admin_ctx, network_id, host) - self._ensure_host_set_on_port(admin_ctx, host, agent_port) - LOG.debug('Agent Gateway port returned : %(agent_port)s with ' - 'host %(host)s', {'agent_port': agent_port, - 'host': host}) - return agent_port - - #added by jiahaojie 00209498---begin - def update_router_extern_ip_map(self, context, **kwargs): - router_id = kwargs.get('router_id') - host = kwargs.get('host') - extern_ip = kwargs.get('gateway_ip') - context = neutron_context.get_admin_context() - plugin = manager.NeutronManager.get_plugin() - plugin.update_router_az_extern_ip_mapping(context, - router_id, host, extern_ip) - - def get_extra_routes_by_subnet(self, context, **kwargs): - router_id = kwargs.get('router_id') - host = kwargs.get('host') - subnet_id = kwargs.get('subnet_id') - plugin = manager.NeutronManager.get_plugin() - subnet = plugin.get_subnet(context, subnet_id) - network = plugin.get_network(context, subnet['network_id']) - binding_host = plugin.get_binding_az_by_network_id(context, - network['id']) - net_type = network['provider:network_type'] - seg_id = network['provider:segmentation_id'] - if(net_type == 'vxlan' and plugin.is_big2layer_vni(seg_id)): - extra_routes = ['big2Layer'] - elif(net_type in ['vlan', 'vxlan'] and binding_host != host): - if(binding_host is None): - return['not_bound_network'] - extern_ip = plugin.get_extern_ip_by_router_id_and_host( - context, - router_id, - binding_host) - extra_routes = [(extern_ip, subnet['cidr'])] - else: - extra_routes = ['local_network'] - return extra_routes - #added by jiahaojie 00209498---end - - def get_snat_router_interface_ports(self, context, **kwargs): - """Get SNAT serviced Router Port List. - - The Service Node that hosts the SNAT service requires - the ports to service the router interfaces. - This function will check if any available ports, if not - it will create ports on the routers interfaces and - will send a list to the L3 agent. - """ - router_id = kwargs.get('router_id') - host = kwargs.get('host') - admin_ctx = neutron_context.get_admin_context() - snat_port_list = ( - self.l3plugin.create_snat_intf_port_list_if_not_exists( - admin_ctx, router_id)) - for p in snat_port_list: - self._ensure_host_set_on_port(admin_ctx, host, p) - LOG.debug('SNAT interface ports returned : %(snat_port_list)s ' - 'and on host %(host)s', {'snat_port_list': snat_port_list, - 'host': host}) - return snat_port_list - - def update_router_state(self, context, **kwargs): - router_id = kwargs.get('router_id') - state = kwargs.get('state') - host = kwargs.get('host') - - return self.l3plugin.update_router_state(context, router_id, state, - host=host) diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/common/config.py b/juno-patches/neutron/neutron_cascading_l3_patch/neutron/common/config.py deleted file mode 100644 index a1598e7a..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/common/config.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Routines for configuring Neutron -""" - -import os - -from oslo.config import cfg -from oslo.db import options as db_options -from oslo import messaging -from paste import deploy - -from neutron.api.v2 import attributes -from neutron.common import utils -from neutron.openstack.common import log as logging -from neutron import version - - -LOG = logging.getLogger(__name__) - -core_opts = [ - cfg.StrOpt('bind_host', default='0.0.0.0', - help=_("The host IP to bind to")), - cfg.IntOpt('bind_port', default=9696, - help=_("The port to bind to")), - cfg.StrOpt('api_paste_config', default="api-paste.ini", - help=_("The API paste config file to use")), - cfg.StrOpt('api_extensions_path', default="", - help=_("The path for API extensions")), - cfg.StrOpt('policy_file', default="policy.json", - help=_("The policy file to use")), - cfg.StrOpt('auth_strategy', default='keystone', - help=_("The type of authentication to use")), - cfg.StrOpt('core_plugin', - help=_("The core plugin Neutron will use")), - cfg.ListOpt('service_plugins', default=[], - help=_("The service plugins Neutron will use")), - cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", - help=_("The base MAC address Neutron will use for VIFs")), - cfg.IntOpt('mac_generation_retries', default=16, - help=_("How many times Neutron will retry MAC generation")), - cfg.BoolOpt('allow_bulk', default=True, - help=_("Allow the usage of the bulk API")), - cfg.BoolOpt('allow_pagination', default=False, - help=_("Allow the usage of the pagination")), - cfg.BoolOpt('allow_sorting', default=False, - help=_("Allow the usage of the sorting")), - cfg.StrOpt('pagination_max_limit', default="-1", - help=_("The maximum number of items returned in a single " - "response, value was 'infinite' or negative integer " - "means no limit")), - cfg.IntOpt('max_dns_nameservers', default=5, - help=_("Maximum number of DNS nameservers")), - cfg.IntOpt('max_subnet_host_routes', default=20, - help=_("Maximum number of host routes per subnet")), - cfg.IntOpt('max_fixed_ips_per_port', default=5, - help=_("Maximum number of fixed ips per port")), - cfg.IntOpt('dhcp_lease_duration', default=86400, - deprecated_name='dhcp_lease_time', - help=_("DHCP lease duration (in seconds). Use -1 to tell " - "dnsmasq to use infinite lease times.")), - cfg.BoolOpt('dhcp_agent_notification', default=True, - help=_("Allow sending resource operation" - " notification to DHCP agent")), - cfg.BoolOpt('allow_overlapping_ips', default=False, - help=_("Allow overlapping IP support in Neutron")), - cfg.StrOpt('host', default=utils.get_hostname(), - help=_("The hostname Neutron is running on")), - cfg.BoolOpt('force_gateway_on_subnet', default=True, - help=_("Ensure that configured gateway is on subnet. " - "For IPv6, validate only if gateway is not a link " - "local address. 
Deprecated, to be removed during the " - "K release, at which point the check will be " - "mandatory.")), - cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, - help=_("Send notification to nova when port status changes")), - cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, - help=_("Send notification to nova when port data (fixed_ips/" - "floatingip) changes so nova can update its cache.")), - cfg.StrOpt('nova_url', - default='http://127.0.0.1:8774/v2', - help=_('URL for connection to nova')), - cfg.StrOpt('nova_admin_username', - help=_('Username for connecting to nova in admin context')), - cfg.StrOpt('nova_admin_password', - help=_('Password for connection to nova in admin context'), - secret=True), - cfg.StrOpt('nova_admin_tenant_id', - help=_('The uuid of the admin nova tenant')), - cfg.StrOpt('nova_admin_auth_url', - default='http://localhost:5000/v2.0', - help=_('Authorization URL for connecting to nova in admin ' - 'context')), - cfg.StrOpt('nova_ca_certificates_file', - help=_('CA file for novaclient to verify server certificates')), - cfg.BoolOpt('nova_api_insecure', default=False, - help=_("If True, ignore any SSL validation issues")), - cfg.StrOpt('nova_region_name', - help=_('Name of nova region to use. Useful if keystone manages' - ' more than one region.')), - cfg.IntOpt('send_events_interval', default=2, - help=_('Number of seconds between sending events to nova if ' - 'there are any events to send.')), - - # add by j00209498 - cfg.StrOpt('cascade_str', default='cascading', - help=_('cascade_str identity cascading openstack or cascaded' - 'openstack, value = cascaded or cascading.')), -] - -core_cli_opts = [ - cfg.StrOpt('state_path', - default='/var/lib/neutron', - help=_("Where to store Neutron state files. " - "This directory must be writable by the agent.")), -] - -# Register the configuration options -cfg.CONF.register_opts(core_opts) -cfg.CONF.register_cli_opts(core_cli_opts) - -# Ensure that the control exchange is set correctly -messaging.set_transport_defaults(control_exchange='neutron') -_SQL_CONNECTION_DEFAULT = 'sqlite://' -# Update the default QueuePool parameters. These can be tweaked by the -# configuration variables - max_pool_size, max_overflow and pool_timeout -db_options.set_defaults(cfg.CONF, - connection=_SQL_CONNECTION_DEFAULT, - sqlite_db='', max_pool_size=10, - max_overflow=20, pool_timeout=10) - - -def init(args, **kwargs): - cfg.CONF(args=args, project='neutron', - version='%%prog %s' % version.version_info.release_string(), - **kwargs) - - # FIXME(ihrachys): if import is put in global, circular import - # failure occurs - from neutron.common import rpc as n_rpc - n_rpc.init(cfg.CONF) - - # Validate that the base_mac is of the correct format - msg = attributes._validate_regex(cfg.CONF.base_mac, - attributes.MAC_PATTERN) - if msg: - msg = _("Base MAC: %s") % msg - raise Exception(msg) - - -def setup_logging(): - """Sets up the logging options for a log with supplied name.""" - product_name = "neutron" - logging.setup(product_name) - LOG.info(_("Logging enabled!")) - - -def load_paste_app(app_name): - """Builds and returns a WSGI app from a paste config file. 
- - :param app_name: Name of the application to load - :raises ConfigFilesNotFoundError when config file cannot be located - :raises RuntimeError when application cannot be loaded from config file - """ - - config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config) - if not config_path: - raise cfg.ConfigFilesNotFoundError( - config_files=[cfg.CONF.api_paste_config]) - config_path = os.path.abspath(config_path) - LOG.info(_("Config paste file: %s"), config_path) - - try: - app = deploy.loadapp("config:%s" % config_path, name=app_name) - except (LookupError, ImportError): - msg = (_("Unable to load %(app_name)s from " - "configuration file %(config_path)s.") % - {'app_name': app_name, - 'config_path': config_path}) - LOG.exception(msg) - raise RuntimeError(msg) - return app diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/common/exceptions.py b/juno-patches/neutron/neutron_cascading_l3_patch/neutron/common/exceptions.py deleted file mode 100644 index 03d5d6f5..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/common/exceptions.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright 2011 VMware, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Neutron base exception handling. -""" - -from neutron.openstack.common import excutils - - -class NeutronException(Exception): - """Base Neutron Exception. - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. 
- """ - message = _("An unknown exception occurred.") - - def __init__(self, **kwargs): - try: - super(NeutronException, self).__init__(self.message % kwargs) - self.msg = self.message % kwargs - except Exception: - with excutils.save_and_reraise_exception() as ctxt: - if not self.use_fatal_exceptions(): - ctxt.reraise = False - # at least get the core message out if something happened - super(NeutronException, self).__init__(self.message) - - def __unicode__(self): - return unicode(self.msg) - - def use_fatal_exceptions(self): - return False - - -class BadRequest(NeutronException): - message = _('Bad %(resource)s request: %(msg)s') - - -class NotFound(NeutronException): - pass - - -class Conflict(NeutronException): - pass - - -class NotAuthorized(NeutronException): - message = _("Not authorized.") - - -class ServiceUnavailable(NeutronException): - message = _("The service is unavailable") - - -class AdminRequired(NotAuthorized): - message = _("User does not have admin privileges: %(reason)s") - - -class PolicyNotAuthorized(NotAuthorized): - message = _("Policy doesn't allow %(action)s to be performed.") - - -class NetworkNotFound(NotFound): - message = _("Network %(net_id)s could not be found") - - -class SubnetNotFound(NotFound): - message = _("Subnet %(subnet_id)s could not be found") - - -class PortNotFound(NotFound): - message = _("Port %(port_id)s could not be found") - - -class PortNotFoundOnNetwork(NotFound): - message = _("Port %(port_id)s could not be found " - "on network %(net_id)s") - - -class PolicyFileNotFound(NotFound): - message = _("Policy configuration policy.json could not be found") - - -class PolicyInitError(NeutronException): - message = _("Failed to init policy %(policy)s because %(reason)s") - - -class PolicyCheckError(NeutronException): - message = _("Failed to check policy %(policy)s because %(reason)s") - - -class StateInvalid(BadRequest): - message = _("Unsupported port state: %(port_state)s") - - -class InUse(NeutronException): - message = _("The resource is inuse") - - -class NetworkInUse(InUse): - message = _("Unable to complete operation on network %(net_id)s. " - "There are one or more ports still in use on the network.") - - -class SubnetInUse(InUse): - message = _("Unable to complete operation on subnet %(subnet_id)s. " - "One or more ports have an IP allocation from this subnet.") - - -class PortInUse(InUse): - message = _("Unable to complete operation on port %(port_id)s " - "for network %(net_id)s. Port already has an attached" - "device %(device_id)s.") - - -class MacAddressInUse(InUse): - message = _("Unable to complete operation for network %(net_id)s. " - "The mac address %(mac)s is in use.") - - -class HostRoutesExhausted(BadRequest): - # NOTE(xchenum): probably make sense to use quota exceeded exception? - message = _("Unable to complete operation for %(subnet_id)s. " - "The number of host routes exceeds the limit %(quota)s.") - - -class DNSNameServersExhausted(BadRequest): - # NOTE(xchenum): probably make sense to use quota exceeded exception? - message = _("Unable to complete operation for %(subnet_id)s. " - "The number of DNS nameservers exceeds the limit %(quota)s.") - - -class IpAddressInUse(InUse): - message = _("Unable to complete operation for network %(net_id)s. " - "The IP address %(ip_address)s is in use.") - - -class VlanIdInUse(InUse): - message = _("Unable to create the network. 
" - "The VLAN %(vlan_id)s on physical network " - "%(physical_network)s is in use.") - - -class FlatNetworkInUse(InUse): - message = _("Unable to create the flat network. " - "Physical network %(physical_network)s is in use.") - - -class TunnelIdInUse(InUse): - message = _("Unable to create the network. " - "The tunnel ID %(tunnel_id)s is in use.") - - -class TenantNetworksDisabled(ServiceUnavailable): - message = _("Tenant network creation is not enabled.") - - -class ResourceExhausted(ServiceUnavailable): - pass - - -class NoNetworkAvailable(ResourceExhausted): - message = _("Unable to create the network. " - "No tenant network is available for allocation.") - - -class NoNetworkFoundInMaximumAllowedAttempts(ServiceUnavailable): - message = _("Unable to create the network. " - "No available network found in maximum allowed attempts.") - - -class SubnetMismatchForPort(BadRequest): - message = _("Subnet on port %(port_id)s does not match " - "the requested subnet %(subnet_id)s") - - -class MalformedRequestBody(BadRequest): - message = _("Malformed request body: %(reason)s") - - -class Invalid(NeutronException): - def __init__(self, message=None): - self.message = message - super(Invalid, self).__init__() - - -class InvalidInput(BadRequest): - message = _("Invalid input for operation: %(error_message)s.") - - -class InvalidAllocationPool(BadRequest): - message = _("The allocation pool %(pool)s is not valid.") - - -class OverlappingAllocationPools(Conflict): - message = _("Found overlapping allocation pools:" - "%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.") - - -class OutOfBoundsAllocationPool(BadRequest): - message = _("The allocation pool %(pool)s spans " - "beyond the subnet cidr %(subnet_cidr)s.") - - -class MacAddressGenerationFailure(ServiceUnavailable): - message = _("Unable to generate unique mac on network %(net_id)s.") - - -class IpAddressGenerationFailure(Conflict): - message = _("No more IP addresses available on network %(net_id)s.") - - -class BridgeDoesNotExist(NeutronException): - message = _("Bridge %(bridge)s does not exist.") - - -class PreexistingDeviceFailure(NeutronException): - message = _("Creation failed. %(dev_name)s already exists.") - - -class SudoRequired(NeutronException): - message = _("Sudo privilege is required to run this command.") - - -class QuotaResourceUnknown(NotFound): - message = _("Unknown quota resources %(unknown)s.") - - -class OverQuota(Conflict): - message = _("Quota exceeded for resources: %(overs)s") - - -class QuotaMissingTenant(BadRequest): - message = _("Tenant-id was missing from Quota request") - - -class InvalidQuotaValue(Conflict): - message = _("Change would make usage less than 0 for the following " - "resources: %(unders)s") - - -class InvalidSharedSetting(Conflict): - message = _("Unable to reconfigure sharing settings for network " - "%(network)s. 
Multiple tenants are using it") - - -class InvalidExtensionEnv(BadRequest): - message = _("Invalid extension environment: %(reason)s") - - -class ExtensionsNotFound(NotFound): - message = _("Extensions not found: %(extensions)s") - - -class InvalidContentType(NeutronException): - message = _("Invalid content type %(content_type)s") - - -class ExternalIpAddressExhausted(BadRequest): - message = _("Unable to find any IP address on external " - "network %(net_id)s.") - - -class TooManyExternalNetworks(NeutronException): - message = _("More than one external network exists") - - -class InvalidConfigurationOption(NeutronException): - message = _("An invalid value was provided for %(opt_name)s: " - "%(opt_value)s") - - -class GatewayConflictWithAllocationPools(InUse): - message = _("Gateway ip %(ip_address)s conflicts with " - "allocation pool %(pool)s") - - -class GatewayIpInUse(InUse): - message = _("Current gateway ip %(ip_address)s already in use " - "by port %(port_id)s. Unable to update.") - - -class NetworkVlanRangeError(NeutronException): - message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'") - - def __init__(self, **kwargs): - # Convert vlan_range tuple to 'start:end' format for display - if isinstance(kwargs['vlan_range'], tuple): - kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range'] - super(NetworkVlanRangeError, self).__init__(**kwargs) - - -class NetworkTunnelRangeError(NeutronException): - message = _("Invalid network Tunnel range: " - "'%(tunnel_range)s' - %(error)s") - - def __init__(self, **kwargs): - # Convert tunnel_range tuple to 'start:end' format for display - if isinstance(kwargs['tunnel_range'], tuple): - kwargs['tunnel_range'] = "%d:%d" % kwargs['tunnel_range'] - super(NetworkTunnelRangeError, self).__init__(**kwargs) - - -class NetworkVxlanPortRangeError(NeutronException): - message = _("Invalid network VXLAN port range: '%(vxlan_range)s'") - - -class VxlanNetworkUnsupported(NeutronException): - message = _("VXLAN Network unsupported.") - - -class DuplicatedExtension(NeutronException): - message = _("Found duplicate extension: %(alias)s") - - -class DeviceIDNotOwnedByTenant(Conflict): - message = _("The following device_id %(device_id)s is not owned by your " - "tenant or matches another tenants router.") - - -class InvalidCIDR(BadRequest): - message = _("Invalid CIDR %(input)s given as IP prefix") - -class PortBindAZError(BadRequest): - message = _("Network %(net_id)s is local network, " - "cannot be created in host %(host)s AZ.") diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/db/cascade_db.py b/juno-patches/neutron/neutron_cascading_l3_patch/neutron/db/cascade_db.py deleted file mode 100644 index d32b9ccf..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/db/cascade_db.py +++ /dev/null @@ -1,162 +0,0 @@ -''' -Created on 2014-8-5 - -@author: j00209498 -''' -from oslo.db import exception as db_exc - -import sqlalchemy as sa -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api - -from neutron.common import exceptions as q_exc -from neutron.common import log -from neutron.common import utils -from neutron.db import model_base -from neutron.extensions import dvr as ext_dvr -from neutron import manager -from neutron.openstack.common import log as logging -from oslo.config import cfg -from sqlalchemy.orm import exc - -LOG = logging.getLogger(__name__) - -big2layer_vni_opts = [ - cfg.StrOpt('big2layer_vni_range', - default="4097:20000", - help=_('The big 2 layer vxlan vni range used for ' - 
'CascadeDBMixin instances by Neutron')), -] -cfg.CONF.register_opts(big2layer_vni_opts) - - -class CascadeAZNetworkBinding(model_base.BASEV2): - - """Maps a network to the availability-zone host it is bound to.""" - - __tablename__ = 'cascade_az_network_bind' - - network_id = sa.Column(sa.String(36), primary_key=True, nullable=False) - host = sa.Column(sa.String(255), primary_key=True, nullable=False) - - -class CascadeRouterAZExternipMapping(model_base.BASEV2): - - """Maps a router and availability-zone host to the router's external IP.""" - - __tablename__ = 'cascade_router_az_externip_map' - - router_id = sa.Column(sa.String(36), primary_key=True, nullable=False) - host = sa.Column(sa.String(255), primary_key=True, nullable=False) - extern_ip = sa.Column(sa.String(64), nullable=False) - - -class CascadeDBMixin(object): - - @property - def l3_rpc_notifier(self): - if not hasattr(self, '_l3_rpc_notifier'): - self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() - return self._l3_rpc_notifier - - def is_big2layer_vni(self, seg_id): - vni = cfg.CONF.big2layer_vni_range.split(':') - if(seg_id >= int(vni[0]) and seg_id <= int(vni[1])): - return True - else: - return False - - def get_binding_az_by_network_id(self, context, net_id): - try: - query = context.session.query(CascadeAZNetworkBinding) - ban = query.filter( - CascadeAZNetworkBinding.network_id == net_id).one() - except exc.NoResultFound: - return None - return ban['host'] - - def add_binding_az_network_id(self, context, binding_host, net_id): - try: - with context.session.begin(subtransactions=True): - az_network_binding = CascadeAZNetworkBinding( - network_id=net_id, host=binding_host) - context.session.add(az_network_binding) - LOG.debug("add az_host %(host)s for network %(network_id)s ", - {'host': binding_host, 'network_id': net_id}) - except db_exc.DBDuplicateEntry: - LOG.debug("az_host %(host)s exists for network %(network_id)s," - " DBDuplicateEntry error.", - {'host': binding_host, 'network_id': net_id}) - - def get_extern_ip_by_router_id_and_host(self, context, router_id, host): - rae = self.get_router_az_extern_ip_mapping(context, router_id, host) - if(rae): - return rae['extern_ip'] - return None -# try: -# query = context.session.query(CascadeRouterAZExternipMapping) -# erh = query.filter( -# CascadeRouterAZExternipMapping.router_id == router_id, -# CascadeRouterAZExternipMapping.host == host).one() -# except exc.NoResultFound: -# return None -# return erh['extern_ip'] - - def get_router_az_extern_ip_mapping(self, context, router_id, host): - try: - query = context.session.query(CascadeRouterAZExternipMapping) - erh = query.filter( - CascadeRouterAZExternipMapping.router_id == router_id, - CascadeRouterAZExternipMapping.host == host).one() - except exc.NoResultFound: - return None - return erh - - def update_router_az_extern_ip_mapping(self, context, router_id, - host, extern_ip): - if extern_ip is None: - self.del_router_az_extern_ip_mapping(context, router_id, host) - self.l3_rpc_notifier.routers_updated(context, [router_id], - None, None) - return - rae = self.get_router_az_extern_ip_mapping(context, router_id, host) - if(rae and rae['extern_ip'] != extern_ip): - update_rae = {} - update_rae['router_id'] = rae['router_id'] - update_rae['host'] = rae['host'] - update_rae['extern_ip'] = extern_ip - rae.update(update_rae) - LOG.debug("update extern_ip %(extern_ip)s for az_host %(host)s " - "and router %(router_id)s ", - {'extern_ip': extern_ip, - 'host': host, - 'router_id': router_id}) - 
self.l3_rpc_notifier.routers_updated(context, [router_id], - None, None) - return - try: - with context.session.begin(subtransactions=True): - router_az_extern_ip_map = CascadeRouterAZExternipMapping( - router_id=router_id, host=host, extern_ip=extern_ip) - context.session.add(router_az_extern_ip_map) - LOG.debug("add extern_ip %(extern_ip)s for az_host %(host)s " - "and router %(router_id)s ", - {'extern_ip': extern_ip, - 'host': host, - 'router_id': router_id}) - self.l3_rpc_notifier.routers_updated(context, [router_id], - None, None) - except db_exc.DBDuplicateEntry: - LOG.debug("DBDuplicateEntry ERR: update extern_ip %(extern_ip)s " - "for az_host %(host)s and router %(router_id)s ", - {'extern_ip': extern_ip, - 'host': host, - 'router_id': router_id}) - - def del_router_az_extern_ip_mapping(self, context, router_id, host): - try: - query = context.session.query(CascadeRouterAZExternipMapping) - query.filter( - CascadeRouterAZExternipMapping.router_id == router_id, - CascadeRouterAZExternipMapping.host == host).delete() - except exc.NoResultFound: - return None diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py b/juno-patches/neutron/neutron_cascading_l3_patch/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py deleted file mode 100644 index 5015a176..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""L2 models to support DVR - -Revision ID: 2026156eab2f -Revises: 3927f7f7c456 -Create Date: 2014-06-23 19:12:43.392912 - -""" - -# revision identifiers, used by Alembic. 
-revision = '2026156eab2f' -down_revision = '3927f7f7c456' - - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'dvr_host_macs', - sa.Column('host', sa.String(length=255), nullable=False), - sa.Column('mac_address', sa.String(length=32), - nullable=False, unique=True), - sa.PrimaryKeyConstraint('host') - ) - op.create_table( - 'ml2_dvr_port_bindings', - sa.Column('port_id', sa.String(length=36), nullable=False), - sa.Column('host', sa.String(length=255), nullable=False), - sa.Column('router_id', sa.String(length=36), nullable=True), - sa.Column('vif_type', sa.String(length=64), nullable=False), - sa.Column('vif_details', sa.String(length=4095), - nullable=False, server_default=''), - sa.Column('vnic_type', sa.String(length=64), - nullable=False, server_default='normal'), - sa.Column('profile', sa.String(length=4095), - nullable=False, server_default=''), - sa.Column('cap_port_filter', sa.Boolean(), nullable=False), - sa.Column('driver', sa.String(length=64), nullable=True), - sa.Column('segment', sa.String(length=36), nullable=True), - sa.Column(u'status', sa.String(16), nullable=False), - sa.ForeignKeyConstraint(['port_id'], ['ports.id'], - ondelete='CASCADE'), - sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'], - ondelete='SET NULL'), - sa.PrimaryKeyConstraint('port_id', 'host') - ) - - # add by jiahaojie 00209498 ---begin - op.create_table( - 'cascade_az_network_bind', - sa.Column('network_id', sa.String(length=36), nullable=False), - sa.Column('host', sa.String(length=255), nullable=False), - sa.PrimaryKeyConstraint('network_id', 'host') - ) - op.create_table( - 'cascade_router_az_externip_map', - sa.Column('router_id', sa.String(length=36), nullable=False), - sa.Column('host', sa.String(length=255), nullable=False), - sa.Column('extern_ip', sa.String(length=64), nullable=False), - sa.PrimaryKeyConstraint('router_id', 'host') - ) - # add by jiahaojie 00209498 ---end - -def downgrade(): - op.drop_table('ml2_dvr_port_bindings') - op.drop_table('dvr_host_macs') - op.drop_table('cascade_az_network_bind') - op.drop_table('cascade_router_az_externip_map') diff --git a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/plugins/ml2/plugin.py b/juno-patches/neutron/neutron_cascading_l3_patch/neutron/plugins/ml2/plugin.py deleted file mode 100644 index 0b9ffd1d..00000000 --- a/juno-patches/neutron/neutron_cascading_l3_patch/neutron/plugins/ml2/plugin.py +++ /dev/null @@ -1,1214 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import contextlib -from eventlet import greenthread - -from oslo.config import cfg -from oslo.db import exception as os_db_exception -from sqlalchemy import exc as sql_exc -from sqlalchemy.orm import exc as sa_exc - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.rpc.handlers import dhcp_rpc -from neutron.api.rpc.handlers import dvr_rpc -from neutron.api.rpc.handlers import securitygroups_rpc -from neutron.api.v2 import attributes -from neutron.common import constants as const -from neutron.common import exceptions as exc -from neutron.common import rpc as n_rpc -from neutron.common import topics -from neutron.common import utils -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import allowedaddresspairs_db as addr_pair_db -from neutron.db import api as db_api -from neutron.db import db_base_plugin_v2 -from neutron.db import dvr_mac_db -from neutron.db import external_net_db -from neutron.db import extradhcpopt_db -from neutron.db import models_v2 -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.db import cascade_db -from neutron.extensions import allowedaddresspairs as addr_pair -from neutron.extensions import extra_dhcp_opt as edo_ext -from neutron.extensions import l3agentscheduler -from neutron.extensions import portbindings -from neutron.extensions import providernet as provider -from neutron import manager -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.openstack.common import jsonutils -from neutron.openstack.common import lockutils -from neutron.openstack.common import log -from neutron.openstack.common import uuidutils -from neutron.plugins.common import constants as service_constants -from neutron.plugins.ml2.common import exceptions as ml2_exc -from neutron.plugins.ml2 import config # noqa -from neutron.plugins.ml2 import db -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2 import driver_context -from neutron.plugins.ml2 import managers -from neutron.plugins.ml2 import models -from neutron.plugins.ml2 import rpc - -LOG = log.getLogger(__name__) - -MAX_BIND_TRIES = 10 - -# REVISIT(rkukura): Move this and other network_type constants to -# providernet.py? -TYPE_MULTI_SEGMENT = 'multi-segment' - - -class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, - dvr_mac_db.DVRDbMixin, - external_net_db.External_net_db_mixin, - sg_db_rpc.SecurityGroupServerRpcMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - addr_pair_db.AllowedAddressPairsMixin, - extradhcpopt_db.ExtraDhcpOptMixin, - cascade_db.CascadeDBMixin): - - """Implement the Neutron L2 abstractions using modules. - - Ml2Plugin is a Neutron plugin based on separately extensible sets - of network types and mechanisms for connecting to networks of - those types. The network types and mechanisms are implemented as - drivers loaded via Python entry points. Networks can be made up of - multiple segments (not yet fully implemented). - """ - - # This attribute specifies whether the plugin supports or not - # bulk/pagination/sorting operations. 
Name mangling is used in - # order to ensure it is qualified by class - __native_bulk_support = True - __native_pagination_support = True - __native_sorting_support = True - - # List of supported extensions - _supported_extension_aliases = ["provider", "external-net", "binding", - "quotas", "security-group", "agent", - "dhcp_agent_scheduler", - "multi-provider", "allowed-address-pairs", - "extra_dhcp_opt"] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - aliases += self.extension_manager.extension_aliases() - sg_rpc.disable_security_group_extension_by_config(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - # First load drivers, then initialize DB, then initialize drivers - self.type_manager = managers.TypeManager() - self.extension_manager = managers.ExtensionManager() - self.mechanism_manager = managers.MechanismManager() - super(Ml2Plugin, self).__init__() - self.type_manager.initialize() - self.extension_manager.initialize() - self.mechanism_manager.initialize() - # bulk support depends on the underlying drivers - self.__native_bulk_support = self.mechanism_manager.native_bulk_support - - self._setup_rpc() - - # REVISIT(rkukura): Use stevedore for these? - self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver - ) - - LOG.info(_("Modular L2 Plugin initialization complete")) - - def _setup_rpc(self): - self.notifier = rpc.AgentNotifierApi(topics.AGENT) - self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - ) - - def start_rpc_listeners(self): - self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager), - securitygroups_rpc.SecurityGroupServerRpcCallback(), - dvr_rpc.DVRServerRpcCallback(), - dhcp_rpc.DhcpRpcCallback(), - agents_db.AgentExtRpcCallback()] - self.topic = topics.PLUGIN - self.conn = n_rpc.create_connection(new=True) - self.conn.create_consumer(self.topic, self.endpoints, - fanout=False) - return self.conn.consume_in_threads() - - def _filter_nets_provider(self, context, nets, filters): - # TODO(rkukura): Implement filtering. - return nets - - def _notify_l3_agent_new_port(self, context, port): - if not port: - return - - # Whenever a DVR serviceable port comes up on a - # node, it has to be communicated to the L3 Plugin - # and agent for creating the respective namespaces. - if (utils.is_dvr_serviced(port['device_owner'])): - l3plugin = manager.NeutronManager.get_service_plugins().get( - service_constants.L3_ROUTER_NAT) - if (utils.is_extension_supported( - l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)): - l3plugin.dvr_update_router_addvm(context, port) - - def _get_host_port_if_changed(self, mech_context, attrs): - binding = mech_context._binding - host = attrs and attrs.get(portbindings.HOST_ID) - if (attributes.is_attr_set(host) and binding.host != host): - return mech_context.current - - def _process_port_binding(self, mech_context, attrs): - binding = mech_context._binding - port = mech_context.current - changes = False - - host = attrs and attrs.get(portbindings.HOST_ID) - if (attributes.is_attr_set(host) and - binding.host != host): - binding.host = host - changes = True - - vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) - if (attributes.is_attr_set(vnic_type) and - binding.vnic_type != vnic_type): - binding.vnic_type = vnic_type - changes = True - - # treat None as clear of profile. 
- profile = None - if attrs and portbindings.PROFILE in attrs: - profile = attrs.get(portbindings.PROFILE) or {} - - if profile not in (None, attributes.ATTR_NOT_SPECIFIED, - self._get_profile(binding)): - binding.profile = jsonutils.dumps(profile) - if len(binding.profile) > models.BINDING_PROFILE_LEN: - msg = _("binding:profile value too large") - raise exc.InvalidInput(error_message=msg) - changes = True - - # Unbind the port if needed. - if changes: - binding.vif_type = portbindings.VIF_TYPE_UNBOUND - binding.vif_details = '' - binding.driver = None - binding.segment = None - - if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED - binding.vif_details = '' - binding.driver = None - binding.segment = None - binding.host = '' - - self._update_port_dict_binding(port, binding) - return changes - - def _bind_port_if_needed(self, context, allow_notify=False, - need_notify=False): - plugin_context = context._plugin_context - port_id = context._port['id'] - - # Since the mechanism driver bind_port() calls must be made - # outside a DB transaction locking the port state, it is - # possible (but unlikely) that the port's state could change - # concurrently while these calls are being made. If another - # thread or process succeeds in binding the port before this - # thread commits its results, the already committed results are - # used. If attributes such as binding:host_id, - # binding:profile, or binding:vnic_type are updated - # concurrently, this loop retries binding using the new - # values. - count = 0 - while True: - # First, determine whether it is necessary and possible to - # bind the port. - binding = context._binding - if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND - or not binding.host): - # We either don't need to bind the port, or can't, so - # notify if needed and return. - if allow_notify and need_notify: - self._notify_port_updated(context) - return context - - # Limit binding attempts to avoid any possibility of - # infinite looping and to ensure an error is logged - # instead. This does not need to be tunable because no - # more than a couple attempts should ever be required in - # normal operation. Log at info level if not 1st attempt. - count += 1 - if count > MAX_BIND_TRIES: - LOG.error(_("Failed to commit binding results for %(port)s " - "after %(max)s tries"), - {'port': port_id, 'max': MAX_BIND_TRIES}) - return context - if count > 1: - greenthread.sleep(0) # yield - LOG.info(_("Attempt %(count)s to bind port %(port)s"), - {'count': count, 'port': port_id}) - - # The port isn't already bound and the necessary - # information is available, so attempt to bind the port. - bind_context = self._bind_port(context) - - # Now try to commit result of attempting to bind the port. - new_context, did_commit = self._commit_port_binding( - plugin_context, port_id, binding, bind_context) - if not new_context: - # The port has been deleted concurrently, so just - # return the unbound result from the initial - # transaction that completed before the deletion. - LOG.debug("Port %s has been deleted concurrently", - port_id) - return context - # Need to notify if we succeed and our results were - # committed. - if did_commit and (new_context._binding.vif_type != - portbindings.VIF_TYPE_BINDING_FAILED): - need_notify = True - context = new_context - - def _bind_port(self, orig_context): - # Construct a new PortContext from the one from the previous - # transaction. 
- port = orig_context._port - orig_binding = orig_context._binding - new_binding = models.PortBinding( - host=orig_binding.host, - vnic_type=orig_binding.vnic_type, - profile=orig_binding.profile, - vif_type=portbindings.VIF_TYPE_UNBOUND, - vif_details='' - ) - self._update_port_dict_binding(port, new_binding) - new_context = driver_context.PortContext( - self, orig_context._plugin_context, port, - orig_context._network_context._network, new_binding) - - # Attempt to bind the port and return the context with the - # result. - self.mechanism_manager.bind_port(new_context) - return new_context - - def _commit_port_binding(self, plugin_context, port_id, orig_binding, - new_context): - session = plugin_context.session - new_binding = new_context._binding - - # After we've attempted to bind the port, we begin a - # transaction, get the current port state, and decide whether - # to commit the binding results. - # - # REVISIT: Serialize this operation with a semaphore to - # prevent deadlock waiting to acquire a DB lock held by - # another thread in the same process, leading to 'lock wait - # timeout' errors. - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - # Get the current port state and build a new PortContext - # reflecting this state as original state for subsequent - # mechanism driver update_port_*commit() calls. - port_db, cur_binding = db.get_locked_port_and_binding(session, - port_id) - if not port_db: - # The port has been deleted concurrently. - return (None, None) - oport = self._make_port_dict(port_db) - port = self._make_port_dict(port_db) - network = self.get_network(plugin_context, port['network_id']) - cur_context = driver_context.PortContext( - self, plugin_context, port, network, cur_binding, - original_port=oport) - - # Commit our binding results only if port has not been - # successfully bound concurrently by another thread or - # process and no binding inputs have been changed. - commit = ((cur_binding.vif_type in - [portbindings.VIF_TYPE_UNBOUND, - portbindings.VIF_TYPE_BINDING_FAILED]) and - orig_binding.host == cur_binding.host and - orig_binding.vnic_type == cur_binding.vnic_type and - orig_binding.profile == cur_binding.profile) - - if commit: - # Update the port's binding state with our binding - # results. - cur_binding.vif_type = new_binding.vif_type - cur_binding.vif_details = new_binding.vif_details - cur_binding.driver = new_binding.driver - cur_binding.segment = new_binding.segment - - # Update PortContext's port dictionary to reflect the - # updated binding state. - self._update_port_dict_binding(port, cur_binding) - - # Update the port status if requested by the bound driver. - if new_binding.segment and new_context._new_port_status: - port_db.status = new_context._new_port_status - port['status'] = new_context._new_port_status - - # Call the mechanism driver precommit methods, commit - # the results, and call the postcommit methods. - self.mechanism_manager.update_port_precommit(cur_context) - if commit: - self.mechanism_manager.update_port_postcommit(cur_context) - - # Continue, using the port state as of the transaction that - # just finished, whether that transaction committed new - # results or discovered concurrent port state changes. 
- return (cur_context, commit) - - def _update_port_dict_binding(self, port, binding): - port[portbindings.HOST_ID] = binding.host - port[portbindings.VNIC_TYPE] = binding.vnic_type - port[portbindings.PROFILE] = self._get_profile(binding) - port[portbindings.VIF_TYPE] = binding.vif_type - port[portbindings.VIF_DETAILS] = self._get_vif_details(binding) - - def _get_vif_details(self, binding): - if binding.vif_details: - try: - return jsonutils.loads(binding.vif_details) - except Exception: - LOG.error(_("Serialized vif_details DB value '%(value)s' " - "for port %(port)s is invalid"), - {'value': binding.vif_details, - 'port': binding.port_id}) - return {} - - def _get_profile(self, binding): - if binding.profile: - try: - return jsonutils.loads(binding.profile) - except Exception: - LOG.error(_("Serialized profile DB value '%(value)s' for " - "port %(port)s is invalid"), - {'value': binding.profile, - 'port': binding.port_id}) - return {} - - #added by jiahaojie 00209498----begin - def _check_port_binding_az_valid(self, context, network, binding_host): - net_type = network['provider:network_type'] - seg_id = network['provider:segmentation_id'] - net_id = network['id'] - if (net_type == 'vxlan' and self.is_big2layer_vni(seg_id)): - return True - #net_type may be 'local' or 'flat' or 'gre', this not considered - elif(net_type in ['vlan', 'vxlan']): - host = self.get_binding_az_by_network_id(context, net_id) - if(not host): - self.add_binding_az_network_id(context, binding_host, net_id) - elif(host and host != binding_host): - return False - return True - return True - #added by jiahaojie 00209498----end - - def _ml2_extend_port_dict_binding(self, port_res, port_db): - # None when called during unit tests for other plugins. - if port_db.port_binding: - self._update_port_dict_binding(port_res, port_db.port_binding) - - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attributes.PORTS, ['_ml2_extend_port_dict_binding']) - - # Register extend dict methods for network and port resources. - # Each mechanism driver that supports extend attribute for the resources - # can add those attribute to the result. - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attributes.NETWORKS, ['_ml2_md_extend_network_dict']) - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attributes.PORTS, ['_ml2_md_extend_port_dict']) - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attributes.SUBNETS, ['_ml2_md_extend_subnet_dict']) - - def _ml2_md_extend_network_dict(self, result, netdb): - session = db_api.get_session() - with session.begin(subtransactions=True): - self.extension_manager.extend_network_dict(session, result) - - def _ml2_md_extend_port_dict(self, result, portdb): - session = db_api.get_session() - with session.begin(subtransactions=True): - self.extension_manager.extend_port_dict(session, result) - - def _ml2_md_extend_subnet_dict(self, result, subnetdb): - session = db_api.get_session() - with session.begin(subtransactions=True): - self.extension_manager.extend_subnet_dict(session, result) - - # Note - The following hook methods have "ml2" in their names so - # that they are not called twice during unit tests due to global - # registration of hooks in portbindings_db.py used by other - # plugins. 
- - def _ml2_port_model_hook(self, context, original_model, query): - query = query.outerjoin(models.PortBinding, - (original_model.id == - models.PortBinding.port_id)) - return query - - def _ml2_port_result_filter_hook(self, query, filters): - values = filters and filters.get(portbindings.HOST_ID, []) - if not values: - return query - return query.filter(models.PortBinding.host.in_(values)) - - db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( - models_v2.Port, - "ml2_port_bindings", - '_ml2_port_model_hook', - None, - '_ml2_port_result_filter_hook') - - def _notify_port_updated(self, mech_context): - port = mech_context._port - segment = mech_context.bound_segment - if not segment: - # REVISIT(rkukura): This should notify agent to unplug port - network = mech_context.network.current - LOG.warning(_("In _notify_port_updated(), no bound segment for " - "port %(port_id)s on network %(network_id)s"), - {'port_id': port['id'], - 'network_id': network['id']}) - return - self.notifier.port_update(mech_context._plugin_context, port, - segment[api.NETWORK_TYPE], - segment[api.SEGMENTATION_ID], - segment[api.PHYSICAL_NETWORK]) - - # TODO(apech): Need to override bulk operations - - def create_network(self, context, network): - net_data = network['network'] - tenant_id = self._get_tenant_id_for_create(context, net_data) - session = context.session - with session.begin(subtransactions=True): - self._ensure_default_security_group(context, tenant_id) - result = super(Ml2Plugin, self).create_network(context, network) - self.extension_manager.process_create_network(session, net_data, - result) - self._process_l3_create(context, result, net_data) - net_data['id'] = result['id'] - self.type_manager.create_network_segments(context, net_data, - tenant_id) - self.type_manager._extend_network_dict_provider(context, result) - mech_context = driver_context.NetworkContext(self, context, - result) - self.mechanism_manager.create_network_precommit(mech_context) - - try: - self.mechanism_manager.create_network_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - with excutils.save_and_reraise_exception(): - LOG.error(_("mechanism_manager.create_network_postcommit " - "failed, deleting network '%s'"), result['id']) - self.delete_network(context, result['id']) - return result - - def update_network(self, context, id, network): - provider._raise_if_updates_provider_attributes(network['network']) - - session = context.session - with session.begin(subtransactions=True): - original_network = super(Ml2Plugin, self).get_network(context, id) - updated_network = super(Ml2Plugin, self).update_network(context, - id, - network) - self.extension_manager.process_update_network(session, network, - original_network) - self._process_l3_update(context, updated_network, - network['network']) - self.type_manager._extend_network_dict_provider(context, - updated_network) - mech_context = driver_context.NetworkContext( - self, context, updated_network, - original_network=original_network) - self.mechanism_manager.update_network_precommit(mech_context) - - # TODO(apech) - handle errors raised by update_network, potentially - # by re-calling update_network with the previous attributes. For - # now the error is propogated to the caller, which is expected to - # either undo/retry the operation or delete the resource. 
- self.mechanism_manager.update_network_postcommit(mech_context) - return updated_network - - def get_network(self, context, id, fields=None): - session = context.session - with session.begin(subtransactions=True): - result = super(Ml2Plugin, self).get_network(context, id, None) - self.type_manager._extend_network_dict_provider(context, result) - - return self._fields(result, fields) - - def get_networks(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, page_reverse=False): - session = context.session - with session.begin(subtransactions=True): - nets = super(Ml2Plugin, - self).get_networks(context, filters, None, sorts, - limit, marker, page_reverse) - for net in nets: - self.type_manager._extend_network_dict_provider(context, net) - - nets = self._filter_nets_provider(context, nets, filters) - nets = self._filter_nets_l3(context, nets, filters) - - return [self._fields(net, fields) for net in nets] - - def delete_network(self, context, id): - # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network() - # function is not used because it auto-deletes ports and - # subnets from the DB without invoking the derived class's - # delete_port() or delete_subnet(), preventing mechanism - # drivers from being called. This approach should be revisited - # when the API layer is reworked during icehouse. - - LOG.debug(_("Deleting network %s"), id) - session = context.session - while True: - try: - # REVISIT(rkukura): Its not clear that - # with_lockmode('update') is really needed in this - # transaction, and if not, the semaphore can also be - # removed. - # - # REVISIT: Serialize this operation with a semaphore - # to prevent deadlock waiting to acquire a DB lock - # held by another thread in the same process, leading - # to 'lock wait timeout' errors. - # - # Process L3 first, since, depending on the L3 plugin, it may - # involve locking the db-access semaphore, sending RPC - # notifications, and/or calling delete_port on this plugin. - # Additionally, a rollback may not be enough to undo the - # deletion of a floating IP with certain L3 backends. - self._process_l3_delete(context, id) - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - # Get ports to auto-delete. - ports = (session.query(models_v2.Port). - enable_eagerloads(False). - filter_by(network_id=id). - with_lockmode('update').all()) - LOG.debug(_("Ports to auto-delete: %s"), ports) - only_auto_del = all(p.device_owner - in db_base_plugin_v2. - AUTO_DELETE_PORT_OWNERS - for p in ports) - if not only_auto_del: - LOG.debug(_("Tenant-owned ports exist")) - raise exc.NetworkInUse(net_id=id) - - # Get subnets to auto-delete. - subnets = (session.query(models_v2.Subnet). - enable_eagerloads(False). - filter_by(network_id=id). - with_lockmode('update').all()) - LOG.debug(_("Subnets to auto-delete: %s"), subnets) - - if not (ports or subnets): - network = self.get_network(context, id) - mech_context = driver_context.NetworkContext(self, - context, - network) - self.mechanism_manager.delete_network_precommit( - mech_context) - - self.type_manager.release_network_segments(session, id) - record = self._get_network(context, id) - LOG.debug(_("Deleting network record %s"), record) - session.delete(record) - - # The segment records are deleted via cascade from the - # network record, so explicit removal is not necessary. 
- LOG.debug(_("Committing transaction")) - break - except os_db_exception.DBError as e: - with excutils.save_and_reraise_exception() as ctxt: - if isinstance(e.inner_exception, sql_exc.IntegrityError): - ctxt.reraise = False - msg = _("A concurrent port creation has occurred") - LOG.warning(msg) - continue - - for port in ports: - try: - self.delete_port(context, port.id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Exception auto-deleting port %s"), - port.id) - - for subnet in subnets: - try: - self.delete_subnet(context, subnet.id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Exception auto-deleting subnet %s"), - subnet.id) - - try: - self.mechanism_manager.delete_network_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - # TODO(apech) - One or more mechanism driver failed to - # delete the network. Ideally we'd notify the caller of - # the fact that an error occurred. - LOG.error(_("mechanism_manager.delete_network_postcommit failed")) - self.notifier.network_delete(context, id) - - def create_subnet(self, context, subnet): - session = context.session - with session.begin(subtransactions=True): - result = super(Ml2Plugin, self).create_subnet(context, subnet) - self.extension_manager.process_create_subnet(session, subnet, - result) - mech_context = driver_context.SubnetContext(self, context, result) - self.mechanism_manager.create_subnet_precommit(mech_context) - - try: - self.mechanism_manager.create_subnet_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - with excutils.save_and_reraise_exception(): - LOG.error(_("mechanism_manager.create_subnet_postcommit " - "failed, deleting subnet '%s'"), result['id']) - self.delete_subnet(context, result['id']) - return result - - def update_subnet(self, context, id, subnet): - session = context.session - with session.begin(subtransactions=True): - original_subnet = super(Ml2Plugin, self).get_subnet(context, id) - updated_subnet = super(Ml2Plugin, self).update_subnet( - context, id, subnet) - self.extension_manager.process_update_subnet(session, subnet, - original_subnet) - mech_context = driver_context.SubnetContext( - self, context, updated_subnet, original_subnet=original_subnet) - self.mechanism_manager.update_subnet_precommit(mech_context) - - # TODO(apech) - handle errors raised by update_subnet, potentially - # by re-calling update_subnet with the previous attributes. For - # now the error is propogated to the caller, which is expected to - # either undo/retry the operation or delete the resource. - self.mechanism_manager.update_subnet_postcommit(mech_context) - return updated_subnet - - def delete_subnet(self, context, id): - # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet() - # function is not used because it deallocates the subnet's addresses - # from ports in the DB without invoking the derived class's - # update_port(), preventing mechanism drivers from being called. - # This approach should be revisited when the API layer is reworked - # during icehouse. - - LOG.debug(_("Deleting subnet %s"), id) - session = context.session - while True: - # REVISIT: Serialize this operation with a semaphore to - # prevent deadlock waiting to acquire a DB lock held by - # another thread in the same process, leading to 'lock - # wait timeout' errors. 
- with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - record = self._get_subnet(context, id) - subnet = self._make_subnet_dict(record, None) - # Get ports to auto-deallocate - allocated = (session.query(models_v2.IPAllocation). - filter_by(subnet_id=id). - join(models_v2.Port). - filter_by(network_id=subnet['network_id']). - with_lockmode('update').all()) - LOG.debug(_("Ports to auto-deallocate: %s"), allocated) - only_auto_del = all(not a.port_id or - a.ports.device_owner in db_base_plugin_v2. - AUTO_DELETE_PORT_OWNERS - for a in allocated) - if not only_auto_del: - LOG.debug(_("Tenant-owned ports exist")) - raise exc.SubnetInUse(subnet_id=id) - - if not allocated: - mech_context = driver_context.SubnetContext(self, context, - subnet) - self.mechanism_manager.delete_subnet_precommit( - mech_context) - - LOG.debug(_("Deleting subnet record")) - session.delete(record) - - LOG.debug(_("Committing transaction")) - break - - for a in allocated: - if a.port_id: - # calling update_port() for each allocation to remove the - # IP from the port and call the MechanismDrivers - data = {'port': - {'fixed_ips': [{'subnet_id': ip.subnet_id, - 'ip_address': ip.ip_address} - for ip in a.ports.fixed_ips - if ip.subnet_id != id]}} - try: - self.update_port(context, a.port_id, data) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Exception deleting fixed_ip from " - "port %s"), a.port_id) - session.delete(a) - - try: - self.mechanism_manager.delete_subnet_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - # TODO(apech) - One or more mechanism driver failed to - # delete the subnet. Ideally we'd notify the caller of - # the fact that an error occurred. - LOG.error(_("mechanism_manager.delete_subnet_postcommit failed")) - - def create_port(self, context, port): - attrs = port['port'] - attrs['status'] = const.PORT_STATUS_DOWN - - session = context.session - with session.begin(subtransactions=True): - self._ensure_default_security_group_on_port(context, port) - sgids = self._get_security_groups_on_port(context, port) - dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) - network = self.get_network(context, port['port']['network_id']) - # add by j00209498 ---begin - if('compute' in attrs['device_owner']): - binding_host = attrs.get('binding:host_id', None) - if cfg.CONF.cascade_str == 'cascading' and binding_host: - is_continue = True - is_continue = self._check_port_binding_az_valid( - context, - network, - binding_host) - if(not is_continue): - raise exc.PortBindAZError( - net_id=port['port']['network_id'], - host=binding_host) - # add by j00209498 ---end - result = super(Ml2Plugin, self).create_port(context, port) - self.extension_manager.process_create_port(session, attrs, result) - self._process_port_create_security_group(context, result, sgids) - - binding = db.add_port_binding(session, result['id']) - mech_context = driver_context.PortContext(self, context, result, - network, binding) - new_host_port = self._get_host_port_if_changed(mech_context, attrs) - self._process_port_binding(mech_context, attrs) - - result[addr_pair.ADDRESS_PAIRS] = ( - self._process_create_allowed_address_pairs( - context, result, - attrs.get(addr_pair.ADDRESS_PAIRS))) - self._process_port_create_extra_dhcp_opts(context, result, - dhcp_opts) - self.mechanism_manager.create_port_precommit(mech_context) - - # Notification must be sent after the above transaction is complete - self._notify_l3_agent_new_port(context, new_host_port) - - 
try: - self.mechanism_manager.create_port_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - with excutils.save_and_reraise_exception(): - LOG.error(_("mechanism_manager.create_port_postcommit " - "failed, deleting port '%s'"), result['id']) - self.delete_port(context, result['id']) - - # REVISIT(rkukura): Is there any point in calling this before - # a binding has been successfully established? - self.notify_security_groups_member_updated(context, result) - - try: - bound_context = self._bind_port_if_needed(mech_context) - except ml2_exc.MechanismDriverError: - with excutils.save_and_reraise_exception(): - LOG.error(_("_bind_port_if_needed " - "failed, deleting port '%s'"), result['id']) - self.delete_port(context, result['id']) - return bound_context._port - - def update_port(self, context, id, port): - attrs = port['port'] - need_port_update_notify = False - - session = context.session - - # REVISIT: Serialize this operation with a semaphore to - # prevent deadlock waiting to acquire a DB lock held by - # another thread in the same process, leading to 'lock wait - # timeout' errors. - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - port_db, binding = db.get_locked_port_and_binding(session, id) - if not port_db: - raise exc.PortNotFound(port_id=id) - original_port = self._make_port_dict(port_db) - updated_port = super(Ml2Plugin, self).update_port(context, id, - port) - self.extension_manager.process_update_port(session, attrs, - original_port) - if addr_pair.ADDRESS_PAIRS in port['port']: - need_port_update_notify |= ( - self.update_address_pairs_on_port(context, id, port, - original_port, - updated_port)) - need_port_update_notify |= self.update_security_group_on_port( - context, id, port, original_port, updated_port) - network = self.get_network(context, original_port['network_id']) - need_port_update_notify |= self._update_extra_dhcp_opts_on_port( - context, id, port, updated_port) - mech_context = driver_context.PortContext( - self, context, updated_port, network, binding, - original_port=original_port) - new_host_port = self._get_host_port_if_changed(mech_context, attrs) - need_port_update_notify |= self._process_port_binding( - mech_context, attrs) - self.mechanism_manager.update_port_precommit(mech_context) - - # Notification must be sent after the above transaction is complete - self._notify_l3_agent_new_port(context, new_host_port) - - # TODO(apech) - handle errors raised by update_port, potentially - # by re-calling update_port with the previous attributes. For - # now the error is propogated to the caller, which is expected to - # either undo/retry the operation or delete the resource. 
- self.mechanism_manager.update_port_postcommit(mech_context) - - need_port_update_notify |= self.is_security_group_member_updated( - context, original_port, updated_port) - - if original_port['admin_state_up'] != updated_port['admin_state_up']: - need_port_update_notify = True - - bound_port = self._bind_port_if_needed( - mech_context, - allow_notify=True, - need_notify=need_port_update_notify) - return bound_port._port - - def _process_dvr_port_binding(self, mech_context, context, attrs): - binding = mech_context._binding - port = mech_context.current - - if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: - binding.vif_details = '' - binding.vif_type = portbindings.VIF_TYPE_UNBOUND - binding.driver = None - binding.segment = None - binding.host = '' - - self._update_port_dict_binding(port, binding) - binding.host = attrs and attrs.get(portbindings.HOST_ID) - binding.router_id = attrs and attrs.get('device_id') - - def update_dvr_port_binding(self, context, id, port): - attrs = port['port'] - - host = attrs and attrs.get(portbindings.HOST_ID) - host_set = attributes.is_attr_set(host) - - if not host_set: - LOG.error(_("No Host supplied to bind DVR Port %s"), id) - return - - session = context.session - binding = db.get_dvr_port_binding_by_host(session, id, host) - device_id = attrs and attrs.get('device_id') - router_id = binding and binding.get('router_id') - update_required = (not binding or - binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or - router_id != device_id) - if update_required: - with session.begin(subtransactions=True): - try: - orig_port = super(Ml2Plugin, self).get_port(context, id) - except exc.PortNotFound: - LOG.debug("DVR Port %s has been deleted concurrently", id) - return - if not binding: - binding = db.ensure_dvr_port_binding( - session, id, host, router_id=device_id) - network = self.get_network(context, orig_port['network_id']) - mech_context = driver_context.DvrPortContext(self, - context, orig_port, network, - binding, original_port=orig_port) - self._process_dvr_port_binding(mech_context, context, attrs) - self.mechanism_manager.bind_port(mech_context) - # Now try to commit result of attempting to bind the port. - self._commit_dvr_port_binding(mech_context._plugin_context, - orig_port['id'], - host, - mech_context) - - def _commit_dvr_port_binding(self, plugin_context, - port_id, host, - mech_context): - session = plugin_context.session - new_binding = mech_context._binding - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - # Get the current port state and build a new PortContext - # reflecting this state as original state for subsequent - # mechanism driver update_port_*commit() calls. - cur_binding = db.get_dvr_port_binding_by_host(session, - port_id, - host) - # Commit our binding results only if port has not been - # successfully bound concurrently by another thread or - # process and no binding inputs have been changed. - commit = ((cur_binding.vif_type in - [portbindings.VIF_TYPE_UNBOUND, - portbindings.VIF_TYPE_BINDING_FAILED]) and - new_binding.host == cur_binding.host and - new_binding.vnic_type == cur_binding.vnic_type and - new_binding.profile == cur_binding.profile) - - if commit: - # Update the port's binding state with our binding - # results. 
- cur_binding.vif_type = new_binding.vif_type - cur_binding.vif_details = new_binding.vif_details - cur_binding.driver = new_binding.driver - cur_binding.segment = new_binding.segment - - def delete_port(self, context, id, l3_port_check=True): - LOG.debug(_("Deleting port %s"), id) - removed_routers = [] - l3plugin = manager.NeutronManager.get_service_plugins().get( - service_constants.L3_ROUTER_NAT) - is_dvr_enabled = utils.is_extension_supported( - l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS) - if l3plugin and l3_port_check: - l3plugin.prevent_l3_port_deletion(context, id) - - session = context.session - # REVISIT: Serialize this operation with a semaphore to - # prevent deadlock waiting to acquire a DB lock held by - # another thread in the same process, leading to 'lock wait - # timeout' errors. - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - port_db, binding = db.get_locked_port_and_binding(session, id) - if not port_db: - # the port existed when l3plugin.prevent_l3_port_deletion - # was called but now is already gone - LOG.debug(_("The port '%s' was deleted"), id) - return - port = self._make_port_dict(port_db) - - network = self.get_network(context, port['network_id']) - mech_context = None - device_owner = port['device_owner'] - if device_owner == const.DEVICE_OWNER_DVR_INTERFACE: - bindings = db.get_dvr_port_bindings(context.session, id) - for bind in bindings: - mech_context = driver_context.DvrPortContext( - self, context, port, network, bind) - self.mechanism_manager.delete_port_precommit(mech_context) - else: - mech_context = driver_context.PortContext(self, context, port, - network, binding) - if is_dvr_enabled and utils.is_dvr_serviced(device_owner): - router_info = l3plugin.dvr_deletens_if_no_port(context, id) - removed_routers += router_info - self.mechanism_manager.delete_port_precommit(mech_context) - self._delete_port_security_group_bindings(context, id) - if l3plugin: - router_ids = l3plugin.disassociate_floatingips( - context, id, do_notify=False) - if is_dvr_enabled: - l3plugin.dvr_vmarp_table_update(context, id, "del") - - LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s" - % {"port_id": id, "owner": device_owner}) - super(Ml2Plugin, self).delete_port(context, id) - - # now that we've left db transaction, we are safe to notify - if l3plugin: - l3plugin.notify_routers_updated(context, router_ids) - for router in removed_routers: - try: - l3plugin.remove_router_from_l3_agent( - context, router['agent_id'], router['router_id']) - except l3agentscheduler.RouterNotHostedByL3Agent: - # router may have been removed by another process - LOG.debug("Router %(id)s not hosted by L3 agent %(agent)s", - {'id': router['router_id'], - 'agent': router['agent_id']}) - try: - # for both normal and DVR Interface ports, only one invocation of - # delete_port_postcommit. We use gather/scatter technique for DVR - # interface ports, where the bindings are gathered in - # delete_port_precommit() call earlier and scattered as l2pop - # rules to cloud nodes in delete_port_postcommit() here - if mech_context: - self.mechanism_manager.delete_port_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - # TODO(apech) - One or more mechanism driver failed to - # delete the port. Ideally we'd notify the caller of the - # fact that an error occurred. 
- LOG.error(_("mechanism_manager.delete_port_postcommit failed for " - "port %s"), id) - self.notify_security_groups_member_updated(context, port) - - def get_bound_port_context(self, plugin_context, port_id, host=None): - session = plugin_context.session - with session.begin(subtransactions=True): - try: - port_db = (session.query(models_v2.Port). - enable_eagerloads(False). - filter(models_v2.Port.id.startswith(port_id)). - one()) - except sa_exc.NoResultFound: - return - except exc.MultipleResultsFound: - LOG.error(_("Multiple ports have port_id starting with %s"), - port_id) - return - port = self._make_port_dict(port_db) - network = self.get_network(plugin_context, port['network_id']) - if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - binding = db.get_dvr_port_binding_by_host( - session, port['id'], host) - if not binding: - LOG.error(_("Binding info for DVR port %s not found"), - port_id) - return None - port_context = driver_context.DvrPortContext( - self, plugin_context, port, network, binding) - else: - port_context = driver_context.PortContext( - self, plugin_context, port, network, port_db.port_binding) - - return self._bind_port_if_needed(port_context) - - def update_port_status(self, context, port_id, status, host=None): - """ - Returns port_id (non-truncated uuid) if the port exists. - Otherwise returns None. - """ - updated = False - session = context.session - # REVISIT: Serialize this operation with a semaphore to - # prevent deadlock waiting to acquire a DB lock held by - # another thread in the same process, leading to 'lock wait - # timeout' errors. - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - port = db.get_port(session, port_id) - if not port: - LOG.warning(_("Port %(port)s updated up by agent not found"), - {'port': port_id}) - return None - if (port.status != status and - port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE): - original_port = self._make_port_dict(port) - port.status = status - updated_port = self._make_port_dict(port) - network = self.get_network(context, - original_port['network_id']) - mech_context = driver_context.PortContext( - self, context, updated_port, network, port.port_binding, - original_port=original_port) - self.mechanism_manager.update_port_precommit(mech_context) - updated = True - elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - binding = db.get_dvr_port_binding_by_host( - session, port['id'], host) - if not binding: - return - binding['status'] = status - binding.update(binding) - updated = True - - if (updated and - port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - port = db.get_port(session, port_id) - if not port: - LOG.warning(_("Port %s not found during update"), port_id) - return - original_port = self._make_port_dict(port) - network = self.get_network(context, - original_port['network_id']) - port.status = db.generate_dvr_port_status(session, port['id']) - updated_port = self._make_port_dict(port) - mech_context = (driver_context.DvrPortContext( - self, context, updated_port, network, - binding, original_port=original_port)) - self.mechanism_manager.update_port_precommit(mech_context) - - if updated: - self.mechanism_manager.update_port_postcommit(mech_context) - - if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - db.delete_dvr_port_binding_if_stale(session, binding) - - return port['id'] - - def port_bound_to_host(self, 
context, port_id, host):
-        port = db.get_port(context.session, port_id)
-        if not port:
-            LOG.debug("No Port match for: %s", port_id)
-            return False
-        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            bindings = db.get_dvr_port_bindings(context.session, port_id)
-            for b in bindings:
-                if b.host == host:
-                    return True
-            LOG.debug("No binding found for DVR port %s", port['id'])
-            return False
-        else:
-            port_host = db.get_port_binding_host(port_id)
-            return (port_host == host)
-
-    def get_port_from_device(self, device):
-        port_id = self._device_to_port_id(device)
-        port = db.get_port_and_sgs(port_id)
-        if port:
-            port['device'] = device
-        return port
-
-    def _device_to_port_id(self, device):
-        # REVISIT(rkukura): Consider calling into MechanismDrivers to
-        # process device names, or having MechanismDrivers supply list
-        # of device prefixes to strip.
-        if device.startswith(const.TAP_DEVICE_PREFIX):
-            return device[len(const.TAP_DEVICE_PREFIX):]
-        else:
-            # REVISIT(irenab): Consider calling into bound MD to
-            # handle the get_device_details RPC, then remove the 'else' clause
-            if not uuidutils.is_uuid_like(device):
-                port = db.get_port_from_device_mac(device)
-                if port:
-                    return port.id
-        return device
diff --git a/juno-patches/neutron/neutron_timestamp_cascaded_patch/README.md b/juno-patches/neutron/neutron_timestamp_cascaded_patch/README.md
deleted file mode 100644
index 88aa6445..00000000
--- a/juno-patches/neutron/neutron_timestamp_cascaded_patch/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-OpenStack Neutron timestamp_cascaded_patch
-===============================
-
-  Neutron timestamp_cascaded_patch provides the query filter 'changes_since' for the "list ports" API. To achieve this, we add three fields ('created_at'/'updated_at'/'deleted_at') to the ports table in the Neutron DB and modify a few lines of code in the _apply_filters_to_query() function (a usage sketch follows the Manual Installation steps below). This patch should be applied to the Cascaded Neutron nodes.
-
-
-Key modules
------------
-
-* add three fields ('created_at'/'updated_at'/'deleted_at') to the ports table, and modify a few lines of code in the _apply_filters_to_query() function:
-    neutron/db/migration/alembic_migrations/versions/238cf36dab26_add_port_timestamp_revision.py
-    neutron/db/migration/alembic_migrations/core_init_ops.py
-    neutron/db/common_db_mixin.py
-    neutron/db/models_v2.py
-
-Requirements
-------------
-* OpenStack Neutron 2014.2 (Juno) has been installed.
-
-Installation
-------------
-
-We provide two ways to install the Neutron timestamp_cascaded_patch. This section guides you through installing it without modifying the configuration.
-
-* **Note:**
-
-    - Make sure you have an existing installation of **OpenStack Neutron, Juno version**.
-    - We recommend that you back up at least the following files before installation, because they will be overwritten or modified:
-      $NEUTRON_PARENT_DIR/neutron
-      (replace the $... with actual directory names.)
-
-* **Manual Installation**
-
-    - Navigate to the local repository and copy the contents of the 'neutron' sub-directory to the corresponding places in the existing Neutron installation, e.g.
-      ```cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR```
-      (replace the $... with actual directory name.)
-
-    - Upgrade the DB:
-      ```neutron-db-manage --config-file $CONFIG_FILE_PATH/neutron.conf --config-file $CONFIG_FILE_PATH/plugins/ml2/ml2_conf.ini upgrade head```
-      (replace the $... with actual directory name.)
-
-    - Restart the neutron-server.
-      ```service neutron-server restart```
-
-    - Done.
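To illustrate the 'changes_since' filter described above, here is a minimal usage sketch with python-neutronclient. It assumes the cascaded Neutron API forwards the changes_since query parameter to the plugin as a filter once this patch is installed; the endpoint, credentials, and timestamp are placeholders, not part of the patch.

```python
# Hypothetical sketch: list only the ports changed since a given time on a
# cascaded Neutron server that has the timestamp patch installed.
from neutronclient.v2_0 import client

# Placeholder credentials and endpoint; use your own deployment's values.
neutron = client.Client(username='admin',
                        password='password',
                        tenant_name='admin',
                        auth_url='http://127.0.0.1:5000/v2.0')

# The server parses this value with timeutils.parse_isotime(), so an ISO 8601
# timestamp is expected; only ports whose updated_at is at or after this time
# should be returned.
ports = neutron.list_ports(changes_since='2015-06-18T00:00:00Z')
print(ports['ports'])
```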
-
-* **Automatic Installation**
-
-    - Navigate to the installation directory and run the installation script.
-      ```
-      cd $LOCAL_REPOSITORY_DIR/installation
-      sudo bash ./install.sh
-      ```
-      (replace the $... with actual directory name.)
-
-    - Done. The installation script will automatically modify the Neutron code, upgrade the DB, and restart neutron-server.
-
-* **Troubleshooting**
-
-    If the automatic installation does not complete, please check the following:
-
-    - Make sure your OpenStack version is Juno.
-
-    - Check the variables at the beginning of the install.sh script. Your installation directories may be different from the default values we provide.
-
-    - The installation script automatically copies the modified code into $NEUTRON_PARENT_DIR/neutron.
-
-    - If the automatic installation does not work, try installing manually.
diff --git a/juno-patches/neutron/neutron_timestamp_cascaded_patch/installation/install.sh b/juno-patches/neutron/neutron_timestamp_cascaded_patch/installation/install.sh
deleted file mode 100644
index d94ef857..00000000
--- a/juno-patches/neutron/neutron_timestamp_cascaded_patch/installation/install.sh
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/bin/bash
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-# Copyright (c) 2014 Huawei Technologies.
-_NEUTRON_CONF_DIR="/etc/neutron"
-_NEUTRON_CONF_FILE='neutron.conf'
-_NEUTRON_INSTALL="/usr/lib/python2.7/dist-packages"
-_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron"
-# if you did not make changes to the installation files,
-# please do not edit the following directories.
-_CODE_DIR="../neutron/"
-_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascaded-timestamp-patch-installation-backup"
-if [[ ${EUID} -ne 0 ]]; then
-    echo "Please run as root."
-    exit 1
-fi
-
-##Redirecting output to logfile as well as stdout
-#exec > >(tee -a ${_SCRIPT_LOGFILE})
-#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2)
-
-cd `dirname $0`
-
-echo "checking installation directories..."
-if [ ! -d "${_NEUTRON_DIR}" ] ; then
-    echo "Could not find the neutron installation. Please check the variables in the beginning of the script."
-    echo "aborted."
-    exit 1
-fi
-if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then
-    echo "Could not find neutron config file. Please check the variables in the beginning of the script."
-    echo "aborted."
-    exit 1
-fi
-
-echo "checking previous installation..."
-if [ -d "${_BACKUP_DIR}/neutron" ] ; then
-    echo "It seems neutron-server-cascaded-timestamp-patch has already been installed!"
-    echo "Please check README for solution if this is not true."
-    exit 1
-fi
-
-echo "backing up current files that might be overwritten..."
-mkdir -p "${_BACKUP_DIR}"
-cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/"
-if [ $? -ne 0 ] ; then
-    rm -r "${_BACKUP_DIR}/neutron"
-    echo "Error in code backup, aborted."
-    exit 1
-fi
-
-echo "copying in new files..."
-cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}`
-if [ $? -ne 0 ] ; then
-    echo "Error in copying, aborted."
-    echo "Recovering original files..."
- cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron" - if [ $? -ne 0 ] ; then - echo "Recovering failed! Please install manually." - fi - exit 1 -fi - -echo "upgrade DB for cascaded-timestamp-patch..." -neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head -if [ $? -ne 0 ] ; then - echo "There was an error in upgrading DB for cascaded-timestamp-patch, please check cascacaded neutron server code manually." - exit 1 -fi - -echo "restarting cascaded neutron server..." -service neutron-server restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron server manually." - exit 1 -fi - -echo "restarting cascaded neutron-plugin-openvswitch-agent..." -service neutron-plugin-openvswitch-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron-plugin-openvswitch-agent manually." - exit 1 -fi - -echo "restarting cascaded neutron-l3-agent..." -service neutron-l3-agent restart -if [ $? -ne 0 ] ; then - echo "There was an error in restarting the service, please restart cascaded neutron-l3-agent manually." - exit 1 -fi - -echo "Completed." -echo "See README to get started." -exit 0 diff --git a/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/common_db_mixin.py b/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/common_db_mixin.py deleted file mode 100644 index ffd35dd7..00000000 --- a/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/common_db_mixin.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import weakref - -from sqlalchemy import sql - -from neutron.common import exceptions as n_exc -from neutron.db import sqlalchemyutils - -from neutron.openstack.common import timeutils - -class CommonDbMixin(object): - """Common methods used in core and service plugins.""" - # Plugins, mixin classes implementing extension will register - # hooks into the dict below for "augmenting" the "core way" of - # building a query for retrieving objects from a model class. - # To this aim, the register_model_query_hook and unregister_query_hook - # from this class should be invoked - _model_query_hooks = {} - - # This dictionary will store methods for extending attributes of - # api resources. Mixins can use this dict for adding their own methods - # TODO(salvatore-orlando): Avoid using class-level variables - _dict_extend_functions = {} - - @classmethod - def register_model_query_hook(cls, model, name, query_hook, filter_hook, - result_filters=None): - """Register a hook to be invoked when a query is executed. - - Add the hooks to the _model_query_hooks dict. Models are the keys - of this dict, whereas the value is another dict mapping hook names to - callables performing the hook. 
- Each hook has a "query" component, used to build the query expression - and a "filter" component, which is used to build the filter expression. - - Query hooks take as input the query being built and return a - transformed query expression. - - Filter hooks take as input the filter expression being built and return - a transformed filter expression - """ - model_hooks = cls._model_query_hooks.get(model) - if not model_hooks: - # add key to dict - model_hooks = {} - cls._model_query_hooks[model] = model_hooks - model_hooks[name] = {'query': query_hook, 'filter': filter_hook, - 'result_filters': result_filters} - - @property - def safe_reference(self): - """Return a weakref to the instance. - - Minimize the potential for the instance persisting - unnecessarily in memory by returning a weakref proxy that - won't prevent deallocation. - """ - return weakref.proxy(self) - - def _model_query(self, context, model): - query = context.session.query(model) - # define basic filter condition for model query - # NOTE(jkoelker) non-admin queries are scoped to their tenant_id - # NOTE(salvatore-orlando): unless the model allows for shared objects - query_filter = None - if not context.is_admin and hasattr(model, 'tenant_id'): - if hasattr(model, 'shared'): - query_filter = ((model.tenant_id == context.tenant_id) | - (model.shared == sql.true())) - else: - query_filter = (model.tenant_id == context.tenant_id) - # Execute query hooks registered from mixins and plugins - for _name, hooks in self._model_query_hooks.get(model, - {}).iteritems(): - query_hook = hooks.get('query') - if isinstance(query_hook, basestring): - query_hook = getattr(self, query_hook, None) - if query_hook: - query = query_hook(context, model, query) - - filter_hook = hooks.get('filter') - if isinstance(filter_hook, basestring): - filter_hook = getattr(self, filter_hook, None) - if filter_hook: - query_filter = filter_hook(context, model, query_filter) - - # NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the - # condition, raising an exception - if query_filter is not None: - query = query.filter(query_filter) - return query - - def _fields(self, resource, fields): - if fields: - return dict(((key, item) for key, item in resource.items() - if key in fields)) - return resource - - def _get_tenant_id_for_create(self, context, resource): - if context.is_admin and 'tenant_id' in resource: - tenant_id = resource['tenant_id'] - elif ('tenant_id' in resource and - resource['tenant_id'] != context.tenant_id): - reason = _('Cannot create resource for another tenant') - raise n_exc.AdminRequired(reason=reason) - else: - tenant_id = context.tenant_id - return tenant_id - - def _get_by_id(self, context, model, id): - query = self._model_query(context, model) - return query.filter(model.id == id).one() - - def _apply_filters_to_query(self, query, model, filters): - if filters: - for key, value in filters.iteritems(): - column = getattr(model, key, None) - if column: - query = query.filter(column.in_(value)) - if 'changes_since' in filters: - if isinstance(filters['changes_since'], list): - changes_since = timeutils.parse_isotime(filters['changes_since'][0]) - else: - changes_since = timeutils.parse_isotime(filters['changes_since']) - updated_at = timeutils.normalize_time(changes_since) - query = query.filter(model.updated_at >= updated_at) - for _name, hooks in self._model_query_hooks.get(model, - {}).iteritems(): - result_filter = hooks.get('result_filters', None) - if isinstance(result_filter, basestring): - result_filter = 
getattr(self, result_filter, None) - - if result_filter: - query = result_filter(query, filters) - return query - - def _apply_dict_extend_functions(self, resource_type, - response, db_object): - for func in self._dict_extend_functions.get( - resource_type, []): - args = (response, db_object) - if isinstance(func, basestring): - func = getattr(self, func, None) - else: - # must call unbound method - use self as 1st argument - args = (self,) + args - if func: - func(*args) - - def _get_collection_query(self, context, model, filters=None, - sorts=None, limit=None, marker_obj=None, - page_reverse=False): - collection = self._model_query(context, model) - collection = self._apply_filters_to_query(collection, model, filters) - if limit and page_reverse and sorts: - sorts = [(s[0], not s[1]) for s in sorts] - collection = sqlalchemyutils.paginate_query(collection, model, limit, - sorts, - marker_obj=marker_obj) - return collection - - def _get_collection(self, context, model, dict_func, filters=None, - fields=None, sorts=None, limit=None, marker_obj=None, - page_reverse=False): - query = self._get_collection_query(context, model, filters=filters, - sorts=sorts, - limit=limit, - marker_obj=marker_obj, - page_reverse=page_reverse) - items = [dict_func(c, fields) for c in query] - if limit and page_reverse: - items.reverse() - return items - - def _get_collection_count(self, context, model, filters=None): - return self._get_collection_query(context, model, filters).count() - - def _get_marker_obj(self, context, resource, limit, marker): - if limit and marker: - return getattr(self, '_get_%s' % resource)(context, marker) - return None - - def _filter_non_model_columns(self, data, model): - """Remove all the attributes from data which are not columns of - the model passed as second parameter. - """ - columns = [c.name for c in model.__table__.columns] - return dict((k, v) for (k, v) in - data.iteritems() if k in columns) diff --git a/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/migration/alembic_migrations/core_init_ops.py b/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/migration/alembic_migrations/core_init_ops.py deleted file mode 100644 index f3814c2a..00000000 --- a/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/migration/alembic_migrations/core_init_ops.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -# Initial operations for core resources -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - op.create_table( - 'networks', - sa.Column('tenant_id', sa.String(length=255), nullable=True), - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('status', sa.String(length=16), nullable=True), - sa.Column('admin_state_up', sa.Boolean(), nullable=True), - sa.Column('shared', sa.Boolean(), nullable=True), - sa.PrimaryKeyConstraint('id')) - - op.create_table( - 'ports', - sa.Column('created_at', sa.DateTime), - sa.Column('updated_at', sa.DateTime), - sa.Column('deleted_at', sa.DateTime), - sa.Column('tenant_id', sa.String(length=255), nullable=True), - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('network_id', sa.String(length=36), nullable=False), - sa.Column('mac_address', sa.String(length=32), nullable=False), - sa.Column('admin_state_up', sa.Boolean(), nullable=False), - sa.Column('status', sa.String(length=16), nullable=False), - sa.Column('device_id', sa.String(length=255), nullable=False), - sa.Column('device_owner', sa.String(length=255), nullable=False), - sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ), - sa.PrimaryKeyConstraint('id')) - - op.create_table( - 'subnets', - sa.Column('tenant_id', sa.String(length=255), nullable=True), - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('name', sa.String(length=255), nullable=True), - sa.Column('network_id', sa.String(length=36), nullable=True), - sa.Column('ip_version', sa.Integer(), nullable=False), - sa.Column('cidr', sa.String(length=64), nullable=False), - sa.Column('gateway_ip', sa.String(length=64), nullable=True), - sa.Column('enable_dhcp', sa.Boolean(), nullable=True), - sa.Column('shared', sa.Boolean(), nullable=True), - sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ), - sa.PrimaryKeyConstraint('id')) - - op.create_table( - 'dnsnameservers', - sa.Column('address', sa.String(length=128), nullable=False), - sa.Column('subnet_id', sa.String(length=36), nullable=False), - sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('address', 'subnet_id')) - - op.create_table( - 'ipallocationpools', - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('subnet_id', sa.String(length=36), nullable=True), - sa.Column('first_ip', sa.String(length=64), nullable=False), - sa.Column('last_ip', sa.String(length=64), nullable=False), - sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id')) - - op.create_table( - 'subnetroutes', - sa.Column('destination', sa.String(length=64), nullable=False), - sa.Column('nexthop', sa.String(length=64), nullable=False), - sa.Column('subnet_id', sa.String(length=36), nullable=False), - sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id')) - - op.create_table( - 'ipallocations', - sa.Column('port_id', sa.String(length=36), nullable=True), - sa.Column('ip_address', sa.String(length=64), nullable=False), - sa.Column('subnet_id', sa.String(length=36), nullable=False), - sa.Column('network_id', sa.String(length=36), nullable=False), - sa.ForeignKeyConstraint(['network_id'], ['networks.id'], - ondelete='CASCADE'), - sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), - 
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id')) - - op.create_table( - 'ipavailabilityranges', - sa.Column('allocation_pool_id', sa.String(length=36), nullable=False), - sa.Column('first_ip', sa.String(length=64), nullable=False), - sa.Column('last_ip', sa.String(length=64), nullable=False), - sa.ForeignKeyConstraint(['allocation_pool_id'], - ['ipallocationpools.id'], ondelete='CASCADE'), - sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip')) - - op.create_table( - 'networkdhcpagentbindings', - sa.Column('network_id', sa.String(length=36), nullable=False), - sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False), - sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'], - ondelete='CASCADE'), - sa.ForeignKeyConstraint(['network_id'], ['networks.id'], - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id')) - - -def downgrade(): - op.drop_table('networkdhcpagentbindings') - op.drop_table('ipavailabilityranges') - op.drop_table('ipallocations') - op.drop_table('subnetroutes') - op.drop_table('ipallocationpools') - op.drop_table('dnsnameservers') - op.drop_table('subnets') - op.drop_table('ports') - op.drop_table('networks') diff --git a/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/migration/alembic_migrations/versions/238cf36dab26_add_port_timestamp_revision.py b/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/migration/alembic_migrations/versions/238cf36dab26_add_port_timestamp_revision.py deleted file mode 100644 index 111c4e84..00000000 --- a/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/migration/alembic_migrations/versions/238cf36dab26_add_port_timestamp_revision.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""add port timestamp revision - -Revision ID: 238cf36dab26 -Revises: juno -Create Date: 2014-11-27 17:04:05.835703 - -""" - -# revision identifiers, used by Alembic. -revision = '238cf36dab26' -down_revision = 'juno' - -from alembic import op -import sqlalchemy as sa - - - -def upgrade(): - ### commands auto generated by Alembic - please adjust! ### - op.add_column('ports', sa.Column('created_at', sa.DateTime(), nullable=True)) - op.add_column('ports', sa.Column('updated_at', sa.DateTime(), nullable=True)) - ### end Alembic commands ### - - -def downgrade(): - ### commands auto generated by Alembic - please adjust! ### - op.drop_column('ports', 'updated_at') - op.drop_column('ports', 'created_at') - ### end Alembic commands ### diff --git a/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/models_v2.py b/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/models_v2.py deleted file mode 100644 index a8d1e9a9..00000000 --- a/juno-patches/neutron/neutron_timestamp_cascaded_patch/neutron/db/models_v2.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa -from sqlalchemy import orm - -from neutron.common import constants -from neutron.db import model_base -from neutron.openstack.common import uuidutils - -from neutron.openstack.common import timeutils - - -class HasTenant(object): - """Tenant mixin, add to subclasses that have a tenant.""" - - # NOTE(jkoelker) tenant_id is just a free form string ;( - tenant_id = sa.Column(sa.String(255)) - - -class HasId(object): - """id mixin, add to subclasses that have an id.""" - - id = sa.Column(sa.String(36), - primary_key=True, - default=uuidutils.generate_uuid) - - -class HasStatusDescription(object): - """Status with description mixin.""" - - status = sa.Column(sa.String(16), nullable=False) - status_description = sa.Column(sa.String(255)) - - -class IPAvailabilityRange(model_base.BASEV2): - """Internal representation of available IPs for Neutron subnets. - - Allocation - first entry from the range will be allocated. - If the first entry is equal to the last entry then this row - will be deleted. - Recycling ips involves reading the IPAllocationPool and IPAllocation tables - and inserting ranges representing available ips. This happens after the - final allocation is pulled from this table and a new ip allocation is - requested. Any contiguous ranges of available ips will be inserted as a - single range. - """ - - allocation_pool_id = sa.Column(sa.String(36), - sa.ForeignKey('ipallocationpools.id', - ondelete="CASCADE"), - nullable=False, - primary_key=True) - first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) - last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) - - def __repr__(self): - return "%s - %s" % (self.first_ip, self.last_ip) - - -class IPAllocationPool(model_base.BASEV2, HasId): - """Representation of an allocation pool in a Neutron subnet.""" - - subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', - ondelete="CASCADE"), - nullable=True) - first_ip = sa.Column(sa.String(64), nullable=False) - last_ip = sa.Column(sa.String(64), nullable=False) - available_ranges = orm.relationship(IPAvailabilityRange, - backref='ipallocationpool', - lazy="joined", - cascade='all, delete-orphan') - - def __repr__(self): - return "%s - %s" % (self.first_ip, self.last_ip) - - -class IPAllocation(model_base.BASEV2): - """Internal representation of allocated IP addresses in a Neutron subnet. 
- """ - - port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', - ondelete="CASCADE"), - nullable=True) - ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) - subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', - ondelete="CASCADE"), - nullable=False, primary_key=True) - network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", - ondelete="CASCADE"), - nullable=False, primary_key=True) - - -class Route(object): - """mixin of a route.""" - - destination = sa.Column(sa.String(64), nullable=False, primary_key=True) - nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True) - - -class SubnetRoute(model_base.BASEV2, Route): - - subnet_id = sa.Column(sa.String(36), - sa.ForeignKey('subnets.id', - ondelete="CASCADE"), - primary_key=True) - - -class Port(model_base.BASEV2, HasId, HasTenant): - """Represents a port on a Neutron v2 network.""" - - name = sa.Column(sa.String(255)) - network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), - nullable=False) - fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined') - mac_address = sa.Column(sa.String(32), nullable=False) - admin_state_up = sa.Column(sa.Boolean(), nullable=False) - status = sa.Column(sa.String(16), nullable=False) - device_id = sa.Column(sa.String(255), nullable=False) - device_owner = sa.Column(sa.String(255), nullable=False) - created_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow()) - updated_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow(), - onupdate=lambda: timeutils.utcnow()) - - def __init__(self, id=None, tenant_id=None, name=None, network_id=None, - mac_address=None, admin_state_up=None, status=None, - device_id=None, device_owner=None, fixed_ips=None): - self.id = id - self.tenant_id = tenant_id - self.name = name - self.network_id = network_id - self.mac_address = mac_address - self.admin_state_up = admin_state_up - self.device_owner = device_owner - self.device_id = device_id - # Since this is a relationship only set it if one is passed in. - if fixed_ips: - self.fixed_ips = fixed_ips - - # NOTE(arosen): status must be set last as an event is triggered on! - self.status = status - - -class DNSNameServer(model_base.BASEV2): - """Internal representation of a DNS nameserver.""" - - address = sa.Column(sa.String(128), nullable=False, primary_key=True) - subnet_id = sa.Column(sa.String(36), - sa.ForeignKey('subnets.id', - ondelete="CASCADE"), - primary_key=True) - - -class Subnet(model_base.BASEV2, HasId, HasTenant): - """Represents a neutron subnet. - - When a subnet is created the first and last entries will be created. These - are used for the IP allocation. 
- """ - - name = sa.Column(sa.String(255)) - network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id')) - ip_version = sa.Column(sa.Integer, nullable=False) - cidr = sa.Column(sa.String(64), nullable=False) - gateway_ip = sa.Column(sa.String(64)) - allocation_pools = orm.relationship(IPAllocationPool, - backref='subnet', - lazy="joined", - cascade='delete') - enable_dhcp = sa.Column(sa.Boolean()) - dns_nameservers = orm.relationship(DNSNameServer, - backref='subnet', - cascade='all, delete, delete-orphan') - routes = orm.relationship(SubnetRoute, - backref='subnet', - cascade='all, delete, delete-orphan') - shared = sa.Column(sa.Boolean) - ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, - constants.DHCPV6_STATEFUL, - constants.DHCPV6_STATELESS, - name='ipv6_ra_modes'), nullable=True) - ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, - constants.DHCPV6_STATEFUL, - constants.DHCPV6_STATELESS, - name='ipv6_address_modes'), nullable=True) - - -class Network(model_base.BASEV2, HasId, HasTenant): - """Represents a v2 neutron network.""" - - name = sa.Column(sa.String(255)) - ports = orm.relationship(Port, backref='networks') - subnets = orm.relationship(Subnet, backref='networks', - lazy="joined") - status = sa.Column(sa.String(16)) - admin_state_up = sa.Column(sa.Boolean) - shared = sa.Column(sa.Boolean) diff --git a/juno-patches/nova/nova_scheduling_patch/nova/conductor/manager.py b/juno-patches/nova/nova_scheduling_patch/nova/conductor/manager.py deleted file mode 100755 index 270b1cdd..00000000 --- a/juno-patches/nova/nova_scheduling_patch/nova/conductor/manager.py +++ /dev/null @@ -1,769 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Handles database requests from other nova services.""" - -import copy -import itertools - -from oslo import messaging -import six - -from nova.api.ec2 import ec2utils -from nova import block_device -from nova.cells import rpcapi as cells_rpcapi -from nova.compute import api as compute_api -from nova.compute import rpcapi as compute_rpcapi -from nova.compute import task_states -from nova.compute import utils as compute_utils -from nova.compute import vm_states -from nova.conductor.tasks import live_migrate -from nova.db import base -from nova import exception -from nova.i18n import _ -from nova import image -from nova import manager -from nova import network -from nova.network.security_group import openstack_driver -from nova import notifications -from nova import objects -from nova.objects import base as nova_object -from nova.openstack.common import excutils -from nova.openstack.common import jsonutils -from nova.openstack.common import log as logging -from nova.openstack.common import timeutils -from nova import quota -from nova.scheduler import client as scheduler_client -from nova.scheduler import driver as scheduler_driver -from nova.scheduler import utils as scheduler_utils - -LOG = logging.getLogger(__name__) - -# Instead of having a huge list of arguments to instance_update(), we just -# accept a dict of fields to update and use this whitelist to validate it. -allowed_updates = ['task_state', 'vm_state', 'expected_task_state', - 'power_state', 'access_ip_v4', 'access_ip_v6', - 'launched_at', 'terminated_at', 'host', 'node', - 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb', - 'instance_type_id', 'root_device_name', 'launched_on', - 'progress', 'vm_mode', 'default_ephemeral_device', - 'default_swap_device', 'root_device_name', - 'system_metadata', 'updated_at' - ] - -# Fields that we want to convert back into a datetime object. -datetime_fields = ['launched_at', 'terminated_at', 'updated_at'] - - -class ConductorManager(manager.Manager): - """Mission: Conduct things. - - The methods in the base API for nova-conductor are various proxy operations - performed on behalf of the nova-compute service running on compute nodes. - Compute nodes are not allowed to directly access the database, so this set - of methods allows them to get specific work done without locally accessing - the database. - - The nova-conductor service also exposes an API in the 'compute_task' - namespace. See the ComputeTaskManager class for details. - """ - - target = messaging.Target(version='2.0') - - def __init__(self, *args, **kwargs): - super(ConductorManager, self).__init__(service_name='conductor', - *args, **kwargs) - self.security_group_api = ( - openstack_driver.get_openstack_security_group_driver()) - self._network_api = None - self._compute_api = None - self.compute_task_mgr = ComputeTaskManager() - self.cells_rpcapi = cells_rpcapi.CellsAPI() - self.additional_endpoints.append(self.compute_task_mgr) - - @property - def network_api(self): - # NOTE(danms): We need to instantiate our network_api on first use - # to avoid the circular dependency that exists between our init - # and network_api's - if self._network_api is None: - self._network_api = network.API() - return self._network_api - - @property - def compute_api(self): - if self._compute_api is None: - self._compute_api = compute_api.API() - return self._compute_api - - def ping(self, context, arg): - # NOTE(russellb) This method can be removed in 2.0 of this API. It is - # now a part of the base rpc API. 
- return jsonutils.to_primitive({'service': 'conductor', 'arg': arg}) - - @messaging.expected_exceptions(KeyError, ValueError, - exception.InvalidUUID, - exception.InstanceNotFound, - exception.UnexpectedTaskStateError) - def instance_update(self, context, instance_uuid, - updates, service): - for key, value in updates.iteritems(): - if key not in allowed_updates: - LOG.error(_("Instance update attempted for " - "'%(key)s' on %(instance_uuid)s"), - {'key': key, 'instance_uuid': instance_uuid}) - raise KeyError("unexpected update keyword '%s'" % key) - if key in datetime_fields and isinstance(value, six.string_types): - updates[key] = timeutils.parse_strtime(value) - - old_ref, instance_ref = self.db.instance_update_and_get_original( - context, instance_uuid, updates) - notifications.send_update(context, old_ref, instance_ref, service) - return jsonutils.to_primitive(instance_ref) - - @messaging.expected_exceptions(exception.InstanceNotFound) - def instance_get_by_uuid(self, context, instance_uuid, - columns_to_join): - return jsonutils.to_primitive( - self.db.instance_get_by_uuid(context, instance_uuid, - columns_to_join)) - - def instance_get_all_by_host(self, context, host, node, - columns_to_join): - if node is not None: - result = self.db.instance_get_all_by_host_and_node( - context.elevated(), host, node) - else: - result = self.db.instance_get_all_by_host(context.elevated(), host, - columns_to_join) - return jsonutils.to_primitive(result) - - def migration_get_in_progress_by_host_and_node(self, context, - host, node): - migrations = self.db.migration_get_in_progress_by_host_and_node( - context, host, node) - return jsonutils.to_primitive(migrations) - - @messaging.expected_exceptions(exception.AggregateHostExists) - def aggregate_host_add(self, context, aggregate, host): - host_ref = self.db.aggregate_host_add(context.elevated(), - aggregate['id'], host) - - return jsonutils.to_primitive(host_ref) - - @messaging.expected_exceptions(exception.AggregateHostNotFound) - def aggregate_host_delete(self, context, aggregate, host): - self.db.aggregate_host_delete(context.elevated(), - aggregate['id'], host) - - def aggregate_metadata_get_by_host(self, context, host, - key='availability_zone'): - result = self.db.aggregate_metadata_get_by_host(context, host, key) - return jsonutils.to_primitive(result) - - def bw_usage_update(self, context, uuid, mac, start_period, - bw_in, bw_out, last_ctr_in, last_ctr_out, - last_refreshed, update_cells): - if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4: - self.db.bw_usage_update(context, uuid, mac, start_period, - bw_in, bw_out, last_ctr_in, last_ctr_out, - last_refreshed, - update_cells=update_cells) - usage = self.db.bw_usage_get(context, uuid, start_period, mac) - return jsonutils.to_primitive(usage) - - def provider_fw_rule_get_all(self, context): - rules = self.db.provider_fw_rule_get_all(context) - return jsonutils.to_primitive(rules) - - # NOTE(danms): This can be removed in version 3.0 of the RPC API - def agent_build_get_by_triple(self, context, hypervisor, os, architecture): - info = self.db.agent_build_get_by_triple(context, hypervisor, os, - architecture) - return jsonutils.to_primitive(info) - - def block_device_mapping_update_or_create(self, context, values, create): - if create is None: - bdm = self.db.block_device_mapping_update_or_create(context, - values) - elif create is True: - bdm = self.db.block_device_mapping_create(context, values) - else: - bdm = self.db.block_device_mapping_update(context, - values['id'], - values) 
- bdm_obj = objects.BlockDeviceMapping._from_db_object( - context, objects.BlockDeviceMapping(), bdm) - self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm_obj, - create=create) - - def block_device_mapping_get_all_by_instance(self, context, instance, - legacy): - bdms = self.db.block_device_mapping_get_all_by_instance( - context, instance['uuid']) - if legacy: - bdms = block_device.legacy_mapping(bdms) - return jsonutils.to_primitive(bdms) - - def instance_get_all_by_filters(self, context, filters, sort_key, - sort_dir, columns_to_join, - use_slave): - result = self.db.instance_get_all_by_filters( - context, filters, sort_key, sort_dir, - columns_to_join=columns_to_join, use_slave=use_slave) - return jsonutils.to_primitive(result) - - def instance_get_active_by_window(self, context, begin, end, - project_id, host): - # Unused, but cannot remove until major RPC version bump - result = self.db.instance_get_active_by_window(context, begin, end, - project_id, host) - return jsonutils.to_primitive(result) - - def instance_get_active_by_window_joined(self, context, begin, end, - project_id, host): - result = self.db.instance_get_active_by_window_joined( - context, begin, end, project_id, host) - return jsonutils.to_primitive(result) - - def instance_destroy(self, context, instance): - result = self.db.instance_destroy(context, instance['uuid']) - return jsonutils.to_primitive(result) - - def instance_fault_create(self, context, values): - result = self.db.instance_fault_create(context, values) - return jsonutils.to_primitive(result) - - # NOTE(kerrin): The last_refreshed argument is unused by this method - # and can be removed in v3.0 of the RPC API. - def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req, - wr_bytes, instance, last_refreshed, update_totals): - vol_usage = self.db.vol_usage_update(context, vol_id, - rd_req, rd_bytes, - wr_req, wr_bytes, - instance['uuid'], - instance['project_id'], - instance['user_id'], - instance['availability_zone'], - update_totals) - - # We have just updated the database, so send the notification now - self.notifier.info(context, 'volume.usage', - compute_utils.usage_volume_info(vol_usage)) - - @messaging.expected_exceptions(exception.ComputeHostNotFound, - exception.HostBinaryNotFound) - def service_get_all_by(self, context, topic, host, binary): - if not any((topic, host, binary)): - result = self.db.service_get_all(context) - elif all((topic, host)): - if topic == 'compute': - result = self.db.service_get_by_compute_host(context, host) - # FIXME(comstud) Potentially remove this on bump to v3.0 - result = [result] - else: - result = self.db.service_get_by_host_and_topic(context, - host, topic) - elif all((host, binary)): - result = self.db.service_get_by_args(context, host, binary) - elif topic: - result = self.db.service_get_all_by_topic(context, topic) - elif host: - result = self.db.service_get_all_by_host(context, host) - - return jsonutils.to_primitive(result) - - @messaging.expected_exceptions(exception.InstanceActionNotFound) - def action_event_start(self, context, values): - evt = self.db.action_event_start(context, values) - return jsonutils.to_primitive(evt) - - @messaging.expected_exceptions(exception.InstanceActionNotFound, - exception.InstanceActionEventNotFound) - def action_event_finish(self, context, values): - evt = self.db.action_event_finish(context, values) - return jsonutils.to_primitive(evt) - - def service_create(self, context, values): - svc = self.db.service_create(context, values) - return 
jsonutils.to_primitive(svc) - - @messaging.expected_exceptions(exception.ServiceNotFound) - def service_destroy(self, context, service_id): - self.db.service_destroy(context, service_id) - - def compute_node_create(self, context, values): - result = self.db.compute_node_create(context, values) - return jsonutils.to_primitive(result) - - def compute_node_update(self, context, node, values): - result = self.db.compute_node_update(context, node['id'], values) - return jsonutils.to_primitive(result) - - def compute_node_delete(self, context, node): - result = self.db.compute_node_delete(context, node['id']) - return jsonutils.to_primitive(result) - - @messaging.expected_exceptions(exception.ServiceNotFound) - def service_update(self, context, service, values): - svc = self.db.service_update(context, service['id'], values) - return jsonutils.to_primitive(svc) - - def task_log_get(self, context, task_name, begin, end, host, state): - result = self.db.task_log_get(context, task_name, begin, end, host, - state) - return jsonutils.to_primitive(result) - - def task_log_begin_task(self, context, task_name, begin, end, host, - task_items, message): - result = self.db.task_log_begin_task(context.elevated(), task_name, - begin, end, host, task_items, - message) - return jsonutils.to_primitive(result) - - def task_log_end_task(self, context, task_name, begin, end, host, - errors, message): - result = self.db.task_log_end_task(context.elevated(), task_name, - begin, end, host, errors, message) - return jsonutils.to_primitive(result) - - def notify_usage_exists(self, context, instance, current_period, - ignore_missing_network_data, - system_metadata, extra_usage_info): - compute_utils.notify_usage_exists(self.notifier, context, instance, - current_period, - ignore_missing_network_data, - system_metadata, extra_usage_info) - - def security_groups_trigger_handler(self, context, event, args): - self.security_group_api.trigger_handler(event, context, *args) - - def security_groups_trigger_members_refresh(self, context, group_ids): - self.security_group_api.trigger_members_refresh(context, group_ids) - - def network_migrate_instance_start(self, context, instance, migration): - self.network_api.migrate_instance_start(context, instance, migration) - - def network_migrate_instance_finish(self, context, instance, migration): - self.network_api.migrate_instance_finish(context, instance, migration) - - def quota_commit(self, context, reservations, project_id=None, - user_id=None): - quota.QUOTAS.commit(context, reservations, project_id=project_id, - user_id=user_id) - - def quota_rollback(self, context, reservations, project_id=None, - user_id=None): - quota.QUOTAS.rollback(context, reservations, project_id=project_id, - user_id=user_id) - - def get_ec2_ids(self, context, instance): - ec2_ids = {} - - ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid']) - ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context, - instance['image_ref']) - for image_type in ['kernel', 'ramdisk']: - image_id = instance.get('%s_id' % image_type) - if image_id is not None: - ec2_image_type = ec2utils.image_type(image_type) - ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id, - ec2_image_type) - ec2_ids['%s-id' % image_type] = ec2_id - - return ec2_ids - - def compute_unrescue(self, context, instance): - self.compute_api.unrescue(context, instance) - - def _object_dispatch(self, target, method, context, args, kwargs): - """Dispatch a call to an object method. 
- - This ensures that object methods get called and any exception - that is raised gets wrapped in an ExpectedException for forwarding - back to the caller (without spamming the conductor logs). - """ - try: - # NOTE(danms): Keep the getattr inside the try block since - # a missing method is really a client problem - return getattr(target, method)(context, *args, **kwargs) - except Exception: - raise messaging.ExpectedException() - - def object_class_action(self, context, objname, objmethod, - objver, args, kwargs): - """Perform a classmethod action on an object.""" - objclass = nova_object.NovaObject.obj_class_from_name(objname, - objver) - result = self._object_dispatch(objclass, objmethod, context, - args, kwargs) - # NOTE(danms): The RPC layer will convert to primitives for us, - # but in this case, we need to honor the version the client is - # asking for, so we do it before returning here. - return (result.obj_to_primitive(target_version=objver) - if isinstance(result, nova_object.NovaObject) else result) - - def object_action(self, context, objinst, objmethod, args, kwargs): - """Perform an action on an object.""" - oldobj = objinst.obj_clone() - result = self._object_dispatch(objinst, objmethod, context, - args, kwargs) - updates = dict() - # NOTE(danms): Diff the object with the one passed to us and - # generate a list of changes to forward back - for name, field in objinst.fields.items(): - if not objinst.obj_attr_is_set(name): - # Avoid demand-loading anything - continue - if (not oldobj.obj_attr_is_set(name) or - oldobj[name] != objinst[name]): - updates[name] = field.to_primitive(objinst, name, - objinst[name]) - # This is safe since a field named this would conflict with the - # method anyway - updates['obj_what_changed'] = objinst.obj_what_changed() - return updates, result - - def object_backport(self, context, objinst, target_version): - return objinst.obj_to_primitive(target_version=target_version) - - -class ComputeTaskManager(base.Base): - """Namespace for compute methods. - - This class presents an rpc API for nova-conductor under the 'compute_task' - namespace. The methods here are compute operations that are invoked - by the API service. These methods see the operation to completion, which - may involve coordinating activities on multiple compute nodes. 
- """ - - target = messaging.Target(namespace='compute_task', version='1.9') - - def __init__(self): - super(ComputeTaskManager, self).__init__() - self.compute_rpcapi = compute_rpcapi.ComputeAPI() - self.image_api = image.API() - self.scheduler_client = scheduler_client.SchedulerClient() - - @messaging.expected_exceptions(exception.NoValidHost, - exception.ComputeServiceUnavailable, - exception.InvalidHypervisorType, - exception.InvalidCPUInfo, - exception.UnableToMigrateToSelf, - exception.DestinationHypervisorTooOld, - exception.InvalidLocalStorage, - exception.InvalidSharedStorage, - exception.HypervisorUnavailable, - exception.InstanceNotRunning, - exception.MigrationPreCheckError) - def migrate_server(self, context, instance, scheduler_hint, live, rebuild, - flavor, block_migration, disk_over_commit, reservations=None): - if instance and not isinstance(instance, nova_object.NovaObject): - # NOTE(danms): Until v2 of the RPC API, we need to tolerate - # old-world instance objects here - attrs = ['metadata', 'system_metadata', 'info_cache', - 'security_groups'] - instance = objects.Instance._from_db_object( - context, objects.Instance(), instance, - expected_attrs=attrs) - if live and not rebuild and not flavor: - self._live_migrate(context, instance, scheduler_hint, - block_migration, disk_over_commit) - elif not live and not rebuild and flavor: - instance_uuid = instance['uuid'] - with compute_utils.EventReporter(context, 'cold_migrate', - instance_uuid): - self._cold_migrate(context, instance, flavor, - scheduler_hint['filter_properties'], - reservations) - else: - raise NotImplementedError() - - def _cold_migrate(self, context, instance, flavor, filter_properties, - reservations): - image_ref = instance.image_ref - image = compute_utils.get_image_metadata( - context, self.image_api, image_ref, instance) - - request_spec = scheduler_utils.build_request_spec( - context, image, [instance], instance_type=flavor) - - quotas = objects.Quotas.from_reservations(context, - reservations, - instance=instance) - try: - scheduler_utils.populate_retry(filter_properties, instance['uuid']) - hosts = self.scheduler_client.select_destinations( - context, request_spec, filter_properties) - host_state = hosts[0] - except exception.NoValidHost as ex: - vm_state = instance['vm_state'] - if not vm_state: - vm_state = vm_states.ACTIVE - updates = {'vm_state': vm_state, 'task_state': None} - self._set_vm_state_and_notify(context, 'migrate_server', - updates, ex, request_spec) - quotas.rollback() - - # if the flavor IDs match, it's migrate; otherwise resize - if flavor['id'] == instance['instance_type_id']: - msg = _("No valid host found for cold migrate") - else: - msg = _("No valid host found for resize") - raise exception.NoValidHost(reason=msg) - - try: - scheduler_utils.populate_filter_properties(filter_properties, - host_state) - # context is not serializable - filter_properties.pop('context', None) - - # TODO(timello): originally, instance_type in request_spec - # on compute.api.resize does not have 'extra_specs', so we - # remove it for now to keep tests backward compatibility. 
- request_spec['instance_type'].pop('extra_specs') - - (host, node) = (host_state['host'], host_state['nodename']) - self.compute_rpcapi.prep_resize( - context, image, instance, - flavor, host, - reservations, request_spec=request_spec, - filter_properties=filter_properties, node=node) - except Exception as ex: - with excutils.save_and_reraise_exception(): - updates = {'vm_state': instance['vm_state'], - 'task_state': None} - self._set_vm_state_and_notify(context, 'migrate_server', - updates, ex, request_spec) - quotas.rollback() - - def _set_vm_state_and_notify(self, context, method, updates, ex, - request_spec): - scheduler_utils.set_vm_state_and_notify( - context, 'compute_task', method, updates, - ex, request_spec, self.db) - - def _live_migrate(self, context, instance, scheduler_hint, - block_migration, disk_over_commit): - destination = scheduler_hint.get("host") - try: - live_migrate.execute(context, instance, destination, - block_migration, disk_over_commit) - except (exception.NoValidHost, - exception.ComputeServiceUnavailable, - exception.InvalidHypervisorType, - exception.InvalidCPUInfo, - exception.UnableToMigrateToSelf, - exception.DestinationHypervisorTooOld, - exception.InvalidLocalStorage, - exception.InvalidSharedStorage, - exception.HypervisorUnavailable, - exception.InstanceNotRunning, - exception.MigrationPreCheckError) as ex: - with excutils.save_and_reraise_exception(): - # TODO(johngarbutt) - eventually need instance actions here - request_spec = {'instance_properties': { - 'uuid': instance['uuid'], }, - } - scheduler_utils.set_vm_state_and_notify(context, - 'compute_task', 'migrate_server', - dict(vm_state=instance['vm_state'], - task_state=None, - expected_task_state=task_states.MIGRATING,), - ex, request_spec, self.db) - except Exception as ex: - LOG.error(_('Migration of instance %(instance_id)s to host' - ' %(dest)s unexpectedly failed.'), - {'instance_id': instance['uuid'], 'dest': destination}, - exc_info=True) - raise exception.MigrationError(reason=ex) - - def build_instances(self, context, instances, image, filter_properties, - admin_password, injected_files, requested_networks, - security_groups, block_device_mapping=None, legacy_bdm=True): - # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version - # 2.0 of the RPC API. - request_spec = scheduler_utils.build_request_spec(context, image, - instances) - # TODO(danms): Remove this in version 2.0 of the RPC API - if (requested_networks and - not isinstance(requested_networks, - objects.NetworkRequestList)): - requested_networks = objects.NetworkRequestList( - objects=[objects.NetworkRequest.from_tuple(t) - for t in requested_networks]) - - try: - # check retry policy. Rather ugly use of instances[0]... - # but if we've exceeded max retries... then we really only - # have a single instance. 
- scheduler_utils.populate_retry(filter_properties, - instances[0].uuid) - hosts = self.scheduler_client.select_destinations(context, - request_spec, filter_properties) - except Exception as exc: - for instance in instances: - scheduler_driver.handle_schedule_error(context, exc, - instance.uuid, request_spec) - return - - for (instance, host) in itertools.izip(instances, hosts): - try: - instance.refresh() - except (exception.InstanceNotFound, - exception.InstanceInfoCacheNotFound): - LOG.debug('Instance deleted during build', instance=instance) - continue - local_filter_props = copy.deepcopy(filter_properties) - scheduler_utils.populate_filter_properties(local_filter_props, - host) - # The block_device_mapping passed from the api doesn't contain - # instance specific information - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - - # Note(lizm) convert host name to cas host name - host_name = host['host'] - if host_name.find("_"): - host_name = self._convert_host(host_name) - - self.compute_rpcapi.build_and_run_instance(context, - instance=instance, host=host_name, image=image, - request_spec=request_spec, - filter_properties=local_filter_props, - admin_password=admin_password, - injected_files=injected_files, - requested_networks=requested_networks, - security_groups=security_groups, - block_device_mapping=bdms, node=host['nodename'], - limits=host['limits']) - - def _convert_host(self, host): - # Note(lizm) convert host name, get cas host name - # eg. "lee_str"-->"lee" - return str(host.split("_")[0]) - - def _delete_image(self, context, image_id): - return self.image_api.delete(context, image_id) - - def _schedule_instances(self, context, image, filter_properties, - *instances): - request_spec = scheduler_utils.build_request_spec(context, image, - instances) - hosts = self.scheduler_client.select_destinations(context, - request_spec, filter_properties) - return hosts - - def unshelve_instance(self, context, instance): - sys_meta = instance.system_metadata - - def safe_image_show(ctx, image_id): - if image_id: - return self.image_api.get(ctx, image_id) - - if instance.vm_state == vm_states.SHELVED: - instance.task_state = task_states.POWERING_ON - instance.save(expected_task_state=task_states.UNSHELVING) - self.compute_rpcapi.start_instance(context, instance) - snapshot_id = sys_meta.get('shelved_image_id') - if snapshot_id: - self._delete_image(context, snapshot_id) - elif instance.vm_state == vm_states.SHELVED_OFFLOADED: - image_id = sys_meta.get('shelved_image_id') - with compute_utils.EventReporter( - context, 'get_image_info', instance.uuid): - try: - image = safe_image_show(context, image_id) - except exception.ImageNotFound: - instance.vm_state = vm_states.ERROR - instance.save() - reason = _('Unshelve attempted but the image %s ' - 'cannot be found.') % image_id - LOG.error(reason, instance=instance) - raise exception.UnshelveException( - instance_id=instance.uuid, reason=reason) - - try: - with compute_utils.EventReporter(context, 'schedule_instances', - instance.uuid): - filter_properties = {} - hosts = self._schedule_instances(context, image, - filter_properties, - instance) - host_state = hosts[0] - scheduler_utils.populate_filter_properties( - filter_properties, host_state) - (host, node) = (host_state['host'], host_state['nodename']) - self.compute_rpcapi.unshelve_instance( - context, instance, host, image=image, - filter_properties=filter_properties, node=node) - except exception.NoValidHost: - instance.task_state = None - 
instance.save() - LOG.warning(_("No valid host found for unshelve instance"), - instance=instance) - return - else: - LOG.error(_('Unshelve attempted but vm_state not SHELVED or ' - 'SHELVED_OFFLOADED'), instance=instance) - instance.vm_state = vm_states.ERROR - instance.save() - return - - for key in ['shelved_at', 'shelved_image_id', 'shelved_host']: - if key in sys_meta: - del(sys_meta[key]) - instance.system_metadata = sys_meta - instance.save() - - def rebuild_instance(self, context, instance, orig_image_ref, image_ref, - injected_files, new_pass, orig_sys_metadata, - bdms, recreate, on_shared_storage, - preserve_ephemeral=False, host=None): - - with compute_utils.EventReporter(context, 'rebuild_server', - instance.uuid): - if not host: - # NOTE(lcostantino): Retrieve scheduler filters for the - # instance when the feature is available - filter_properties = {'ignore_hosts': [instance.host]} - request_spec = scheduler_utils.build_request_spec(context, - image_ref, - [instance]) - try: - hosts = self.scheduler_client.select_destinations(context, - request_spec, - filter_properties) - host = hosts.pop(0)['host'] - except exception.NoValidHost as ex: - with excutils.save_and_reraise_exception(): - self._set_vm_state_and_notify(context, - 'rebuild_server', - {'vm_state': instance.vm_state, - 'task_state': None}, ex, request_spec) - LOG.warning(_("No valid host found for rebuild"), - instance=instance) - - self.compute_rpcapi.rebuild_instance(context, - instance=instance, - new_pass=new_pass, - injected_files=injected_files, - image_ref=image_ref, - orig_image_ref=orig_image_ref, - orig_sys_metadata=orig_sys_metadata, - bdms=bdms, - recreate=recreate, - on_shared_storage=on_shared_storage, - preserve_ephemeral=preserve_ephemeral, - host=host) diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/README b/l2_proxy/README similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/README rename to l2_proxy/README diff --git a/glancesync/glance/sync/client/v1/__init__.py b/l2_proxy/__init__.py similarity index 100% rename from glancesync/glance/sync/client/v1/__init__.py rename to l2_proxy/__init__.py diff --git a/glancesync/glance/sync/store/__init__.py b/l2_proxy/agent/__init__.py similarity index 100% rename from glancesync/glance/sync/store/__init__.py rename to l2_proxy/agent/__init__.py diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/clients.py b/l2_proxy/agent/clients.py similarity index 98% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/clients.py rename to l2_proxy/agent/clients.py index dbd6a624..c1abc878 100644 --- a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/clients.py +++ b/l2_proxy/agent/clients.py @@ -19,8 +19,8 @@ from oslo.config import cfg #from heat.openstack.common import importutils #from heat.openstack.common import log as logging -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging +from oslo.utils import importutils +from oslo_log import log as logging logger = logging.getLogger(__name__) diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/l2_proxy.py b/l2_proxy/agent/l2_proxy.py similarity index 79% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/l2_proxy.py rename to l2_proxy/agent/l2_proxy.py index c8262f0a..93343e6e 100644 --- a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/l2_proxy.py +++ b/l2_proxy/agent/l2_proxy.py @@ -16,12 +16,12 @@ # @author: Haojie Jia, Huawei import hashlib +import os +import select import 
signal +import socket import sys import time -import os -import socket -import select from neutron import context as n_context from neutron.common import constants as const @@ -30,12 +30,18 @@ import eventlet eventlet.monkey_patch() import netaddr +from neutronclient.common import exceptions +from oslo_log import log as logging +import oslo_messaging from oslo.config import cfg +from oslo.serialization import jsonutils +from oslo.utils import excutils +from oslo.utils import timeutils from six import moves from neutron.agent import l2population_rpc from neutron.agent.linux import ip_lib -from neutron.agent.linux import ovs_lib +from neutron.agent.common import ovs_lib from neutron.agent.linux import polling from neutron.agent.linux import utils from neutron.agent import rpc as agent_rpc @@ -48,18 +54,12 @@ from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils as q_utils from neutron import context -from neutron.openstack.common import log as logging from neutron.openstack.common import loopingcall -from neutron.openstack.common import jsonutils from neutron.plugins.common import constants as p_const from neutron.plugins.l2_proxy.common import config # noqa from neutron.plugins.l2_proxy.common import constants from neutron.plugins.l2_proxy.agent import neutron_proxy_context from neutron.plugins.l2_proxy.agent import clients -from neutron.openstack.common import timeutils -from neutronclient.common import exceptions -from neutron.openstack.common import excutils - LOG = logging.getLogger(__name__) @@ -79,8 +79,9 @@ class QueryPortsInterface: def __init__(self): self.context = n_context.get_admin_context_without_session() - def _get_cascaded_neutron_client(self): - context = n_context.get_admin_context_without_session() + @classmethod + def _get_cascaded_neutron_client(cls): + admin_context = n_context.get_admin_context_without_session() keystone_auth_url = cfg.CONF.AGENT.keystone_auth_url kwargs = {'auth_token': None, 'username': cfg.CONF.AGENT.neutron_user_name, @@ -88,36 +89,39 @@ class QueryPortsInterface: 'aws_creds': None, 'tenant': cfg.CONF.AGENT.neutron_tenant_name, 'auth_url': keystone_auth_url, - 'roles': context.roles, - 'is_admin': context.is_admin, + 'roles': admin_context.roles, + 'is_admin': admin_context.is_admin, 'region_name': cfg.CONF.AGENT.os_region_name} - reqCon = neutron_proxy_context.RequestContext(**kwargs) - openStackClients = clients.OpenStackClients(reqCon) - neutronClient = openStackClients.neutron() - return neutronClient + req_context = neutron_proxy_context.RequestContext(**kwargs) + openstack_clients = clients.OpenStackClients(req_context) + cls.cascaded_neutron_client = openstack_clients.neutron() + + @classmethod + def _is_cascaded_neutron_client_ready(cls): + if cls.cascaded_neutron_client: + return True + else: + return False def _show_port(self, port_id): - portResponse = None - if(not QueryPortsFromCascadedNeutron.cascaded_neutron_client): - QueryPortsFromCascadedNeutron.cascaded_neutron_client = \ + if not self._is_cascaded_neutron_client_ready(): self._get_cascaded_neutron_client() retry = 0 - while(True): + while True: try: - portResponse = QueryPortsFromCascadedNeutron.\ - cascaded_neutron_client.show_port(port_id) - LOG.debug(_('show port, port_id=%s, Response:%s'), str(port_id), - str(portResponse)) - return portResponse + port_response = self.cascaded_neutron_client.show_port(port_id) + LOG.debug(_('show port, port_id=%s, Response:%s'), + str(port_id), str(port_response)) + return port_response 
except exceptions.Unauthorized: - retry = retry + 1 - if(retry <= 3): - QueryPortsFromCascadedNeutron.cascaded_neutron_client = \ - self._get_cascaded_neutron_client() + retry += 1 + if retry <= 3: + self._get_cascaded_neutron_client() continue else: with excutils.save_and_reraise_exception(): - LOG.error(_('ERR: Try 3 times,Unauthorized to list ports!')) + LOG.error( + _('ERR: Try 3 times, Unauthorized to list ports!')) return None except Exception: with excutils.save_and_reraise_exception(): @@ -128,36 +132,34 @@ class QueryPortsInterface: pagination_limit=None, pagination_marker=None): filters = {'status': 'ACTIVE'} - if(since_time): + if since_time: filters['changes_since'] = since_time - if(pagination_limit): + if pagination_limit: filters['limit'] = pagination_limit filters['page_reverse'] = 'False' - if(pagination_marker): + if pagination_marker: filters['marker'] = pagination_marker - portResponse = None - if(not QueryPortsFromCascadedNeutron.cascaded_neutron_client): - QueryPortsFromCascadedNeutron.cascaded_neutron_client = \ - self._get_cascaded_neutron_client() + if not self._is_cascaded_neutron_client_ready(): + self._get_cascaded_neutron_client() retry = 0 - while(True): + while True: try: - portResponse = QueryPortsFromCascadedNeutron.\ - cascaded_neutron_client.get('/ports', params=filters) + port_response = self.cascaded_neutron_client.get( + '/ports', params=filters) LOG.debug(_('list ports, filters:%s, since_time:%s, limit=%s, ' 'marker=%s, Response:%s'), str(filters), - str(since_time), str(pagination_limit), - str(pagination_marker), str(portResponse)) - return portResponse + str(since_time), str(pagination_limit), + str(pagination_marker), str(port_response)) + return port_response except exceptions.Unauthorized: - retry = retry + 1 - if(retry <= 3): - QueryPortsFromCascadedNeutron.cascaded_neutron_client = \ - self._get_cascaded_neutron_client() + retry += 1 + if retry <= 3: + self._get_cascaded_neutron_client() continue else: with excutils.save_and_reraise_exception(): - LOG.error(_('ERR: Try 3 times,Unauthorized to list ports!')) + LOG.error( + _('ERR: Try 3 times, Unauthorized to list ports!')) return None except Exception: with excutils.save_and_reraise_exception(): @@ -174,84 +176,27 @@ class QueryPortsInterface: else: pagination_limit = cfg.CONF.AGENT.pagination_limit first_page = self._list_ports(since_time, pagination_limit) - if(not first_page): + if not first_page: return ports_info ports_info['ports'].extend(first_page.get('ports', [])) ports_links_list = first_page.get('ports_links', []) - while(True): + while True: last_port_id = None current_page = None for pl in ports_links_list: - if (pl.get('rel', None) == 'next'): + if pl.get('rel') == 'next': port_count = len(ports_info['ports']) - last_port_id = ports_info['ports'][port_count - 1].get('id') - if(last_port_id): + last_port_id = ports_info['ports'][ + port_count - 1].get('id') + if last_port_id: current_page = self._list_ports(since_time, pagination_limit, last_port_id) - if(not current_page): + if not current_page: return ports_info ports_info['ports'].extend(current_page.get('ports', [])) ports_links_list = current_page.get('ports_links', []) - -class QueryPortsFromNovaproxy(QueryPortsInterface): - - ports_info = {'ports': {'add': [], 'del': []}} - - def __init__(self): - self.context = n_context.get_admin_context_without_session() - self.sock_path = None - self.sock = None - - def listen_and_recv_port_info(self, sock_path): - try: - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - path = 
sock_path - if os.path.exists(path): - os.unlink(path) - sock.bind(path) - sock.listen(5) - while(True): - infds, outfds, errfds = select.select([sock,], [], [], 5) - if len(infds) != 0: - con, addr = sock.accept() - recv_data = con.recv(1024) - self.process_recv_data(recv_data) - except socket.error as e: - LOG.warn(_('Error while connecting to socket: %s'), e) - return {} -# con.close() -# sock.close() - - def process_recv_data(self, data): - LOG.debug(_('process_recv_data begin! data:%s'), data) - data_dict = jsonutils.loads(data) - ports = data_dict.get('ports', None) - if(ports): - added_ports = ports.get('add', []) - for port_id in added_ports: - port_ret = self._show_port(port_id) - if port_ret and port_ret.get('port', None): - QueryPortsFromNovaproxy.ports_info['ports']['add']. \ - append(port_ret.get('port')) -# removed_ports = ports.get('delete', []) - - def get_update_net_port_info(self, since_time=None): - if(since_time): - ports_info = QueryPortsFromNovaproxy.ports_info['ports'].get('add', []) - QueryPortsFromNovaproxy.ports_info['ports']['add'] = [] - else: - all_ports = self._get_ports_pagination() - ports_info = all_ports.get('ports', []) - return ports_info - - -class QueryPortsFromCascadedNeutron(QueryPortsInterface): - - def __init__(self): - self.context = n_context.get_admin_context_without_session() - def get_update_net_port_info(self, since_time=None): if since_time: ports = self._get_ports_pagination(since_time) @@ -259,10 +204,6 @@ class QueryPortsFromCascadedNeutron(QueryPortsInterface): ports = self._get_ports_pagination() return ports.get("ports", []) -# def get_update_port_info_since(self, since_time): -# ports = self._get_ports_pagination(since_time) -# return ports.get("ports", []) - class RemotePort: @@ -271,7 +212,7 @@ class RemotePort: self.port_name = port_name self.mac = mac self.binding_profile = binding_profile - if(ips is None): + if not ips: self.ip = set() else: self.ip = set(ips) @@ -283,7 +224,7 @@ class LocalPort: self.port_id = port_id self.cascaded_port_id = cascaded_port_id self.mac = mac - if(ips is None): + if not ips: self.ip = set() else: self.ip = set(ips) @@ -312,22 +253,11 @@ class LocalVLANMapping: self.segmentation_id)) -class OVSPluginApi(agent_rpc.PluginApi, - dvr_rpc.DVRServerRpcApiMixin, - sg_rpc.SecurityGroupServerRpcApiMixin): +class OVSPluginApi(agent_rpc.PluginApi): pass -class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): - def __init__(self, context, plugin_rpc, root_helper): - self.context = context - self.plugin_rpc = plugin_rpc - self.root_helper = root_helper - self.init_firewall(defer_refresh_firewall=True) - - -class OVSNeutronAgent(n_rpc.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin, +class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, l2population_rpc.L2populationRpcCallBackTunnelMixin, dvr_rpc.DVRAgentRpcCallbackMixin): '''Implements OVS-based tunneling, VLANs and flat networks. 
@@ -360,7 +290,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback, # 1.0 Initial version # 1.1 Support Security Group RPC # 1.2 Support DVR (Distributed Virtual Router) RPC - RPC_API_VERSION = '1.2' + # RPC_API_VERSION = '1.2' + target = oslo_messaging.Target(version='1.2') def __init__(self, integ_br, tun_br, local_ip, bridge_mappings, root_helper, @@ -422,13 +353,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback, self.enable_distributed_routing}, 'agent_type': q_const.AGENT_TYPE_OVS, 'start_flag': True} - if(cfg.CONF.AGENT.query_ports_mode == 'cascaded_neutron'): - self.query_ports_info_inter = QueryPortsFromCascadedNeutron() - elif(cfg.CONF.AGENT.query_ports_mode == 'nova_proxy'): - self.sock_path = cfg.CONF.AGENT.proxy_sock_path - self.query_ports_info_inter = QueryPortsFromNovaproxy() - eventlet.spawn_n(self.query_ports_info_inter.listen_and_recv_port_info, - self.sock_path) + self.query_ports_info_inter = QueryPortsInterface() self.cascaded_port_info = {} self.cascaded_host_map = {} self.first_scan_flag = True @@ -436,13 +361,13 @@ class OVSNeutronAgent(n_rpc.RpcCallback, # Keep track of int_br's device count for use by _report_state() self.int_br_device_count = 0 - self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper) -# self.setup_integration_br() + self.int_br = ovs_lib.OVSBridge(integ_br) + # self.setup_integration_br() # Stores port update notifications for processing in main rpc loop self.updated_ports = set() self.setup_rpc() self.bridge_mappings = bridge_mappings -# self.setup_physical_bridges(self.bridge_mappings) + # self.setup_physical_bridges(self.bridge_mappings) self.local_vlan_map = {} self.tun_br_ofports = {p_const.TYPE_GRE: {}, p_const.TYPE_VXLAN: {}} @@ -463,12 +388,11 @@ class OVSNeutronAgent(n_rpc.RpcCallback, self.patch_int_ofport = constants.OFPORT_INVALID self.patch_tun_ofport = constants.OFPORT_INVALID -# self.dvr_agent.setup_dvr_flows_on_integ_tun_br() + # self.dvr_agent.setup_dvr_flows_on_integ_tun_br() # Security group agent support - self.sg_agent = OVSSecurityGroupAgent(self.context, - self.plugin_rpc, - root_helper) + self.sg_agent = sg_rpc.SecurityGroupAgentRpc( + self.context, self.sg_plugin_rpc, defer_refresh_firewall=True) # Initialize iteration counter self.iter_num = 0 self.run_daemon_loop = True @@ -490,6 +414,11 @@ class OVSNeutronAgent(n_rpc.RpcCallback, self.agent_id = 'ovs-agent-%s' % cfg.CONF.host self.topic = topics.AGENT self.plugin_rpc = OVSPluginApi(topics.PLUGIN) + # Vega: adopt the change in community which replaces + # xxxRpcApiMixin with a standalone class xxxRpcApi + self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) + self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN) + self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) # RPC network init @@ -532,17 +461,17 @@ class OVSNeutronAgent(n_rpc.RpcCallback, return network_id def network_delete(self, context, **kwargs): - LOG.debug(_("TRICIRCLE network_delete received")) + LOG.debug(_("Tricircle network_delete received")) network_id = kwargs.get('network_id') csd_network_name = self.get_csd_network_name(network_id) network_ret = self.list_cascaded_network_by_name(csd_network_name) - if(network_ret and (network_ret.get('networks'))): + if network_ret and (network_ret.get('networks')): cascaded_net = network_ret['networks'][0] self.delete_cascaded_network_by_id(cascaded_net['id']) else: - LOG.error('TRICIRCLE List cascaded network %s failed when ' + LOG.error('Tricircle List cascaded network %s failed when ' 'call network_delete!', csd_network_name) - 
LOG.debug(_("TRICIRCLE Network %s was deleted successfully."), + LOG.debug(_("Tricircle Network %s was deleted successfully."), network_id) def port_update(self, context, **kwargs): @@ -559,10 +488,10 @@ class OVSNeutronAgent(n_rpc.RpcCallback, def _create_port(self, context, network_id, binding_profile, port_name, mac_address, ips): - if(not network_id): + if not network_id: LOG.error(_("No network id is specified, cannot create port")) return - neutronClient = self.get_cascaded_neutron_client() + neutron_client = self.get_cascaded_neutron_client() req_props = {'network_id': network_id, 'name': port_name, 'admin_state_up': True, @@ -571,19 +500,19 @@ class OVSNeutronAgent(n_rpc.RpcCallback, 'binding:profile': binding_profile, 'device_owner': 'compute:' } - bodyResponse = neutronClient.create_port({'port': req_props}) - LOG.debug(_('create port, Response:%s'), str(bodyResponse)) - return bodyResponse + body_response = neutron_client.create_port({'port': req_props}) + LOG.debug(_('create port, Response:%s'), str(body_response)) + return body_response def _destroy_port(self, context, port_id): - if(not port_id): + if not port_id: LOG.error(_("No port id is specified, cannot destroy port")) return - neutronClient = self.get_cascaded_neutron_client() - bodyResponse = neutronClient.delete_port(port_id) - LOG.debug(_('destroy port, Response:%s'), str(bodyResponse)) - return bodyResponse + neutron_client = self.get_cascaded_neutron_client() + body_response = neutron_client.delete_port(port_id) + LOG.debug(_('destroy port, Response:%s'), str(body_response)) + return body_response def fdb_add(self, context, fdb_entries): LOG.debug("fdb_add received") @@ -601,12 +530,12 @@ class OVSNeutronAgent(n_rpc.RpcCallback, port_name = 'remote_port' mac_ip_map = {} for port in ports: - if(port == q_const.FLOODING_ENTRY): + if port == q_const.FLOODING_ENTRY: continue - if(const.DEVICE_OWNER_DVR_INTERFACE in port[1]): + if const.DEVICE_OWNER_DVR_INTERFACE in port[1]: return ips = mac_ip_map.get(port[0]) - if(ips): + if ips: ips += port[2] mac_ip_map[port[0]] = ips else: @@ -649,7 +578,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback, for agent_ip, ports in agent_ports.items(): for port in ports: local_p = lvm.vif_ports.pop(port[0], None) - if(local_p and local_p.port_id): + if local_p and local_p.port_id: self.cascaded_port_info.pop(local_p.port_id, None) continue remote_p = lvm.remote_ports.pop(port[0], None) @@ -660,7 +589,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback, def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): '''TODO can not delete, by jiahaojie if delete,it will raise TypeError: - Can't instantiate abstract class OVSNeutronAgent with abstract + Can't instantiate abstract class OVSNeutronAgent with abstract methods add_fdb_flow, cleanup_tunnel_port, del_fdb_flow, setup_entry_for_arp_reply, setup_tunnel_port ''' LOG.debug("add_fdb_flow received") @@ -676,7 +605,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback, def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address, ip_address): '''TODO can not delete, by jiahaojie - if delete,it will raise TypeError: + if delete,it will raise TypeError: Can't instantiate abstract class OVSNeutronAgent with abstract methods add_fdb_flow, cleanup_tunnel_port, del_fdb_flow, setup_entry_for_arp_reply, setup_tunnel_port ''' @@ -704,8 +633,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback, LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) return lvid = self.available_local_vlans.pop() - self.local_vlan_map[net_uuid] = LocalVLANMapping( - 
network_type, + self.local_vlan_map[net_uuid] = LocalVLANMapping(network_type, physical_network, segmentation_id, cascaded_net_id) @@ -764,10 +692,8 @@ class OVSNeutronAgent(n_rpc.RpcCallback, physical_network, segmentation_id, cascaded_port_info['network_id']) lvm = self.local_vlan_map[net_uuid] - lvm.vif_ports[cascaded_port_info['mac_address']] = \ - LocalPort(port, - cascaded_port_info['id'], - cascaded_port_info['mac_address']) + lvm.vif_ports[cascaded_port_info['mac_address']] = LocalPort( + port, cascaded_port_info['id'], cascaded_port_info['mac_address']) def get_port_id_from_profile(self, profile): return profile.get('cascading_port_id', None) @@ -779,29 +705,32 @@ class OVSNeutronAgent(n_rpc.RpcCallback, LOG.debug(_('jiahaojie---port: %s'), str(port)) profile = port['binding:profile'] cascading_port_id = self.get_port_id_from_profile(profile) - if(not cascading_port_id): + if not cascading_port_id: continue self.cascaded_port_info[cascading_port_id] = port cur_ports.add(cascading_port_id) return cur_ports def scan_ports(self, registered_ports, updated_ports=None): - if(self.first_scan_flag): + if self.first_scan_flag: ports_info = self.query_ports_info_inter.get_update_net_port_info() - self.first_scan_flag = False + # Vega: since query based on timestamp is not supported currently, + # comment the following line to always query all the ports. + # self.first_scan_flag = False else: pre_time = time.time() - self.polling_interval - 1 since_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(pre_time)) ports_info = self.query_ports_info_inter.get_update_net_port_info( - since_time) + since_time) added_or_updated_ports = self.analysis_ports_info(ports_info) - cur_ports = set(self.cascaded_port_info.keys()) | added_or_updated_ports + cur_ports = set( + self.cascaded_port_info.keys()) | added_or_updated_ports self.int_br_device_count = len(cur_ports) port_info = {'current': cur_ports} if updated_ports is None: updated_ports = set() - #updated_ports.update(self.check_changed_vlans(registered_ports)) + # updated_ports.update(self.check_changed_vlans(registered_ports)) if updated_ports: # Some updated ports might have been removed in the # meanwhile, and therefore should not be processed. 
@@ -839,17 +768,17 @@ class OVSNeutronAgent(n_rpc.RpcCallback, def setup_tunnel_port(self, br, remote_ip, network_type): '''TODO can not delete, by jiahaojie - if delete,it will raise TypeError: - Can't instantiate abstract class OVSNeutronAgent with abstract - methods add_fdb_flow, cleanup_tunnel_port, del_fdb_flow, + if delete,it will raise TypeError: + Can't instantiate abstract class OVSNeutronAgent with abstract + methods add_fdb_flow, cleanup_tunnel_port, del_fdb_flow, setup_entry_for_arp_reply, setup_tunnel_port ''' LOG.debug("cleanup_tunnel_port is called!") def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type): '''TODO can not delete, by jiahaojie - if delete,it will raise TypeError: - Can't instantiate abstract class OVSNeutronAgent with abstract - methods add_fdb_flow, cleanup_tunnel_port, del_fdb_flow, + if delete,it will raise TypeError: + Can't instantiate abstract class OVSNeutronAgent with abstract + methods add_fdb_flow, cleanup_tunnel_port, del_fdb_flow, setup_entry_for_arp_reply, setup_tunnel_port ''' LOG.debug("cleanup_tunnel_port is called!") @@ -863,26 +792,24 @@ class OVSNeutronAgent(n_rpc.RpcCallback, return details_ips_set == cascaded_ips_set def get_cascading_neutron_client(self): - context = n_context.get_admin_context_without_session() + admin_context = n_context.get_admin_context_without_session() keystone_auth_url = cfg.CONF.AGENT.cascading_auth_url kwargs = {'auth_token': None, 'username': cfg.CONF.AGENT.cascading_user_name, 'password': cfg.CONF.AGENT.cascading_password, 'aws_creds': None, 'tenant': cfg.CONF.AGENT.cascading_tenant_name, - # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', #context.values['tenant'], 'auth_url': keystone_auth_url, - 'roles': context.roles, - 'is_admin': context.is_admin, + 'roles': admin_context.roles, + 'is_admin': admin_context.is_admin, 'region_name': cfg.CONF.AGENT.cascading_os_region_name} - reqCon = neutron_proxy_context.RequestContext(**kwargs) - openStackClients = clients.OpenStackClients(reqCon) - neutronClient = openStackClients.neutron() - return neutronClient + req_context = neutron_proxy_context.RequestContext(**kwargs) + openstack_clients = clients.OpenStackClients(req_context) + return openstack_clients.neutron() def update_cascading_port_profile(self, cascaded_host_ip, cascaded_port_info, details): - if(not cascaded_host_ip): + if not cascaded_host_ip: return profile = {'host_ip': cascaded_host_ip, 'cascaded_net_id': { @@ -893,10 +820,10 @@ class OVSNeutronAgent(n_rpc.RpcCallback, subnet_map = profile['cascaded_subnet_id'] for fi_ing in details['fixed_ips']: for fi_ed in cascaded_port_info['fixed_ips']: - if (fi_ed['ip_address'] == fi_ing['ip_address']): + if fi_ed['ip_address'] == fi_ing['ip_address']: subnet_map[fi_ing['subnet_id']] = {} - subnet_map[fi_ing['subnet_id']][cfg.CONF.host] = \ - fi_ed['subnet_id'] + subnet_map[fi_ing['subnet_id']][ + cfg.CONF.host] = fi_ed['subnet_id'] break neutron_client = self.get_cascading_neutron_client() req_props = {"binding:profile": profile} @@ -905,26 +832,26 @@ class OVSNeutronAgent(n_rpc.RpcCallback, LOG.debug(_('update compute port, Response:%s'), str(port_ret)) def get_cascaded_neutron_client(self): - context = n_context.get_admin_context_without_session() - keystone_auth_url = cfg.CONF.AGENT.keystone_auth_url - kwargs = {'auth_token': None, - 'username': cfg.CONF.AGENT.neutron_user_name, - 'password': cfg.CONF.AGENT.neutron_password, - 'aws_creds': None, - 'tenant': cfg.CONF.AGENT.neutron_tenant_name, - # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', 
#context.values['tenant'], - 'auth_url': keystone_auth_url, - 'roles': context.roles, - 'is_admin': context.is_admin, - 'region_name': cfg.CONF.AGENT.os_region_name} - reqCon = neutron_proxy_context.RequestContext(**kwargs) - openStackClients = clients.OpenStackClients(reqCon) - neutronClient = openStackClients.neutron() - return neutronClient + return self.query_ports_info_inter.cascaded_neutron_client + # context = n_context.get_admin_context_without_session() + # keystone_auth_url = cfg.CONF.AGENT.keystone_auth_url + # kwargs = {'auth_token': None, + # 'username': cfg.CONF.AGENT.neutron_user_name, + # 'password': cfg.CONF.AGENT.neutron_password, + # 'aws_creds': None, + # 'tenant': cfg.CONF.AGENT.neutron_tenant_name, + # 'auth_url': keystone_auth_url, + # 'roles': context.roles, + # 'is_admin': context.is_admin, + # 'region_name': cfg.CONF.AGENT.os_region_name} + # reqCon = neutron_proxy_context.RequestContext(**kwargs) + # openStackClients = clients.OpenStackClients(reqCon) + # neutronClient = openStackClients.neutron() + # return neutronClient def get_cascaded_host_ip(self, ed_host_id): host_ip = self.cascaded_host_map.get(ed_host_id) - if(host_ip): + if host_ip: return host_ip neutron_client = self.get_cascaded_neutron_client() agent_ret = neutron_client.list_agents(host=ed_host_id, @@ -937,7 +864,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback, # json.loads(agent_config) configuration = agent_config host_ip = configuration.get('tunneling_ip') - if(host_ip): + if host_ip: self.cascaded_host_map[ed_host_id] = host_ip return host_ip @@ -957,9 +884,9 @@ class OVSNeutronAgent(n_rpc.RpcCallback, if 'port_id' in details: cascaded_port_info = self.cascaded_port_info.get(device) - if(not self.compare_port_info(details, cascaded_port_info)): - LOG.info(_("Port %(device)s can not updated. 
" - "Because port info in cascading and cascaded layer" + if not self.compare_port_info(details, cascaded_port_info): + LOG.info(_("Port %(device)s can not updated because " + "port info in cascading and cascaded layer" "are different, Details: %(details)s"), {'device': device, 'details': details}) skipped_devices.append(device) @@ -978,7 +905,7 @@ class OVSNeutronAgent(n_rpc.RpcCallback, ovs_restarted) # update cascading port, modify binding:profile to add host_ip # and cascaded net_id/cascaded_subnet_id - if('compute' in details['device_owner']): + if 'compute' in details['device_owner']: ed_host_id = cascaded_port_info['binding:host_id'] cascaded_host_ip = self.get_cascaded_host_ip(ed_host_id) self.update_cascading_port_profile(cascaded_host_ip, @@ -998,10 +925,6 @@ class OVSNeutronAgent(n_rpc.RpcCallback, self.plugin_rpc.update_device_down( self.context, device, self.agent_id, cfg.CONF.host) LOG.info(_("Configuration for device %s completed."), device) -# else: -# LOG.warn(_("Device %s not defined on plugin"), device) -# if (port and port.ofport != -1): -# self.port_dead(port) return skipped_devices def process_network_ports(self, port_info, ovs_restarted): @@ -1051,13 +974,13 @@ class OVSNeutronAgent(n_rpc.RpcCallback, resync_a = True if 'removed' in port_info: start = time.time() - #resync_b = self.treat_devices_removed(port_info['removed']) + # resync_b = self.treat_devices_removed(port_info['removed']) LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" "treat_devices_removed completed in %(elapsed).3f"), {'iter_num': self.iter_num, 'elapsed': time.time() - start}) # If one of the above operations fails => resync with plugin - return (resync_a | resync_b) + return resync_a or resync_b def get_ip_in_hex(self, ip_address): try: @@ -1072,14 +995,9 @@ class OVSNeutronAgent(n_rpc.RpcCallback, port_info.get('removed') or port_info.get('updated')) - def rpc_loop(self, polling_manager=None): -# if not polling_manager: -# polling_manager = polling.AlwaysPoll() - - sync = True + def rpc_loop(self): ports = set() updated_ports_copy = set() - ancillary_ports = set() ovs_restarted = False while self.run_daemon_loop: start = time.time() @@ -1090,14 +1008,6 @@ class OVSNeutronAgent(n_rpc.RpcCallback, 'removed': 0}} LOG.debug(_("Agent rpc_loop - iteration:%d started"), self.iter_num) -# if sync: -# LOG.info(_("Agent out of sync with plugin!")) -# ports.clear() -# ancillary_ports.clear() -# sync = False -# polling_manager.force_polling() - -# if self._agent_has_updates(polling_manager) or ovs_restarted: if True: try: LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " @@ -1111,7 +1021,6 @@ class OVSNeutronAgent(n_rpc.RpcCallback, updated_ports_copy = self.updated_ports self.updated_ports = set() reg_ports = (set() if ovs_restarted else ports) - #import pdb;pdb.set_trace() port_info = self.scan_ports(reg_ports, updated_ports_copy) LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " "port information retrieved. 
" @@ -1121,13 +1030,12 @@ class OVSNeutronAgent(n_rpc.RpcCallback, # Secure and wire/unwire VIFs and update their status # on Neutron server if (self._port_info_has_changes(port_info) or - self.sg_agent.firewall_refresh_needed() or - ovs_restarted): + self.sg_agent.firewall_refresh_needed() or + ovs_restarted): LOG.debug(_("Starting to process devices in:%s"), port_info) # If treat devices fails - must resync with plugin - sync = self.process_network_ports(port_info, - ovs_restarted) + self.process_network_ports(port_info, ovs_restarted) LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" "ports processed. Elapsed:%(elapsed).3f"), {'iter_num': self.iter_num, @@ -1139,13 +1047,10 @@ class OVSNeutronAgent(n_rpc.RpcCallback, port_stats['regular']['removed'] = ( len(port_info.get('removed', []))) ports = port_info['current'] - -# polling_manager.polling_completed() except Exception: LOG.exception(_("Error while processing VIF ports")) # Put the ports back in self.updated_port self.updated_ports |= updated_ports_copy - sync = True # sleep till end of polling interval elapsed = (time.time() - start) @@ -1155,27 +1060,18 @@ class OVSNeutronAgent(n_rpc.RpcCallback, {'iter_num': self.iter_num, 'port_stats': port_stats, 'elapsed': elapsed}) - if (elapsed < self.polling_interval): + if elapsed < self.polling_interval: time.sleep(self.polling_interval - elapsed) else: LOG.debug(_("Loop iteration exceeded interval " "(%(polling_interval)s vs. %(elapsed)s)!"), {'polling_interval': self.polling_interval, 'elapsed': elapsed}) - self.iter_num = self.iter_num + 1 + self.iter_num += 1 def daemon_loop(self): -# with polling.get_polling_manager( -# self.minimize_polling) as pm: self.rpc_loop() -# with polling.get_polling_manager( -# self.minimize_polling, -# self.root_helper, -# self.ovsdb_monitor_respawn_interval) as pm: -# -# self.rpc_loop(polling_manager=pm) - def _handle_sigterm(self, signum, frame): LOG.debug("Agent caught SIGTERM, quitting daemon loop.") self.run_daemon_loop = False @@ -1236,14 +1132,7 @@ def main(): LOG.error(_('%s Agent terminated!'), e) sys.exit(1) -# is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper'] -# if is_xen_compute_host: -# # Force ip_lib to always use the root helper to ensure that ip -# # commands target xen dom0 rather than domU. -# cfg.CONF.set_default('ip_lib_force_root', True) - agent = OVSNeutronAgent(**agent_config) -# signal.signal(signal.SIGTERM, agent._handle_sigterm) # Start everything. LOG.info(_("Agent initialized successfully, now running... ")) diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/neutron_keystoneclient.py b/l2_proxy/agent/neutron_keystoneclient.py similarity index 98% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/neutron_keystoneclient.py rename to l2_proxy/agent/neutron_keystoneclient.py index 08bc41aa..cbec35c7 100644 --- a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/neutron_keystoneclient.py +++ b/l2_proxy/agent/neutron_keystoneclient.py @@ -14,7 +14,7 @@ # under the License. 
# @author: Haojie Jia, Huawei -from neutron.openstack.common import context +from oslo_context import context from neutron.common import exceptions import eventlet @@ -23,10 +23,8 @@ from keystoneclient.v2_0 import client as kc from keystoneclient.v3 import client as kc_v3 from oslo.config import cfg -#from heat.openstack.common import importutils -from neutron.openstack.common import importutils -#from heat.openstack.common import log as logging -from neutron.openstack.common import log as logging +from oslo.utils import importutils +from oslo_log import log as logging logger = logging.getLogger( 'neutron.plugins.cascading_proxy_agent.keystoneclient') diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/neutron_proxy_context.py b/l2_proxy/agent/neutron_proxy_context.py similarity index 93% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/neutron_proxy_context.py rename to l2_proxy/agent/neutron_proxy_context.py index 53ad7bf8..1f6ba162 100644 --- a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/neutron_proxy_context.py +++ b/l2_proxy/agent/neutron_proxy_context.py @@ -17,16 +17,19 @@ from oslo.config import cfg #from heat.openstack.common import local -from neutron.openstack.common import local +#from neutron.openstack.common import local #from heat.common import exception from neutron.common import exceptions #from heat.common import wsgi from neutron import wsgi -from neutron.openstack.common import context +#from neutron.openstack.common import context +from oslo_context import context #from heat.openstack.common import importutils -from neutron.openstack.common import importutils +#from neutron.openstack.common import importutils +from oslo.utils import importutils #from heat.openstack.common import uuidutils -from neutron.openstack.common import uuidutils +#from neutron.openstack.common import uuidutils +from oslo.utils import uuidutils def generate_request_id(): @@ -69,14 +72,14 @@ class RequestContext(context.RequestContext): self.roles = roles or [] self.region_name = region_name self.owner_is_tenant = owner_is_tenant - if overwrite or not hasattr(local.store, 'context'): - self.update_store() + # if overwrite or not hasattr(local.store, 'context'): + # self.update_store() self._session = None self.trust_id = trust_id self.trustor_user_id = trustor_user_id - def update_store(self): - local.store.context = self + # def update_store(self): + # local.store.context = self def to_dict(self): return {'auth_token': self.auth_token, diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/ovs_dvr_neutron_agent.py b/l2_proxy/agent/ovs_dvr_neutron_agent.py similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/ovs_dvr_neutron_agent.py rename to l2_proxy/agent/ovs_dvr_neutron_agent.py diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/xenapi/README b/l2_proxy/agent/xenapi/README similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/xenapi/README rename to l2_proxy/agent/xenapi/README diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/build-rpm.sh b/l2_proxy/agent/xenapi/contrib/build-rpm.sh similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/build-rpm.sh rename to l2_proxy/agent/xenapi/contrib/build-rpm.sh diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec 
b/l2_proxy/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec rename to l2_proxy/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/xenapi/etc/xapi.d/plugins/netwrap b/l2_proxy/agent/xenapi/etc/xapi.d/plugins/netwrap similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/xenapi/etc/xapi.d/plugins/netwrap rename to l2_proxy/agent/xenapi/etc/xapi.d/plugins/netwrap diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/common/__init__.py b/l2_proxy/common/__init__.py similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/common/__init__.py rename to l2_proxy/common/__init__.py diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/common/config.py b/l2_proxy/common/config.py similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/common/config.py rename to l2_proxy/common/config.py diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/common/constants.py b/l2_proxy/common/constants.py similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/common/constants.py rename to l2_proxy/common/constants.py diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/ovs_models_v2.py b/l2_proxy/ovs_models_v2.py similarity index 100% rename from neutronproxy/l2proxy/neutron/plugins/l2_proxy/ovs_models_v2.py rename to l2_proxy/ovs_models_v2.py diff --git a/neutronproxy/l2proxy/etc/neutron/plugins/ml2/ml2_conf.ini b/neutronproxy/l2proxy/etc/neutron/plugins/ml2/ml2_conf.ini deleted file mode 100644 index 77d1adb9..00000000 --- a/neutronproxy/l2proxy/etc/neutron/plugins/ml2/ml2_conf.ini +++ /dev/null @@ -1,107 +0,0 @@ -[ovs] -bridge_mappings = default:br-eth1,external:br-ex -integration_bridge = br-int -network_vlan_ranges = default:1:4094 -tunnel_type = vxlan,gre -enable_tunneling = True -local_ip = LOCAL_IP - - -[ml2] -type_drivers = local,flat,vlan,gre,vxlan -tenant_network_types = local,flat,vlan,gre,vxlan -mechanism_drivers = openvswitch,l2population - -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. -# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. -# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade - -# (ListOpt) Ordered list of extension driver entrypoints -# to be loaded from the neutron.ml2.extension_drivers namespace. -# extension_drivers = -# Example: extension_drivers = anewextensiondriver - -[ml2_type_flat] -flat_networks = external - -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. 
-# -# flat_networks = -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * - -[ml2_type_vlan] -# (ListOpt) List of [::] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks. -# -# network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 -network_vlan_ranges = default:1:4094 - -[ml2_type_gre] - -tunnel_id_ranges = 1:1000 -# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -# tunnel_id_ranges = - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of : tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -vni_ranges = 4097:200000 - -# (StrOpt) Multicast group for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -# vxlan_group = -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -#firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -firewall_driver=neutron.agent.firewall.NoopFirewallDriver -enable_security_group = True -enable_ipset = True -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True -[agent] -tunnel_types = vxlan, gre -l2_population = True -arp_responder = True -enable_distributed_routing = True - -#configure added by j00209498 -keystone_auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 -neutron_user_name = USER_NAME -neutron_password = USER_PWD -neutron_tenant_name = TENANT_NAME -os_region_name = CASCADED_REGION_NAME - -cascading_os_region_name = CASCADING_REGION_NAME -cascading_auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 -cascading_user_name = USER_NAME -cascading_password = USER_PWD -cascading_tenant_name = TENANT_NAME diff --git a/neutronproxy/l2proxy/installation/install.sh b/neutronproxy/l2proxy/installation/install.sh deleted file mode 100644 index 34f146b1..00000000 --- a/neutronproxy/l2proxy/installation/install.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -CASCADING_CONTROL_IP=127.0.0.1 -CASCADING_REGION_NAME=Cascading_Openstack -CASCADED_REGION_NAME=AZ1 -USER_NAME=neutron -USER_PWD=openstack -TENANT_NAME=service - -#For test path or the path is not standard -_PREFIX_DIR="" - -_NEUTRON_CONF_DIR="${_PREFIX_DIR}/etc/neutron" -_NEUTRON_INSTALL="${_PREFIX_DIR}/usr/lib/python2.7/dist-packages" -_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" -_NEUTRON_L2_PROXY_FILE="plugins/ml2/ml2_conf.ini" -_NEUTRON_L2_PROXY_CONF="${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -_NEUTRON_CONF="${_NEUTRON_CONF_DIR}/neutron.conf" - -# if you did not make changes to the installation files, -# please do not edit the following directories. 
-_CONF_DIR="../etc/neutron/" -_CONF_BACKUP_DIR="`dirname ${_NEUTRON_CONF_DIR}`/.neutron-cascading-l2proxy-installation-backup" -_CODE_DIR="../neutron/" -_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascading-l2proxy-installation-backup" -#_SCRIPT_NAME="${0##*/}" -#_SCRIPT_LOGFILE="/var/log/neutron/installation/${_SCRIPT_NAME}.log" - -if [[ ${EUID} -ne 0 ]]; then - echo "Please run as root." - exit 1 -fi - -##Redirecting output to logfile as well as stdout -#exec > >(tee -a ${_SCRIPT_LOGFILE}) -#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) - -cd `dirname $0` - -echo "checking installation directories..." -if [ ! -d "${_NEUTRON_DIR}" ] ; then - echo "Could not find the neutron installation. Please check the variables in the beginning of the script." - echo "aborted." - exit 1 -fi - -#for test begin -#rm -rf "${_CONF_BACKUP_DIR}/neutron" -#rm -rf "${_BACKUP_DIR}/neutron" -#for test end - -echo "checking previous installation..." -if [ -d "${_BACKUP_DIR}/neutron" -o -d "${_CONF_BACKUP_DIR}/neutron" ] ; then - echo "It seems l2proxy has already been installed!" - echo "Please check README for solution if this is not true." - exit 1 -fi - -echo "backing up current files that might be overwritten..." -mkdir -p "${_CONF_BACKUP_DIR}" -cp -r "${_NEUTRON_CONF_DIR}/" "${_CONF_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_CONF_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -mkdir -p "${_BACKUP_DIR}" -cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -echo "copying in new code files..." -cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` -if [ $? -ne 0 ] ; then - echo "Error in copying new code files, aborted." - exit 1 -fi - -echo "copying in new config files..." -cp -r "${_CONF_DIR}" `dirname ${_NEUTRON_CONF_DIR}` -if [ $? -ne 0 ] ; then - echo "Error in copying config files, aborted." - exit 1 -fi - -echo "updating config file..." -sed -i "s/CASCADING_CONTROL_IP/$CASCADING_CONTROL_IP/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/CASCADING_REGION_NAME/$CASCADING_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/CASCADED_REGION_NAME/$CASCADED_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/USER_NAME/$USER_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/USER_PWD/$USER_PWD/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" -sed -i "s/TENANT_NAME/$TENANT_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_PROXY_FILE}" - -if [ $? -ne 0 ] ; then - echo "Error in updating config file, aborted." - exit 1 -fi - -echo "stoping neutron-openvswitch-agent" -service neutron-plugin-openvswitch-agent stop -ps -efw|grep l2_proxy.py|grep -v grep|awk '{print $2}'|xargs kill -9 - -echo "starting neutron l2-proxy..." -nohup /usr/bin/python ${_NEUTRON_DIR}/plugins/l2_proxy/agent/l2_proxy.py --config-file ${_NEUTRON_CONF} --config-file ${_NEUTRON_L2_PROXY_CONF} >/dev/null 2>&1 & -if [ $? -ne 0 ] ; then - echo "There was an error in starting the l2-proxy, please start neutron l2-proxy manually." - exit 1 -fi - -echo "Completed." -echo "See README to get started." 
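The installer above reduces to three steps: back up the existing neutron tree and config, copy in the proxy code, then rewrite the CASCADING_CONTROL_IP / region / credential placeholders in ml2_conf.ini with a series of sed calls before relaunching the agent as l2_proxy.py. As a minimal illustrative sketch (not part of the patch), the same placeholder substitution could be done in Python; the target path and token values below are assumptions for the example.

# Illustrative only: mirrors the `sed -i "s/TOKEN/value/g"` substitutions in install.sh.
# The path and the token values are assumptions for this example.
import re

ML2_CONF = "/etc/neutron/plugins/ml2/ml2_conf.ini"   # assumed target file

TOKENS = {
    "CASCADING_CONTROL_IP": "127.0.0.1",
    "CASCADING_REGION_NAME": "Cascading_Openstack",
    "CASCADED_REGION_NAME": "AZ1",
    "USER_NAME": "neutron",
    "USER_PWD": "openstack",
    "TENANT_NAME": "service",
}

def substitute_placeholders(path, tokens):
    # Read the template, replace every placeholder token in one pass,
    # and write the result back in place.
    with open(path) as f:
        text = f.read()
    pattern = re.compile("|".join(re.escape(t) for t in tokens))
    text = pattern.sub(lambda m: tokens[m.group(0)], text)
    with open(path, "w") as f:
        f.write(text)

if __name__ == "__main__":
    substitute_placeholders(ML2_CONF, TOKENS)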
- -exit 0 diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/__init__.py b/neutronproxy/l2proxy/neutron/plugins/l2_proxy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/__init__.py b/neutronproxy/l2proxy/neutron/plugins/l2_proxy/agent/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/neutronproxy/l3proxy/etc/neutron/l3_proxy.ini b/neutronproxy/l3proxy/etc/neutron/l3_proxy.ini deleted file mode 100644 index 8fa3d5de..00000000 --- a/neutronproxy/l3proxy/etc/neutron/l3_proxy.ini +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -debug = True -verbose = True -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver -use_namespaces = True -agent_mode = dvr - -admin_tenant_name = TENANT_NAME -admin_user = USER_NAME -admin_password = USER_PWD -auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 -os_region_name = CASCADING_REGION_NAME - -cascaded_os_region_name = CASCADED_REGION_NAME -cascaded_auth_url = http://CASCADING_CONTROL_IP:35357/v2.0 -cascaded_admin_user_name = USER_NAME -cascaded_admin_password = USER_PWD -cascaded_tenant_name = TENANT_NAME - - diff --git a/neutronproxy/l3proxy/installation/install.sh b/neutronproxy/l3proxy/installation/install.sh deleted file mode 100644 index 9018ce38..00000000 --- a/neutronproxy/l3proxy/installation/install.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -CASCADING_CONTROL_IP=127.0.0.1 -CASCADING_REGION_NAME=Cascading_Openstack -CASCADED_REGION_NAME=AZ1 -USER_NAME=neutron -USER_PWD=openstack -TENANT_NAME=service - -#For test path or the path is not standard -_PREFIX_DIR="" - -_NEUTRON_CONF_DIR="${_PREFIX_DIR}/etc/neutron" -_NEUTRON_INSTALL="${_PREFIX_DIR}/usr/lib/python2.7/dist-packages" -_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron" -_NEUTRON_L3_PROXY_CONF_FILE='l3_proxy.ini' -_NEUTRON_L3_PROXY_CONF="${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}" -_NEUTRON_CONF="${_NEUTRON_CONF_DIR}/neutron.conf" - -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CONF_DIR="../etc/neutron/" -_CONF_BACKUP_DIR="`dirname ${_NEUTRON_CONF_DIR}`/.neutron-cascading-l3proxy-installation-backup" -_CODE_DIR="../neutron/" -_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascading-l3proxy-installation-backup" - -#_SCRIPT_NAME="${0##*/}" -#_SCRIPT_LOGFILE="/var/log/neutron/installation/${_SCRIPT_NAME}.log" - -if [[ ${EUID} -ne 0 ]]; then - echo "Please run as root." - exit 1 -fi - -##Redirecting output to logfile as well as stdout -#exec > >(tee -a ${_SCRIPT_LOGFILE}) -#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2) - -cd `dirname $0` -echo "checking installation directories..." -if [ ! -d "${_NEUTRON_DIR}" ] ; then - echo "Could not find the neutron installation. Please check the variables in the beginning of the script." - echo "aborted." 
- exit 1 -fi - -#for test begin -#rm -rf "${_CONF_BACKUP_DIR}/neutron" -#rm -rf "${_BACKUP_DIR}/neutron" -#for test end - - -echo "checking previous installation..." -if [ -d "${_BACKUP_DIR}/neutron" -o -d "${_CONF_BACKUP_DIR}/neutron" ] ; then - echo "It seems l3proxy has already been installed!" - echo "Please check README for solution if this is not true." - exit 1 -fi - -echo "backing up current files that might be overwritten..." -mkdir -p "${_CONF_BACKUP_DIR}" -cp -r "${_NEUTRON_CONF_DIR}/" "${_CONF_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_CONF_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -mkdir -p "${_BACKUP_DIR}" -cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/neutron" - echo "Error in code backup, aborted." - exit 1 -fi - -echo "copying in new code files..." -cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}` -echo -if [ $? -ne 0 ] ; then - echo "Error in copying new code files, aborted." - exit 1 -fi - -echo "copying in new config files..." -cp -r "${_CONF_DIR}" `dirname ${_NEUTRON_CONF_DIR}` -if [ $? -ne 0 ] ; then - echo "Error in copying config files, aborted." - exit 1 -fi - -echo "updating config file..." -sed -i "s/CASCADING_CONTROL_IP/$CASCADING_CONTROL_IP/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}" -sed -i "s/CASCADING_REGION_NAME/$CASCADING_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}" -sed -i "s/CASCADED_REGION_NAME/$CASCADED_REGION_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}" -sed -i "s/USER_NAME/$USER_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}" -sed -i "s/USER_PWD/$USER_PWD/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}" -sed -i "s/TENANT_NAME/$TENANT_NAME/g" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_PROXY_CONF_FILE}" - -if [ $? -ne 0 ] ; then - echo "Error in updating config file, aborted." - exit 1 -fi - -echo "stoping l3-agent" -service neutron-l3-agent stop -ps -efw|grep l3_proxy.py|grep -v grep|awk '{print $2}'|xargs kill -9 - -echo "starting neutron l3-proxy..." -nohup /usr/bin/python ${_NEUTRON_DIR}/agent/l3_proxy.py --config-file ${_NEUTRON_CONF} --config-file ${_NEUTRON_L3_PROXY_CONF} >/dev/null 2>&1 & -if [ $? -ne 0 ] ; then - echo "There was an error in starting the l3-proxy, please start neutron l3-proxy manually." - exit 1 -fi - -echo "Completed." -echo "See README to get started." 
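The l3proxy installer follows the same backup / copy / substitute pattern and then launches l3_proxy.py with both neutron.conf and the generated l3_proxy.ini. As a rough illustration only (not the agent's actual startup path, which goes through oslo.config), the sketch below reads such an l3_proxy.ini with the standard library and assembles the cascaded-credential fields that the proxy's OpenStack client expects; the file path is an assumption, and the option names are the ones listed in the ini shown above.

# Illustrative only: reads back the cascaded_* settings written by install.sh.
# This is not the proxy's real option loading; the path below is an assumption.
try:
    from configparser import ConfigParser          # Python 3
except ImportError:
    from ConfigParser import ConfigParser          # Python 2

L3_PROXY_INI = "/etc/neutron/l3_proxy.ini"         # assumed location

def load_cascaded_credentials(path):
    parser = ConfigParser()
    parser.read(path)
    # l3_proxy.ini keeps everything under [DEFAULT], so defaults() returns it all.
    opts = parser.defaults()
    return {
        "username": opts.get("cascaded_admin_user_name"),
        "password": opts.get("cascaded_admin_password"),
        "tenant": opts.get("cascaded_tenant_name"),
        "auth_url": opts.get("cascaded_auth_url"),
        "region_name": opts.get("cascaded_os_region_name"),
    }

if __name__ == "__main__":
    creds = load_cascaded_credentials(L3_PROXY_INI)
    # A value still in all-caps (e.g. USER_NAME) suggests the sed step did not run.
    leftovers = [k for k, v in creds.items() if v and v.isupper()]
    if leftovers:
        print("placeholders not substituted: %s" % ", ".join(leftovers))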
- -exit 0 diff --git a/neutronproxy/l3proxy/neutron/agent/l3_proxy.py b/neutronproxy/l3proxy/neutron/agent/l3_proxy.py deleted file mode 100644 index 1d18ab32..00000000 --- a/neutronproxy/l3proxy/neutron/agent/l3_proxy.py +++ /dev/null @@ -1,2432 +0,0 @@ -''' -Created on 2014-5-23 - -@author: jiahaojie 00209498 -''' - -import sys - -import datetime -import eventlet -eventlet.monkey_patch() - -import netaddr -import os -from oslo.config import cfg -from oslo import messaging -import Queue - -from neutron.agent.common import config -from neutron.agent import l3_ha_agent -from neutron.agent.linux import external_process -from neutron.agent.linux import interface -from neutron.agent.linux import ip_lib -from neutron.agent.linux import iptables_manager -from neutron.agent.linux import ra -from neutron.agent import rpc as agent_rpc -from neutron.common import config as common_config -from neutron.common import constants as l3_constants -from neutron.common import ipv6_utils -from neutron.common import rpc as n_rpc -from neutron.common import topics -from neutron.common import utils as common_utils -from neutron import context -from neutron import manager -from neutron.openstack.common import excutils -from neutron.openstack.common.gettextutils import _LW -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.openstack.common import periodic_task -from neutron.openstack.common import processutils -from neutron.openstack.common import service -from neutron.openstack.common import timeutils -from neutron import service as neutron_service -from neutron.services.firewall.agents.l3reference import firewall_l3_agent - -from neutron.plugins.l2_proxy.agent import neutron_proxy_context -from neutron.plugins.l2_proxy.agent import clients - -LOG = logging.getLogger(__name__) -NS_PREFIX = 'qrouter-' -INTERNAL_DEV_PREFIX = 'qr-' -EXTERNAL_DEV_PREFIX = 'qg-' -SNAT_INT_DEV_PREFIX = 'sg-' -FIP_NS_PREFIX = 'fip-' -SNAT_NS_PREFIX = 'snat-' -FIP_2_ROUTER_DEV_PREFIX = 'fpr-' -ROUTER_2_FIP_DEV_PREFIX = 'rfp-' -FIP_EXT_DEV_PREFIX = 'fg-' -FIP_LL_SUBNET = '169.254.30.0/23' -# Route Table index for FIPs -FIP_RT_TBL = 16 -# Rule priority range for FIPs -FIP_PR_START = 32768 -FIP_PR_END = FIP_PR_START + 40000 -RPC_LOOP_INTERVAL = 1 -FLOATING_IP_CIDR_SUFFIX = '/32' -# Lower value is higher priority -PRIORITY_RPC = 0 -PRIORITY_SYNC_ROUTERS_TASK = 1 -DELETE_ROUTER = 1 - - -class L3PluginApi(n_rpc.RpcProxy): - """Agent side of the l3 agent RPC API. - - API version history: - 1.0 - Initial version. - 1.1 - Floating IP operational status updates - 1.2 - DVR support: new L3 plugin methods added. - - get_ports_by_subnet - - get_agent_gateway_port - Needed by the agent when operating in DVR/DVR_SNAT mode - 1.3 - Get the list of activated services - - """ - - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic, host): - super(L3PluginApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.host = host - - def get_routers(self, context, router_ids=None): - """Make a remote process call to retrieve the sync data for routers.""" - return self.call(context, - self.make_msg('sync_routers', host=self.host, - router_ids=router_ids)) - - def get_external_network_id(self, context): - """Make a remote process call to retrieve the external network id. 
- - @raise n_rpc.RemoteError: with TooManyExternalNetworks as - exc_type if there are more than one - external network - """ - return self.call(context, - self.make_msg('get_external_network_id', - host=self.host)) - - def update_floatingip_statuses(self, context, router_id, fip_statuses): - """Call the plugin update floating IPs's operational status.""" - return self.call(context, - self.make_msg('update_floatingip_statuses', - router_id=router_id, - fip_statuses=fip_statuses), - version='1.1') - - def get_ports_by_subnet(self, context, subnet_id): - """Retrieve ports by subnet id.""" - return self.call(context, - self.make_msg('get_ports_by_subnet', host=self.host, - subnet_id=subnet_id), - topic=self.topic, - version='1.2') - - def get_agent_gateway_port(self, context, fip_net): - """Get or create an agent_gateway_port.""" - return self.call(context, - self.make_msg('get_agent_gateway_port', - network_id=fip_net, host=self.host), - topic=self.topic, - version='1.2') - - def get_service_plugin_list(self, context): - """Make a call to get the list of activated services.""" - return self.call(context, - self.make_msg('get_service_plugin_list'), - topic=self.topic, - version='1.3') - - def update_router_extern_ip_map(self, context, router_id, gateway_ip): - """update router and extern ip mapping""" - return self.call(context, - self.make_msg('update_router_extern_ip_map', - router_id=router_id, host=self.host, - gateway_ip=gateway_ip), - topic=self.topic, - version='1.2') - - def get_extra_routes_by_subnet(self, context, router_id, subnet_id): - """get extra routes for router by subnet id""" - return self.call(context, - self.make_msg('get_extra_routes_by_subnet', - router_id=router_id, host=self.host, - subnet_id=subnet_id), - topic=self.topic, - version='1.2') - - -class LinkLocalAddressPair(netaddr.IPNetwork): - def __init__(self, addr): - super(LinkLocalAddressPair, self).__init__(addr) - - def get_pair(self): - """Builds an address pair from the first and last addresses. """ - return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)), - netaddr.IPNetwork("%s/%s" % (self.broadcast, self.prefixlen))) - - -class LinkLocalAllocator(object): - """Manages allocation of link local IP addresses. - - These link local addresses are used for routing inside the fip namespaces. - The associations need to persist across agent restarts to maintain - consistency. Without this, there is disruption in network connectivity - as the agent rewires the connections with the new IP address assocations. - - Persisting these in the database is unnecessary and would degrade - performance. - """ - def __init__(self, state_file, subnet): - """Read the file with previous allocations recorded. - - See the note in the allocate method for more detail. - """ - self.state_file = state_file - subnet = netaddr.IPNetwork(subnet) - - self.allocations = {} - - self.remembered = {} - for line in self._read(): - key, cidr = line.strip().split(',') - self.remembered[key] = LinkLocalAddressPair(cidr) - - self.pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31)) - self.pool.difference_update(self.remembered.values()) - - def allocate(self, key): - """Try to allocate a link local address pair. - - I expect this to work in all cases because I expect the pool size to be - large enough for any situation. Nonetheless, there is some defensive - programming in here. - - Since the allocations are persisted, there is the chance to leak - allocations which should have been released but were not. 
This leak - could eventually exhaust the pool. - - So, if a new allocation is needed, the code first checks to see if - there are any remembered allocations for the key. If not, it checks - the free pool. If the free pool is empty then it dumps the remembered - allocations to free the pool. This final desparate step will not - happen often in practice. - """ - if key in self.remembered: - self.allocations[key] = self.remembered.pop(key) - return self.allocations[key] - - if not self.pool: - # Desparate times. Try to get more in the pool. - self.pool.update(self.remembered.values()) - self.remembered.clear() - if not self.pool: - # More than 256 routers on a compute node! - raise RuntimeError(_("Cannot allocate link local address")) - - self.allocations[key] = self.pool.pop() - self._write_allocations() - return self.allocations[key] - - def release(self, key): - self.pool.add(self.allocations.pop(key)) - self._write_allocations() - - def _write_allocations(self): - current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()] - remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()] - current.extend(remembered) - self._write(current) - - def _write(self, lines): - with open(self.state_file, "w") as f: - f.writelines(lines) - - def _read(self): - if not os.path.exists(self.state_file): - return [] - with open(self.state_file) as f: - return f.readlines() - - -class RouterInfo(l3_ha_agent.RouterMixin): - - def __init__(self, router_id, root_helper, use_namespaces, router, - use_ipv6=False): - self.router_id = router_id - #added by jiahaojie 00209498---begin - self.cascaded_router_id = None - self.extern_extra_routes = {} - self.extra_routes_is_update = False - self.local_internal_ports = [] - #added by jiahaojie 00209498---end - self.ex_gw_port = None - self._snat_enabled = None - self._snat_action = None - self.internal_ports = [] - self.snat_ports = [] - self.floating_ips = set() - self.floating_ips_dict = {} - self.root_helper = root_helper - self.use_namespaces = use_namespaces - # Invoke the setter for establishing initial SNAT action - self.router = router - self.ns_name = NS_PREFIX + router_id if use_namespaces else None - self.iptables_manager = iptables_manager.IptablesManager( - root_helper=root_helper, - use_ipv6=use_ipv6, - namespace=self.ns_name) - self.snat_iptables_manager = None - self.routes = [] - # DVR Data - # Linklocal subnet for router and floating IP namespace link - self.rtr_fip_subnet = None - self.dist_fip_count = 0 - - super(RouterInfo, self).__init__() - - @property - def router(self): - return self._router - - @router.setter - def router(self, value): - self._router = value - if not self._router: - return - # enable_snat by default if it wasn't specified by plugin - self._snat_enabled = self._router.get('enable_snat', True) - # Set a SNAT action for the router - if self._router.get('gw_port'): - self._snat_action = ('add_rules' if self._snat_enabled - else 'remove_rules') - elif self.ex_gw_port: - # Gateway port was removed, remove rules - self._snat_action = 'remove_rules' - - def perform_snat_action(self, snat_callback, *args): - # Process SNAT rules for attached subnets - if self._snat_action: - snat_callback(self, self._router.get('gw_port'), - *args, action=self._snat_action) - self._snat_action = None - - -class RouterUpdate(object): - """Encapsulates a router update - - An instance of this object carries the information necessary to prioritize - and process a request to update a router. 
- """ - def __init__(self, router_id, priority, - action=None, router=None, timestamp=None): - self.priority = priority - self.timestamp = timestamp - if not timestamp: - self.timestamp = timeutils.utcnow() - self.id = router_id - self.action = action - self.router = router - - def __lt__(self, other): - """Implements priority among updates - - Lower numerical priority always gets precedence. When comparing two - updates of the same priority then the one with the earlier timestamp - gets procedence. In the unlikely event that the timestamps are also - equal it falls back to a simple comparison of ids meaning the - precedence is essentially random. - """ - if self.priority != other.priority: - return self.priority < other.priority - if self.timestamp != other.timestamp: - return self.timestamp < other.timestamp - return self.id < other.id - - -class ExclusiveRouterProcessor(object): - """Manager for access to a router for processing - - This class controls access to a router in a non-blocking way. The first - instance to be created for a given router_id is granted exclusive access to - the router. - - Other instances may be created for the same router_id while the first - instance has exclusive access. If that happens then it doesn't block and - wait for access. Instead, it signals to the master instance that an update - came in with the timestamp. - - This way, a thread will not block to wait for access to a router. Instead - it effectively signals to the thread that is working on the router that - something has changed since it started working on it. That thread will - simply finish its current iteration and then repeat. - - This class keeps track of the last time that a router data was fetched and - processed. The timestamp that it keeps must be before when the data used - to process the router last was fetched from the database. But, as close as - possible. The timestamp should not be recorded, however, until the router - has been processed using the fetch data. - """ - _masters = {} - _router_timestamps = {} - - def __init__(self, router_id): - self._router_id = router_id - - if router_id not in self._masters: - self._masters[router_id] = self - self._queue = [] - - self._master = self._masters[router_id] - - def _i_am_master(self): - return self == self._master - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if self._i_am_master(): - del self._masters[self._router_id] - - def _get_router_data_timestamp(self): - return self._router_timestamps.get(self._router_id, - datetime.datetime.min) - - def fetched_and_processed(self, timestamp): - """Records the data timestamp after it is used to update the router""" - new_timestamp = max(timestamp, self._get_router_data_timestamp()) - self._router_timestamps[self._router_id] = new_timestamp - - def queue_update(self, update): - """Queues an update from a worker - - This is the queue used to keep new updates that come in while a router - is being processed. These updates have already bubbled to the front of - the RouterProcessingQueue. - """ - self._master._queue.append(update) - - def updates(self): - """Processes the router until updates stop coming - - Only the master instance will process the router. However, updates may - come in from other workers while it is in progress. This method loops - until they stop coming. - """ - if self._i_am_master(): - while self._queue: - # Remove the update from the queue even if it is old. - update = self._queue.pop(0) - # Process the update only if it is fresh. 
- if self._get_router_data_timestamp() < update.timestamp: - yield update - - -class RouterProcessingQueue(object): - """Manager of the queue of routers to process.""" - def __init__(self): - self._queue = Queue.PriorityQueue() - - def add(self, update): - self._queue.put(update) - - def each_update_to_next_router(self): - """Grabs the next router from the queue and processes - - This method uses a for loop to process the router repeatedly until - updates stop bubbling to the front of the queue. - """ - next_update = self._queue.get() - - with ExclusiveRouterProcessor(next_update.id) as rp: - # Queue the update whether this worker is the master or not. - rp.queue_update(next_update) - - # Here, if the current worker is not the master, the call to - # rp.updates() will not yield and so this will essentially be a - # noop. - for update in rp.updates(): - yield (rp, update) - - -class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, - l3_ha_agent.AgentMixin, - manager.Manager): - """Manager for L3NatAgent - - API version history: - 1.0 initial Version - 1.1 changed the type of the routers parameter - to the routers_updated method. - It was previously a list of routers in dict format. - It is now a list of router IDs only. - Per rpc versioning rules, it is backwards compatible. - 1.2 - DVR support: new L3 agent methods added. - - add_arp_entry - - del_arp_entry - Needed by the L3 service when dealing with DVR - """ - RPC_API_VERSION = '1.2' - - OPTS = [ - cfg.StrOpt('agent_mode', default='legacy', - help=_("The working mode for the agent. Allowed modes are: " - "'legacy' - this preserves the existing behavior " - "where the L3 agent is deployed on a centralized " - "networking node to provide L3 services like DNAT, " - "and SNAT. Use this mode if you do not want to " - "adopt DVR. 'dvr' - this mode enables DVR " - "functionality and must be used for an L3 agent " - "that runs on a compute host. 'dvr_snat' - this " - "enables centralized SNAT support in conjunction " - "with DVR. This mode must be used for an L3 agent " - "running on a centralized node (or in single-host " - "deployments, e.g. 
devstack)")), - cfg.StrOpt('external_network_bridge', default='br-ex', - help=_("Name of bridge used for external network " - "traffic.")), - cfg.IntOpt('metadata_port', - default=9697, - help=_("TCP Port used by Neutron metadata namespace " - "proxy.")), - cfg.IntOpt('send_arp_for_ha', - default=3, - help=_("Send this many gratuitous ARPs for HA setup, if " - "less than or equal to 0, the feature is disabled")), - cfg.StrOpt('router_id', default='', - help=_("If namespaces is disabled, the l3 agent can only" - " configure a router that has the matching router " - "ID.")), - cfg.BoolOpt('handle_internal_only_routers', - default=True, - help=_("Agent should implement routers with no gateway")), - cfg.StrOpt('gateway_external_network_id', default='', - help=_("UUID of external network for routers implemented " - "by the agents.")), - cfg.BoolOpt('enable_metadata_proxy', default=True, - help=_("Allow running metadata proxy.")), - cfg.BoolOpt('router_delete_namespaces', default=False, - help=_("Delete namespace after removing a router.")), - cfg.StrOpt('metadata_proxy_socket', - default='$state_path/metadata_proxy', - help=_('Location of Metadata Proxy UNIX domain ' - 'socket')), - # added by jiahaojie 00209498 ---begin - cfg.StrOpt('cascaded_os_region_name', default=None, - help=_("region name to use")), - cfg.StrOpt('cascaded_auth_url', - default='http://127.0.0.1:35357/v2.0', - help=_("keystone auth url to use")), - cfg.StrOpt('cascaded_admin_user_name', - help=_("access neutron user name to use")), - cfg.StrOpt('cascaded_admin_password', - help=_("access neutron password to use")), - cfg.StrOpt('cascaded_tenant_name', - help=_("access neutron tenant to use")), - cfg.StrOpt('cascaded_extern_subnet_cidr', - default='100.64.1.0/24', - help=_("cascaded_extern_subnet_cidr")), - cfg.StrOpt('cascaded_start_extern_ip', - default='100.64.1.2', - help=_("cascaded_start_extern_ip")), - cfg.StrOpt('cascaded_end_extern_ip', - default='100.64.1.254', - help=_("cascaded_end_extern_ip")), - cfg.StrOpt('cascaded_extern_network_type', - default='flat', - help=_("cascaded_extern_net_type")), - cfg.StrOpt('cascaded_extern_physical_network', - default='external', - help=_("cascaded_extern_physical_net")), - # added by jiahaojie 00209498 ---end - ] - - def __init__(self, host, conf=None): - if conf: - self.conf = conf - else: - self.conf = cfg.CONF - self.root_helper = config.get_root_helper(self.conf) - self.router_info = {} - - self._check_config_params() - - try: - self.driver = importutils.import_object( - self.conf.interface_driver, - self.conf - ) - except Exception: - msg = _("Error importing interface driver " - "'%s'") % self.conf.interface_driver - LOG.error(msg) - raise SystemExit(1) - - self.context = context.get_admin_context_without_session() - self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) - self.fullsync = True - self.sync_progress = False - - # Get the list of service plugins from Neutron Server - # This is the first place where we contact neutron-server on startup - # so retry in case its not ready to respond. - retry_count = 5 - while True: - retry_count = retry_count - 1 - try: - self.neutron_service_plugins = ( - self.plugin_rpc.get_service_plugin_list(self.context)) - except n_rpc.RemoteError as e: - with excutils.save_and_reraise_exception() as ctx: - ctx.reraise = False - LOG.warning(_LW('l3-agent cannot check service plugins ' - 'enabled at the neutron server when ' - 'startup due to RPC error. It happens ' - 'when the server does not support this ' - 'RPC API. 
If the error is ' - 'UnsupportedVersion you can ignore this ' - 'warning. Detail message: %s'), e) - self.neutron_service_plugins = None - except messaging.MessagingTimeout as e: - with excutils.save_and_reraise_exception() as ctx: - if retry_count > 0: - ctx.reraise = False - LOG.warning(_LW('l3-agent cannot check service ' - 'plugins enabled on the neutron ' - 'server. Retrying. ' - 'Detail message: %s'), e) - continue - break - - self._clean_stale_namespaces = self.conf.use_namespaces - - # added by jiahaojie 00209498 save cascading data - self.network_map = {} - self.subnet_map = {} - self.cascaded_extern_net_id = None - - # dvr data - self.agent_gateway_port = None - self.agent_fip_count = 0 - self.local_subnets = LinkLocalAllocator( - os.path.join(self.conf.state_path, 'fip-linklocal-networks'), - FIP_LL_SUBNET) - self.fip_priorities = set(range(FIP_PR_START, FIP_PR_END)) - - self._queue = RouterProcessingQueue() - super(L3NATAgent, self).__init__(conf=self.conf) - - self.target_ex_net_id = None - self.use_ipv6 = ipv6_utils.is_enabled() - - def _check_config_params(self): - """Check items in configuration files. - - Check for required and invalid configuration items. - The actual values are not verified for correctness. - """ - if not self.conf.interface_driver: - msg = _('An interface driver must be specified') - LOG.error(msg) - raise SystemExit(1) - - if not self.conf.use_namespaces and not self.conf.router_id: - msg = _('Router id is required if not using namespaces.') - LOG.error(msg) - raise SystemExit(1) - - def _list_namespaces(self): - """Get a set of all router namespaces on host - - The argument routers is the list of routers that are recorded in - the database as being hosted on this node. - """ - try: - root_ip = ip_lib.IPWrapper(self.root_helper) - - host_namespaces = root_ip.get_namespaces(self.root_helper) - return set(ns for ns in host_namespaces - if (ns.startswith(NS_PREFIX) - or ns.startswith(SNAT_NS_PREFIX))) - except RuntimeError: - LOG.exception(_('RuntimeError in obtaining router list ' - 'for namespace cleanup.')) - return set() - - def _cleanup_namespaces(self, router_namespaces, router_ids): - """Destroy stale router namespaces on host when L3 agent restarts - - This routine is called when self._clean_stale_namespaces is True. - - The argument router_namespaces is the list of all routers namespaces - The argument router_ids is the list of ids for known routers. - """ - ns_to_ignore = set(NS_PREFIX + id for id in router_ids) - ns_to_ignore.update(SNAT_NS_PREFIX + id for id in router_ids) - ns_to_destroy = router_namespaces - ns_to_ignore - self._destroy_stale_router_namespaces(ns_to_destroy) - - def _destroy_stale_router_namespaces(self, router_namespaces): - """Destroys the stale router namespaces - - The argumenet router_namespaces is a list of stale router namespaces - - As some stale router namespaces may not be able to be deleted, only - one attempt will be made to delete them. 
- """ - for ns in router_namespaces: - ra.disable_ipv6_ra(ns[len(NS_PREFIX):], ns, self.root_helper) - try: - self._destroy_namespace(ns) - except RuntimeError: - LOG.exception(_('Failed to destroy stale router namespace ' - '%s'), ns) - self._clean_stale_namespaces = False - - def _destroy_namespace(self, ns): - if ns.startswith(NS_PREFIX): - if self.conf.enable_metadata_proxy: - self._destroy_metadata_proxy(ns[len(NS_PREFIX):], ns) - self._destroy_router_namespace(ns) - elif ns.startswith(FIP_NS_PREFIX): - self._destroy_fip_namespace(ns) - elif ns.startswith(SNAT_NS_PREFIX): - self._destroy_snat_namespace(ns) - - def _delete_namespace(self, ns_ip, ns): - try: - ns_ip.netns.delete(ns) - except RuntimeError: - msg = _('Failed trying to delete namespace: %s') % ns - LOG.exception(msg) - - def _destroy_snat_namespace(self, ns): - ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns) - # delete internal interfaces - for d in ns_ip.get_devices(exclude_loopback=True): - if d.name.startswith(SNAT_INT_DEV_PREFIX): - LOG.debug('Unplugging DVR device %s', d.name) - self.driver.unplug(d.name, namespace=ns, - prefix=SNAT_INT_DEV_PREFIX) - - # TODO(mrsmith): delete ext-gw-port - LOG.debug('DVR: destroy snat ns: %s', ns) - if self.conf.router_delete_namespaces: - self._delete_namespace(ns_ip, ns) - - def _destroy_fip_namespace(self, ns): - ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns) - for d in ns_ip.get_devices(exclude_loopback=True): - if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX): - # internal link between IRs and FIP NS - ns_ip.del_veth(d.name) - elif d.name.startswith(FIP_EXT_DEV_PREFIX): - # single port from FIP NS to br-ext - # TODO(carl) Where does the port get deleted? - LOG.debug('DVR: unplug: %s', d.name) - self.driver.unplug(d.name, - bridge=self.conf.external_network_bridge, - namespace=ns, - prefix=FIP_EXT_DEV_PREFIX) - LOG.debug('DVR: destroy fip ns: %s', ns) - # TODO(mrsmith): add LOG warn if fip count != 0 - if self.conf.router_delete_namespaces: - self._delete_namespace(ns_ip, ns) - self.agent_gateway_port = None - - def _destroy_router_namespace(self, ns): - ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns) - for d in ns_ip.get_devices(exclude_loopback=True): - if d.name.startswith(INTERNAL_DEV_PREFIX): - # device is on default bridge - self.driver.unplug(d.name, namespace=ns, - prefix=INTERNAL_DEV_PREFIX) - elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX): - ns_ip.del_veth(d.name) - elif d.name.startswith(EXTERNAL_DEV_PREFIX): - self.driver.unplug(d.name, - bridge=self.conf.external_network_bridge, - namespace=ns, - prefix=EXTERNAL_DEV_PREFIX) - - if self.conf.router_delete_namespaces: - self._delete_namespace(ns_ip, ns) - - def _create_namespace(self, name): - ip_wrapper_root = ip_lib.IPWrapper(self.root_helper) - ip_wrapper = ip_wrapper_root.ensure_namespace(name) - ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) - if self.use_ipv6: - ip_wrapper.netns.execute(['sysctl', '-w', - 'net.ipv6.conf.all.forwarding=1']) - - def _create_router_namespace(self, ri): - self._create_namespace(ri.ns_name) - - def _fetch_external_net_id(self, force=False): - """Find UUID of single external network for this agent.""" - if self.conf.gateway_external_network_id: - return self.conf.gateway_external_network_id - - # L3 agent doesn't use external_network_bridge to handle external - # networks, so bridge_mappings with provider networks will be used - # and the L3 agent is able to handle any external networks. 
- if not self.conf.external_network_bridge: - return - - if not force and self.target_ex_net_id: - return self.target_ex_net_id - - try: - self.target_ex_net_id = self.plugin_rpc.get_external_network_id( - self.context) - return self.target_ex_net_id - except n_rpc.RemoteError as e: - with excutils.save_and_reraise_exception() as ctx: - if e.exc_type == 'TooManyExternalNetworks': - ctx.reraise = False - msg = _( - "The 'gateway_external_network_id' option must be " - "configured for this agent as Neutron has more than " - "one external network.") - raise Exception(msg) - - def _router_added(self, router_id, router): - ri = RouterInfo(router_id, self.root_helper, - self.conf.use_namespaces, router, - use_ipv6=self.use_ipv6) - self.router_info[router_id] = ri -# may be deleted. by jiahaojie 00209498 -# if self.conf.use_namespaces: -# self._create_router_namespace(ri) -# for c, r in self.metadata_filter_rules(): -# ri.iptables_manager.ipv4['filter'].add_rule(c, r) -# for c, r in self.metadata_nat_rules(): -# ri.iptables_manager.ipv4['nat'].add_rule(c, r) -# ri.iptables_manager.apply() -# self.process_router_add(ri) -# -# if ri.is_ha: -# self.process_ha_router_added(ri) -# -# if self.conf.enable_metadata_proxy: -# if ri.is_ha: -# self._add_keepalived_notifiers(ri) -# else: -# self._spawn_metadata_proxy(ri.router_id, ri.ns_name) - - def _router_removed(self, router_id): - ri = self.router_info.get(router_id) - if ri is None: - LOG.warn(_("Info for router %s were not found. " - "Skipping router removal"), router_id) - return - -# may be deleted. by jiahaojie 00209498 -# if ri.is_ha: -# self.process_ha_router_removed(ri) -# -# ri.router['gw_port'] = None -# ri.router[l3_constants.INTERFACE_KEY] = [] -# ri.router[l3_constants.FLOATINGIP_KEY] = [] -# self.process_router(ri) -# for c, r in self.metadata_filter_rules(): -# ri.iptables_manager.ipv4['filter'].remove_rule(c, r) -# for c, r in self.metadata_nat_rules(): -# ri.iptables_manager.ipv4['nat'].remove_rule(c, r) -# ri.iptables_manager.apply() -# if self.conf.enable_metadata_proxy: -# self._destroy_metadata_proxy(ri.router_id, ri.ns_name) -# del self.router_info[router_id] -# self._destroy_router_namespace(ri.ns_name) - - def _get_metadata_proxy_callback(self, router_id): - - def callback(pid_file): - metadata_proxy_socket = cfg.CONF.metadata_proxy_socket - proxy_cmd = ['neutron-ns-metadata-proxy', - '--pid_file=%s' % pid_file, - '--metadata_proxy_socket=%s' % metadata_proxy_socket, - '--router_id=%s' % router_id, - '--state_path=%s' % self.conf.state_path, - '--metadata_port=%s' % self.conf.metadata_port] - proxy_cmd.extend(config.get_log_args( - cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % - router_id)) - return proxy_cmd - - return callback - - def _get_metadata_proxy_process_manager(self, router_id, ns_name): - return external_process.ProcessManager( - self.conf, - router_id, - self.root_helper, - ns_name) - - def _spawn_metadata_proxy(self, router_id, ns_name): - callback = self._get_metadata_proxy_callback(router_id) - pm = self._get_metadata_proxy_process_manager(router_id, ns_name) - pm.enable(callback) - - def _destroy_metadata_proxy(self, router_id, ns_name): - pm = self._get_metadata_proxy_process_manager(router_id, ns_name) - pm.disable() - - #this function is added by jiahaojie 00209498 - def get_one_compute_port(self, ri, port): - # Get DVR ports for subnet - if 'id' not in port['subnet'] or ri.router['distributed'] is False: - return - - subnet_ports = ( - self.plugin_rpc.get_ports_by_subnet(self.context, - 
port['subnet']['id'])) - LOG.debug(_('DVR: subnet_ports: %s'), subnet_ports) - - for p in subnet_ports: - # TODO: check for multiple subnets on port case - if ( 'compute' in p['device_owner'] and - p['binding:host_id'] == self.conf.host and - p['binding:profile']): - return p - - def _set_subnet_arp_info(self, ri, port): - """Set ARP info retrieved from Plugin for existing ports.""" - if 'id' not in port['subnet'] or not ri.router['distributed']: - return - subnet_id = port['subnet']['id'] - subnet_ports = ( - self.plugin_rpc.get_ports_by_subnet(self.context, - subnet_id)) - - for p in subnet_ports: - if (p['device_owner'] not in ( - l3_constants.DEVICE_OWNER_ROUTER_INTF, - l3_constants.DEVICE_OWNER_DVR_INTERFACE)): - for fixed_ip in p['fixed_ips']: - self._update_arp_entry(ri, fixed_ip['ip_address'], - p['mac_address'], - subnet_id, 'add') - - def _set_subnet_info(self, port): - ips = port['fixed_ips'] - if not ips: - raise Exception(_("Router port %s has no IP address") % port['id']) - if len(ips) > 1: - LOG.error(_("Ignoring multiple IPs on router port %s"), - port['id']) - prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen - port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) - - def _get_existing_devices(self, ri): - ip_wrapper = ip_lib.IPWrapper(root_helper=self.root_helper, - namespace=ri.ns_name) - ip_devs = ip_wrapper.get_devices(exclude_loopback=True) - return [ip_dev.name for ip_dev in ip_devs] - - #added by jiahaojie 00209498 ---begin - def get_neutron_client(self): - kwargs = {'auth_token': None, - 'username': self.conf.cascaded_admin_user_name, - 'password': self.conf.cascaded_admin_password, - 'aws_creds': None, - 'tenant': self.conf.cascaded_tenant_name, - # 'tenant_id':'e8f280855dbe42a189eebb0f3ecb94bb', #context.values['tenant'], - 'auth_url': self.conf.cascaded_auth_url, - 'roles': self.context.roles, - 'is_admin': self.context.is_admin, - 'region_name': self.conf.cascaded_os_region_name} - reqCon = neutron_proxy_context.RequestContext(**kwargs) - openStackClients = clients.OpenStackClients(reqCon) - neutronClient = openStackClients.neutron() - return neutronClient - - def create_cascaded_router(self, router_name, extern_net_id): - req_props = {'name': router_name, - 'distributed':'True', - "external_gateway_info": { - "network_id": extern_net_id}} - neutron_client = self.get_neutron_client() - router_ret = neutron_client.create_router({'router': req_props}) - if(not router_ret or - (router_ret and (not router_ret.get('router')))): - LOG.debug(_("cascaded router created failed, " - "router name:%s"), router_name) - return - LOG.debug(_('create router, Response:%s'), str(router_ret)) - return router_ret['router']['id'] - - def delete_cascaded_router(self, router_id): - neutron_client = self.get_neutron_client() - neutron_client.delete_router(router_id) - - def get_or_create_cascaded_net_id(self, port): - '''only get cascaded net_id from port binding:profile''' - '''not implement creating cascaded network''' - cascaded_net_id = self.network_map.get(port['network_id']) - if cascaded_net_id: - return cascaded_net_id - profile = port['binding:profile'] - #profile = json.load(profile) - cascaded_net_id_dict = profile.get('cascaded_net_id') - if(not cascaded_net_id_dict): - return - cascaded_net_id_dict = cascaded_net_id_dict.get(port['network_id']) - cascaded_net_id = cascaded_net_id_dict.get(cfg.CONF.host) - if(cascaded_net_id): - self.network_map[port['network_id']] = cascaded_net_id - return cascaded_net_id - - def 
get_or_create_cascaded_subnet_id(self, subnet_id, port): - '''only get cascaded subnet_id from port binding:profile''' - '''not implement creating cascaded subnet''' - cascaded_subnet_id = \ - self.subnet_map.get(port['fixed_ips'][0]['subnet_id']) - if cascaded_subnet_id: - return cascaded_subnet_id - profile = port['binding:profile'] - #profile = json.load(profile) - cascaded_subnet_id_dict = profile.get('cascaded_subnet_id') - if(not cascaded_subnet_id_dict): - return - cascaded_subnet_id_dict = cascaded_subnet_id_dict.get(subnet_id) - if(not cascaded_subnet_id_dict): - return - cascaded_subnet_id = cascaded_subnet_id_dict.get(cfg.CONF.host) - if(cascaded_subnet_id): - self.subnet_map[port['fixed_ips'][0]['subnet_id']] = \ - cascaded_subnet_id - return cascaded_subnet_id - - def create_cascaded_router_port(self, cascaded_net_id, port): - neutron_client = self.get_neutron_client() - mac_address = port['mac_address'] - ip_address = port['fixed_ips'][0]['ip_address'] - profile = {'cascading_port_id': port['id']} - req_props = {'network_id': cascaded_net_id, - 'name': 'router_port', - 'admin_state_up': True, - 'fixed_ips': [{'ip_address': ip_address}], - 'mac_address': mac_address, - 'binding:profile': profile - } - port_ret = neutron_client.create_port({'port': req_props}) - if(not port_ret or - (port_ret and (not port_ret.get('port')))): - LOG.error(_("ERR:router port created failed, " - "ip_address:%s, mac_address:%s"), - ip_address, mac_address) - return - LOG.debug(_('create router port, Response:%s'), str(port_ret)) - return port_ret['port'].get('id') - - def delete_cascaded_router_port(self, cascaded_port_id): - neutron_client = self.get_neutron_client() - bodyResponse = neutron_client.delete_port(cascaded_port_id) - LOG.debug(_('delete port, Response:%s'), str(bodyResponse)) - return bodyResponse - - def add_interface_for_cascaded_router(self, cascaded_router_id, - cascaded_subnet_id, - cascaded_port_id): - neutron_client = self.get_neutron_client() - #'subnet_id': cascaded_subnet_id, - req_props = {'port_id': cascaded_port_id} - ret = neutron_client.add_interface_router(cascaded_router_id, - req_props) - LOG.debug(_('add interface for router port, Response:%s'), str(ret)) - return - - def delete_interface_for_cascaded_router(self, cascaded_router_id, - cascaded_subnet_id): - neutron_client = self.get_neutron_client() - req_props = {'subnet_id': cascaded_subnet_id} - ret = neutron_client.remove_interface_router(cascaded_router_id, - req_props) - LOG.debug(_('delete interface for router port, Response:%s'), str(ret)) - return - - def create_cascaded_extern_net(self): - net_name = 'net_extern' - net_type = self.conf.cascaded_extern_network_type - physical_net = self.conf.cascaded_extern_physical_network - req_props = {'name': net_name, - 'provider:network_type': net_type, - 'provider:physical_network': physical_net, - 'router:external': True} - neutron_client = self.get_neutron_client() - net_ret = neutron_client.create_network({'network': req_props}) - if(not net_ret or - (net_ret and (not net_ret.get('network')))): - LOG.debug(_("cascaded extern net created failed, " - "net name:%s"), net_name) - return - LOG.debug(_('create extern net, Response:%s'), str(net_ret)) - net_id = net_ret['network']['id'] - alloc_ip_pools = {'start': self.conf.cascaded_start_extern_ip, - 'end': self.conf.cascaded_end_extern_ip} - subnet_req_props = {'name': 'subnet_extern', - 'network_id': net_id, - 'cidr': self.conf.cascaded_extern_subnet_cidr, - 'allocation_pools': [alloc_ip_pools], - 'enable_dhcp': 
False, - "ip_version": "4"} - subnet_ret = neutron_client.create_subnet({'subnet': subnet_req_props}) - if(not subnet_ret or - (subnet_ret and (not subnet_ret.get('subnet')))): - LOG.debug(_("cascaded extern subnet created failed, " - "net name:%s"), net_name) - return - return net_id - - def get_or_create_cascaded_extern_net(self): - if(self.cascaded_extern_net_id): - return self.cascaded_extern_net_id - net_id = self.create_cascaded_extern_net() - if(not net_id): - return - self.cascaded_extern_net_id = net_id - return net_id - - def get_cascaded_router_gateway_ip(self, router_id): - search_opts = {'device_id': router_id, - 'device_owner': 'network:router_gateway'} - neutron_client = self.get_neutron_client() - port_ret = neutron_client.list_ports(**search_opts) - if(not port_ret or - (port_ret and (not port_ret.get('ports')))): - LOG.debug(_("cascaded router gateway_ip get failed, " - "router id:%s"), router_id) - return - port = port_ret['ports'] - if(len(port) == 0): - return - return port[0]['fixed_ips'][0]['ip_address'] - - def update_extra_routes_for_cascaded_router(self, router_id, extra_routes): - req_props = {"routes": [{ - 'nexthop': n, - 'destination': d} for d, n in extra_routes.items()]} - neutron_client = self.get_neutron_client() - router_ret = neutron_client.update_router(router_id, - {'router': req_props}) - if(not router_ret or - (router_ret and (not router_ret.get('router')))): - LOG.debug(_("cascaded router update failed, " - "router id:%s"), router_id) - return - LOG.debug(_('update router, Response:%s'), str(router_ret)) - return router_ret['router']['id'] - - def create_cascaded_extern_net_and_router(self, existing_port_ids, - internal_ports, ri): - if(len(existing_port_ids) == 0 and len(internal_ports) > 0 and - not ri.cascaded_router_id): - extern_net_id = self.get_or_create_cascaded_extern_net() - if(not extern_net_id): - LOG.error(_('ERR: can not get or create cascaded extern net')) - return - router_name = ri.router['name'] - router_id = self.create_cascaded_router(router_name, extern_net_id) - if(not router_id): - LOG.error(_('ERR: can not create cascaded router: %s'), - router_name) - return - gateway_ip = self.get_cascaded_router_gateway_ip(router_id) - if(not gateway_ip): - LOG.error(_('ERR: can not get cascaded router gateway ip')) - return - self.plugin_rpc.update_router_extern_ip_map(self.context, - ri.router['id'], - gateway_ip) - ri.cascaded_router_id = router_id - pass - #added by jiahaojie 00209498 ---end - - @common_utils.exception_logger() - def process_router(self, ri): - # TODO(mrsmith) - we shouldn't need to check here - if 'distributed' not in ri.router: - ri.router['distributed'] = False -# ri.iptables_manager.defer_apply_on() -# ex_gw_port = self._get_ex_gw_port(ri) - internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) -# snat_ports = ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) - existing_port_ids = set([p['id'] for p in ri.internal_ports]) - current_port_ids = set([p['id'] for p in internal_ports - if p['admin_state_up']]) - new_ports = [p for p in internal_ports if - p['id'] in current_port_ids and - p['id'] not in existing_port_ids] - old_ports = [p for p in ri.internal_ports if - p['id'] not in current_port_ids] - -# new_ipv6_port = False -# old_ipv6_port = False - for p in new_ports: - extra_routes = self.plugin_rpc.get_extra_routes_by_subnet( - self.context, - ri.router['id'], - p['fixed_ips'][0]['subnet_id']) - LOG.debug(_("Cascade Info, new ports, extra_routes:%s from " - "plugin_rpc.get_extra_routes_by_subnet"), 
extra_routes) - if('not_bound_network' in extra_routes): - continue - if ('big2Layer' not in extra_routes and - 'local_network' not in extra_routes): - next_hop = extra_routes[0][0] - dest_cidr = extra_routes[0][1] - if(not next_hop): - continue - # if(not ri.extern_extra_routes.get(dest_cidr, None)): - ri.extern_extra_routes[dest_cidr] = next_hop - ri.extra_routes_is_update = True - ri.internal_ports.append(p) - continue - local_existing_port_ids = set([pt['id'] - for pt in ri.local_internal_ports]) - self.create_cascaded_extern_net_and_router(local_existing_port_ids, - internal_ports, ri) - vm_port = self.get_one_compute_port(ri, p) - cascaded_net_id = self.get_or_create_cascaded_net_id(vm_port) - if(not cascaded_net_id): - LOG.error(_('ERR: can not get cascaded net_id from port' - ' %s by get_or_create_cascaded_net_id!'), p) - return - cascaded_subnet_id = \ - self.get_or_create_cascaded_subnet_id( - p['fixed_ips'][0]['subnet_id'], - vm_port) - if(not cascaded_subnet_id): - LOG.error(_('ERR: can not get cascaded subnet_id from port' - ' %s by get_or_create_cascaded_subnet_id!'), p) - return - cascaded_port_id = \ - self.create_cascaded_router_port(cascaded_net_id, p) - if(not cascaded_port_id): - return - p['cascaded_port_id'] = cascaded_port_id - if(not ri.cascaded_router_id): - LOG.error(_('ERR: can not create cascaded router port from' - 'port %s by create_cascaded_router_port!'), p) - return - self.add_interface_for_cascaded_router(ri.cascaded_router_id, - cascaded_subnet_id, - cascaded_port_id) - LOG.debug(_("Add interface for cascaded router, router:%s" - "cascaded_subnet_id:%s, cascaded_port_id:%s"), - ri.cascaded_router_id, cascaded_subnet_id, - cascaded_port_id) - ri.internal_ports.append(p) - ri.local_internal_ports.append(p) -#deleted by jiahaojie 00209498 -# self._set_subnet_info(p) -# self.internal_network_added(ri, p) -# ri.internal_ports.append(p) -# self._set_subnet_arp_info(ri, p) -# if (not new_ipv6_port and -# netaddr.IPNetwork(p['subnet']['cidr']).version == 6): -# new_ipv6_port = True - - for p in old_ports: - extra_routes = self.plugin_rpc.get_extra_routes_by_subnet( - self.context, - ri.router['id'], - p['fixed_ips'][0]['subnet_id']) - LOG.debug(_("Cascade Info, old ports, extra_routes:%s from " - "plugin_rpc.get_extra_routes_by_subnet"), extra_routes) - if('not_bound_network' in extra_routes): - continue - if ('big2Layer' not in extra_routes and - 'local_network' not in extra_routes): - next_hop = extra_routes[0][0] - dest_cidr = extra_routes[0][1] - # if(not ri.extern_extra_routes.get(dest_cidr, None)): - ri.extern_extra_routes.pop(dest_cidr, None) - ri.extra_routes_is_update = True - ri.internal_ports.remove(p) - continue - - cascaded_subnet_id = self.subnet_map.get( - p['fixed_ips'][0]['subnet_id']) - if(not cascaded_subnet_id): - LOG.error(_('ERR: can not delete interface for cascaded' - ' router, not find cascaded_subnet_id!')) - return - self.delete_interface_for_cascaded_router(ri.cascaded_router_id, - cascaded_subnet_id) - # self.delete_cascaded_router_port(p['cascaded_port_id']) - ri.internal_ports.remove(p) - ri.local_internal_ports.remove(p) - - if ri.cascaded_router_id and (ri.extra_routes_is_update): - self.update_extra_routes_for_cascaded_router( - ri.cascaded_router_id, - ri.extern_extra_routes) - ri.extra_routes_is_update = False - - if(len(ri.local_internal_ports) == 0 and ri.cascaded_router_id): - ri.internal_ports = [] - ri.local_internal_ports = [] - ri.extern_extra_routes = {} - ri.extra_routes_is_update = False - 
self.delete_cascaded_router(ri.cascaded_router_id) - self.plugin_rpc.update_router_extern_ip_map(self.context, - ri.router['id'], - None) - ri.cascaded_router_id = None - -#deleted by jiahaojie 00209498 -# self.internal_network_removed(ri, p) -# ri.internal_ports.remove(p) -# if (not old_ipv6_port and -# netaddr.IPNetwork(p['subnet']['cidr']).version == 6): -# old_ipv6_port = True - -# if new_ipv6_port or old_ipv6_port: -# ra.enable_ipv6_ra(ri.router_id, -# ri.ns_name, -# internal_ports, -# self.get_internal_device_name, -# self.root_helper) - - # not support external network, so return. by jiahaojie 00209498 - # return -# commented by jiahaojie 00209498 -# existing_devices = self._get_existing_devices(ri) -# current_internal_devs = set([n for n in existing_devices -# if n.startswith(INTERNAL_DEV_PREFIX)]) -# current_port_devs = set([self.get_internal_device_name(id) for -# id in current_port_ids]) -# stale_devs = current_internal_devs - current_port_devs -# for stale_dev in stale_devs: -# LOG.debug(_('Deleting stale internal router device: %s'), -# stale_dev) -# self.driver.unplug(stale_dev, -# namespace=ri.ns_name, -# prefix=INTERNAL_DEV_PREFIX) -# -# # TODO(salv-orlando): RouterInfo would be a better place for -# # this logic too -# ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or -# ri.ex_gw_port and ri.ex_gw_port['id']) -# -# interface_name = None -# if ex_gw_port_id: -# interface_name = self.get_external_device_name(ex_gw_port_id) -# if ex_gw_port: -# def _gateway_ports_equal(port1, port2): -# def _get_filtered_dict(d, ignore): -# return dict((k, v) for k, v in d.iteritems() -# if k not in ignore) -# -# keys_to_ignore = set(['binding:host_id']) -# port1_filtered = _get_filtered_dict(port1, keys_to_ignore) -# port2_filtered = _get_filtered_dict(port2, keys_to_ignore) -# return port1_filtered == port2_filtered -# -# self._set_subnet_info(ex_gw_port) -# if not ri.ex_gw_port: -# self.external_gateway_added(ri, ex_gw_port, interface_name) -# elif not _gateway_ports_equal(ex_gw_port, ri.ex_gw_port): -# self.external_gateway_updated(ri, ex_gw_port, interface_name) -# elif not ex_gw_port and ri.ex_gw_port: -# self.external_gateway_removed(ri, ri.ex_gw_port, interface_name) -# -# stale_devs = [dev for dev in existing_devices -# if dev.startswith(EXTERNAL_DEV_PREFIX) -# and dev != interface_name] -# for stale_dev in stale_devs: -# LOG.debug(_('Deleting stale external router device: %s'), -# stale_dev) -# self.driver.unplug(stale_dev, -# bridge=self.conf.external_network_bridge, -# namespace=ri.ns_name, -# prefix=EXTERNAL_DEV_PREFIX) -# -# # Process static routes for router -# self.routes_updated(ri) -# # Process SNAT rules for external gateway -# if (not ri.router['distributed'] or -# ex_gw_port and ri.router['gw_port_host'] == self.host): -# # Get IPv4 only internal CIDRs -# internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports -# if netaddr.IPNetwork(p['ip_cidr']).version == 4] -# ri.perform_snat_action(self._handle_router_snat_rules, -# internal_cidrs, interface_name) -# -# # Process SNAT/DNAT rules for floating IPs -# fip_statuses = {} -# try: -# if ex_gw_port: -# existing_floating_ips = ri.floating_ips -# self.process_router_floating_ip_nat_rules(ri) -# ri.iptables_manager.defer_apply_off() -# # Once NAT rules for floating IPs are safely in place -# # configure their addresses on the external gateway port -# fip_statuses = self.process_router_floating_ip_addresses( -# ri, ex_gw_port) -# except Exception: -# # TODO(salv-orlando): Less broad catching -# # All floating IPs must be put 
in error state -# for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []): -# fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR -# -# if ex_gw_port: -# # Identify floating IPs which were disabled -# ri.floating_ips = set(fip_statuses.keys()) -# for fip_id in existing_floating_ips - ri.floating_ips: -# fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN -# # Update floating IP status on the neutron server -# self.plugin_rpc.update_floatingip_statuses( -# self.context, ri.router_id, fip_statuses) -# -# # Update ex_gw_port and enable_snat on the router info cache -# ri.ex_gw_port = ex_gw_port -# ri.snat_ports = snat_ports -# ri.enable_snat = ri.router.get('enable_snat') -# -# if ri.is_ha: -# if ri.ha_port: -# ri.spawn_keepalived() -# else: -# ri.disable_keepalived() - - def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs, - interface_name, action): - # Remove all the rules - # This is safe because if use_namespaces is set as False - # then the agent can only configure one router, otherwise - # each router's SNAT rules will be in their own namespace - if not ri.router['distributed']: - iptables_manager = ri.iptables_manager - elif ri.snat_iptables_manager: - iptables_manager = ri.snat_iptables_manager - else: - LOG.debug("DVR router: no snat rules to be handled") - return - - iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') - iptables_manager.ipv4['nat'].empty_chain('snat') - - if not ri.router['distributed']: - # Add back the jump to float-snat - iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') - - # And add them back if the action is add_rules - if action == 'add_rules' and ex_gw_port: - # ex_gw_port should not be None in this case - # NAT rules are added only if ex_gw_port has an IPv4 address - for ip_addr in ex_gw_port['fixed_ips']: - ex_gw_ip = ip_addr['ip_address'] - if netaddr.IPAddress(ex_gw_ip).version == 4: - rules = self.external_gateway_nat_rules(ex_gw_ip, - internal_cidrs, - interface_name) - for rule in rules: - iptables_manager.ipv4['nat'].add_rule(*rule) - break - iptables_manager.apply() - - def _handle_router_fip_nat_rules(self, ri, interface_name, action): - """Configures NAT rules for Floating IPs for DVR. - - Remove all the rules. This is safe because if - use_namespaces is set as False then the agent can - only configure one router, otherwise each router's - NAT rules will be in their own namespace. - """ - ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') - ri.iptables_manager.ipv4['nat'].empty_chain('snat') - - # Add back the jump to float-snat - ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') - - # And add them back if the action is add_rules - if action == 'add_rules' and interface_name: - rule = ('POSTROUTING', '! -i %(interface_name)s ' - '! -o %(interface_name)s -m conntrack ! ' - '--ctstate DNAT -j ACCEPT' % - {'interface_name': interface_name}) - ri.iptables_manager.ipv4['nat'].add_rule(*rule) - ri.iptables_manager.apply() - - def process_router_floating_ip_nat_rules(self, ri): - """Configure NAT rules for the router's floating IPs. - - Configures iptables rules for the floating ips of the given router - """ - # Clear out all iptables rules for floating ips - ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') - - floating_ips = self.get_floating_ips(ri) - # Loop once to ensure that floating ips are configured. - for fip in floating_ips: - # Rebuild iptables rules for the floating ip. 
- fixed = fip['fixed_ip_address'] - fip_ip = fip['floating_ip_address'] - for chain, rule in self.floating_forward_rules(fip_ip, fixed): - ri.iptables_manager.ipv4['nat'].add_rule(chain, rule, - tag='floating_ip') - - ri.iptables_manager.apply() - - def _get_external_device_interface_name(self, ri, ex_gw_port, - floating_ips): - if ri.router['distributed']: - # filter out only FIPs for this host/agent - floating_ips = [i for i in floating_ips if i['host'] == self.host] - if floating_ips and self.agent_gateway_port is None: - self._create_agent_gateway_port(ri, floating_ips[0] - ['floating_network_id']) - - if self.agent_gateway_port: - if floating_ips and ri.dist_fip_count == 0: - self.create_rtr_2_fip_link(ri, floating_ips[0] - ['floating_network_id']) - return self.get_rtr_int_device_name(ri.router_id) - else: - # there are no fips or agent port, no work to do - return None - - return self.get_external_device_name(ex_gw_port['id']) - - def _add_floating_ip(self, ri, fip, interface_name, device): - fip_ip = fip['floating_ip_address'] - ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX - - if ri.is_ha: - self._add_vip(ri, ip_cidr, interface_name) - else: - net = netaddr.IPNetwork(ip_cidr) - try: - device.addr.add(net.version, ip_cidr, str(net.broadcast)) - except (processutils.UnknownArgumentError, - processutils.ProcessExecutionError): - # any exception occurred here should cause the floating IP - # to be set in error state - LOG.warn(_("Unable to configure IP address for " - "floating IP: %s"), fip['id']) - return l3_constants.FLOATINGIP_STATUS_ERROR - if ri.router['distributed']: - # Special Handling for DVR - update FIP namespace - # and ri.namespace to handle DVR based FIP - self.floating_ip_added_dist(ri, fip) - else: - # As GARP is processed in a distinct thread the call below - # won't raise an exception to be handled. - self._send_gratuitous_arp_packet( - ri.ns_name, interface_name, fip_ip) - return l3_constants.FLOATINGIP_STATUS_ACTIVE - - def _remove_floating_ip(self, ri, device, ip_cidr): - if ri.is_ha: - self._remove_vip(ri, ip_cidr) - else: - net = netaddr.IPNetwork(ip_cidr) - device.addr.delete(net.version, ip_cidr) - self.driver.delete_conntrack_state(root_helper=self.root_helper, - namespace=ri.ns_name, - ip=ip_cidr) - if ri.router['distributed']: - self.floating_ip_removed_dist(ri, ip_cidr) - - def process_router_floating_ip_addresses(self, ri, ex_gw_port): - """Configure IP addresses on router's external gateway interface. - - Ensures addresses for existing floating IPs and cleans up - those that should not longer be configured. - """ - - fip_statuses = {} - floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) - interface_name = self._get_external_device_interface_name( - ri, ex_gw_port, floating_ips) - if interface_name is None: - return fip_statuses - - device = ip_lib.IPDevice(interface_name, self.root_helper, - namespace=ri.ns_name) - existing_cidrs = set([addr['cidr'] for addr in device.addr.list()]) - new_cidrs = set() - - # Loop once to ensure that floating ips are configured. 
- for fip in floating_ips: - fip_ip = fip['floating_ip_address'] - ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX - new_cidrs.add(ip_cidr) - fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE - if ip_cidr not in existing_cidrs: - fip_statuses[fip['id']] = self._add_floating_ip( - ri, fip, interface_name, device) - - fips_to_remove = ( - ip_cidr for ip_cidr in existing_cidrs - new_cidrs if - ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX)) - for ip_cidr in fips_to_remove: - self._remove_floating_ip(ri, device, ip_cidr) - - return fip_statuses - - def _get_ex_gw_port(self, ri): - return ri.router.get('gw_port') - - def _arping(self, ns_name, interface_name, ip_address, distributed=False): - if distributed: - device = ip_lib.IPDevice(interface_name, self.root_helper, - namespace=ns_name) - ip_cidr = str(ip_address) + FLOATING_IP_CIDR_SUFFIX - net = netaddr.IPNetwork(ip_cidr) - device.addr.add(net.version, ip_cidr, str(net.broadcast)) - - arping_cmd = ['arping', '-A', - '-I', interface_name, - '-c', self.conf.send_arp_for_ha, - ip_address] - try: - ip_wrapper = ip_lib.IPWrapper(self.root_helper, - namespace=ns_name) - ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) - except Exception as e: - LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) - if distributed: - device.addr.delete(net.version, ip_cidr) - - def _send_gratuitous_arp_packet(self, ns_name, interface_name, ip_address, - distributed=False): - if self.conf.send_arp_for_ha > 0: - eventlet.spawn_n(self._arping, ns_name, interface_name, ip_address, - distributed) - - def get_internal_port(self, ri, subnet_id): - """Return internal router port based on subnet_id.""" - router_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) - for port in router_ports: - fips = port['fixed_ips'] - for f in fips: - if f['subnet_id'] == subnet_id: - return port - - def get_internal_device_name(self, port_id): - return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_external_device_name(self, port_id): - return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_fip_ext_device_name(self, port_id): - return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_rtr_int_device_name(self, router_id): - return (ROUTER_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] - - def get_fip_int_device_name(self, router_id): - return (FIP_2_ROUTER_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] - - def get_snat_int_device_name(self, port_id): - return (SNAT_INT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] - - def get_fip_ns_name(self, ext_net_id): - return (FIP_NS_PREFIX + ext_net_id) - - def get_snat_ns_name(self, router_id): - return (SNAT_NS_PREFIX + router_id) - - def get_snat_interfaces(self, ri): - return ri.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) - - def get_floating_ips(self, ri): - """Filter Floating IPs to be hosted on this agent.""" - floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, []) - if ri.router['distributed']: - floating_ips = [i for i in floating_ips if i['host'] == self.host] - return floating_ips - - def _map_internal_interfaces(self, ri, int_port, snat_ports): - """Return the SNAT port for the given internal interface port.""" - fixed_ip = int_port['fixed_ips'][0] - subnet_id = fixed_ip['subnet_id'] - match_port = [p for p in snat_ports if - p['fixed_ips'][0]['subnet_id'] == subnet_id] - if match_port: - return match_port[0] - else: - LOG.error(_('DVR: no map match_port found!')) - - def _create_dvr_gateway(self, ri, ex_gw_port, 
gw_interface_name, - snat_ports): - """Create SNAT namespace.""" - snat_ns_name = self.get_snat_ns_name(ri.router['id']) - self._create_namespace(snat_ns_name) - # connect snat_ports to br_int from SNAT namespace - for port in snat_ports: - # create interface_name - self._set_subnet_info(port) - interface_name = self.get_snat_int_device_name(port['id']) - self._internal_network_added(snat_ns_name, port['network_id'], - port['id'], port['ip_cidr'], - port['mac_address'], interface_name, - SNAT_INT_DEV_PREFIX) - self._external_gateway_added(ri, ex_gw_port, gw_interface_name, - snat_ns_name, preserve_ips=[]) - ri.snat_iptables_manager = iptables_manager.IptablesManager( - root_helper=self.root_helper, - namespace=snat_ns_name, - use_ipv6=self.use_ipv6) - # kicks the FW Agent to add rules for the snat namespace - self.process_router_add(ri) - - def external_gateway_added(self, ri, ex_gw_port, interface_name): - if ri.router['distributed']: - ip_wrapr = ip_lib.IPWrapper(self.root_helper, namespace=ri.ns_name) - ip_wrapr.netns.execute(['sysctl', '-w', - 'net.ipv4.conf.all.send_redirects=0']) - snat_ports = self.get_snat_interfaces(ri) - for p in ri.internal_ports: - gateway = self._map_internal_interfaces(ri, p, snat_ports) - id_name = self.get_internal_device_name(p['id']) - if gateway: - self._snat_redirect_add(ri, gateway['fixed_ips'][0] - ['ip_address'], p, id_name) - - if (self.conf.agent_mode == 'dvr_snat' and - ri.router['gw_port_host'] == self.host): - self._create_dvr_gateway(ri, ex_gw_port, interface_name, - snat_ports) - for port in snat_ports: - for ip in port['fixed_ips']: - self._update_arp_entry(ri, ip['ip_address'], - port['mac_address'], - ip['subnet_id'], 'add') - return - - # Compute a list of addresses this router is supposed to have. - # This avoids unnecessarily removing those addresses and - # causing a momentarily network outage. 
- floating_ips = self.get_floating_ips(ri) - preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX - for ip in floating_ips] - - self._external_gateway_added(ri, ex_gw_port, interface_name, - ri.ns_name, preserve_ips) - - if ri.is_ha: - self._ha_external_gateway_added(ri, ex_gw_port, interface_name) - - def external_gateway_updated(self, ri, ex_gw_port, interface_name): - preserve_ips = [] - if ri.router['distributed']: - if (self.conf.agent_mode == 'dvr_snat' and - ri.router['gw_port_host'] == self.host): - ns_name = self.get_snat_ns_name(ri.router['id']) - else: - # no centralized SNAT gateway for this node/agent - LOG.debug("not hosting snat for router: %s", ri.router['id']) - return - else: - ns_name = ri.ns_name - floating_ips = self.get_floating_ips(ri) - preserve_ips = [ip['floating_ip_address'] + FLOATING_IP_CIDR_SUFFIX - for ip in floating_ips] - - self._external_gateway_added(ri, ex_gw_port, interface_name, - ns_name, preserve_ips) - - if ri.is_ha: - self._ha_external_gateway_updated(ri, ex_gw_port, interface_name) - - def _external_gateway_added(self, ri, ex_gw_port, interface_name, - ns_name, preserve_ips): - if not ip_lib.device_exists(interface_name, - root_helper=self.root_helper, - namespace=ns_name): - self.driver.plug(ex_gw_port['network_id'], - ex_gw_port['id'], interface_name, - ex_gw_port['mac_address'], - bridge=self.conf.external_network_bridge, - namespace=ns_name, - prefix=EXTERNAL_DEV_PREFIX) - - if not ri.is_ha: - self.driver.init_l3( - interface_name, [ex_gw_port['ip_cidr']], namespace=ns_name, - gateway=ex_gw_port['subnet'].get('gateway_ip'), - extra_subnets=ex_gw_port.get('extra_subnets', []), - preserve_ips=preserve_ips) - ip_address = ex_gw_port['ip_cidr'].split('/')[0] - self._send_gratuitous_arp_packet(ns_name, - interface_name, ip_address) - - def agent_gateway_added(self, ns_name, ex_gw_port, - interface_name): - """Add Floating IP gateway port to FIP namespace.""" - if not ip_lib.device_exists(interface_name, - root_helper=self.root_helper, - namespace=ns_name): - self.driver.plug(ex_gw_port['network_id'], - ex_gw_port['id'], interface_name, - ex_gw_port['mac_address'], - bridge=self.conf.external_network_bridge, - namespace=ns_name, - prefix=FIP_EXT_DEV_PREFIX) - - self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']], - namespace=ns_name) - ip_address = ex_gw_port['ip_cidr'].split('/')[0] - self._send_gratuitous_arp_packet(ns_name, interface_name, ip_address) - - gw_ip = ex_gw_port['subnet']['gateway_ip'] - if gw_ip: - ipd = ip_lib.IPDevice(interface_name, self.root_helper, - namespace=ns_name) - ipd.route.add_gateway(gw_ip) - - cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] - ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) - ip_wrapper.netns.execute(cmd, check_exit_code=False) - - def internal_ns_interface_added(self, ip_cidr, - interface_name, ns_name): - ip_wrapper = ip_lib.IPWrapper(self.root_helper, namespace=ns_name) - ip_wrapper.netns.execute(['ip', 'addr', 'add', - ip_cidr, 'dev', interface_name]) - - def external_gateway_removed(self, ri, ex_gw_port, interface_name): - if ri.router['distributed']: - for p in ri.internal_ports: - internal_interface = self.get_internal_device_name(p['id']) - self._snat_redirect_remove(ri, p, internal_interface) - - if self.conf.agent_mode == 'dvr_snat' and ( - ri.router['gw_port_host'] == self.host): - ns_name = self.get_snat_ns_name(ri.router['id']) - else: - # not hosting agent - no work to do - LOG.debug('DVR: CSNAT not hosted: %s', ex_gw_port) 
- return - else: - ns_name = ri.ns_name - - if ri.is_ha: - self._ha_external_gateway_removed(ri, interface_name) - - self.driver.unplug(interface_name, - bridge=self.conf.external_network_bridge, - namespace=ns_name, - prefix=EXTERNAL_DEV_PREFIX) - if ri.router['distributed']: - self._destroy_snat_namespace(ns_name) - - def metadata_filter_rules(self): - rules = [] - if self.conf.enable_metadata_proxy: - rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 ' - '-p tcp -m tcp --dport %s ' - '-j ACCEPT' % self.conf.metadata_port)) - return rules - - def metadata_nat_rules(self): - rules = [] - if self.conf.enable_metadata_proxy: - rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 ' - '-p tcp -m tcp --dport 80 -j REDIRECT ' - '--to-port %s' % self.conf.metadata_port)) - return rules - - def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs, - interface_name): - rules = [('POSTROUTING', '! -i %(interface_name)s ' - '! -o %(interface_name)s -m conntrack ! ' - '--ctstate DNAT -j ACCEPT' % - {'interface_name': interface_name})] - for cidr in internal_cidrs: - rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr)) - return rules - - def _snat_redirect_add(self, ri, gateway, sn_port, sn_int): - """Adds rules and routes for SNAT redirection.""" - try: - snat_idx = netaddr.IPNetwork(sn_port['ip_cidr']).value - ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) - ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, - namespace=ri.ns_name) - ns_ipd.route.add_gateway(gateway, table=snat_idx) - ns_ipr.add_rule_from(sn_port['ip_cidr'], snat_idx, snat_idx) - ns_ipr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' - 'send_redirects=0' % sn_int]) - except Exception: - LOG.exception(_('DVR: error adding redirection logic')) - - def _snat_redirect_remove(self, ri, sn_port, sn_int): - """Removes rules and routes for SNAT redirection.""" - try: - snat_idx = netaddr.IPNetwork(sn_port['ip_cidr']).value - ns_ipr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) - ns_ipd = ip_lib.IPDevice(sn_int, self.root_helper, - namespace=ri.ns_name) - ns_ipd.route.delete_gateway(table=snat_idx) - ns_ipr.delete_rule_priority(snat_idx) - except Exception: - LOG.exception(_('DVR: removed snat failed')) - - def _internal_network_added(self, ns_name, network_id, port_id, - internal_cidr, mac_address, - interface_name, prefix, is_ha=False): - if not ip_lib.device_exists(interface_name, - root_helper=self.root_helper, - namespace=ns_name): - self.driver.plug(network_id, port_id, interface_name, mac_address, - namespace=ns_name, - prefix=prefix) - - if not is_ha: - self.driver.init_l3(interface_name, [internal_cidr], - namespace=ns_name) - ip_address = internal_cidr.split('/')[0] - self._send_gratuitous_arp_packet(ns_name, interface_name, - ip_address) - - def internal_network_added(self, ri, port): - network_id = port['network_id'] - port_id = port['id'] - internal_cidr = port['ip_cidr'] - mac_address = port['mac_address'] - - interface_name = self.get_internal_device_name(port_id) - - self._internal_network_added(ri.ns_name, network_id, port_id, - internal_cidr, mac_address, - interface_name, INTERNAL_DEV_PREFIX, - ri.is_ha) - - if ri.is_ha: - self._add_vip(ri, internal_cidr, interface_name) - - ex_gw_port = self._get_ex_gw_port(ri) - if ri.router['distributed'] and ex_gw_port: - snat_ports = self.get_snat_interfaces(ri) - sn_port = self._map_internal_interfaces(ri, port, snat_ports) - if sn_port: - self._snat_redirect_add(ri, sn_port['fixed_ips'][0] - ['ip_address'], port, interface_name) 
- if (self.conf.agent_mode == 'dvr_snat' and - ri.router['gw_port_host'] == self.host): - ns_name = self.get_snat_ns_name(ri.router['id']) - self._set_subnet_info(sn_port) - interface_name = ( - self.get_snat_int_device_name(sn_port['id'])) - self._internal_network_added(ns_name, - sn_port['network_id'], - sn_port['id'], - sn_port['ip_cidr'], - sn_port['mac_address'], - interface_name, - SNAT_INT_DEV_PREFIX) - - def internal_network_removed(self, ri, port): - port_id = port['id'] - interface_name = self.get_internal_device_name(port_id) - if ri.router['distributed'] and ri.ex_gw_port: - # DVR handling code for SNAT - self._snat_redirect_remove(ri, port, interface_name) - if self.conf.agent_mode == 'dvr_snat' and ( - ri.ex_gw_port['binding:host_id'] == self.host): - snat_port = self._map_internal_interfaces(ri, port, - ri.snat_ports) - if snat_port: - snat_interface = ( - self.get_snat_int_device_name(snat_port['id']) - ) - ns_name = self.get_snat_ns_name(ri.router['id']) - prefix = SNAT_INT_DEV_PREFIX - if ip_lib.device_exists(snat_interface, - root_helper=self.root_helper, - namespace=ns_name): - self.driver.unplug(snat_interface, namespace=ns_name, - prefix=prefix) - - if ip_lib.device_exists(interface_name, - root_helper=self.root_helper, - namespace=ri.ns_name): - if ri.is_ha: - self._clear_vips(ri, interface_name) - self.driver.unplug(interface_name, namespace=ri.ns_name, - prefix=INTERNAL_DEV_PREFIX) - - def internal_network_nat_rules(self, ex_gw_ip, internal_cidr): - rules = [('snat', '-s %s -j SNAT --to-source %s' % - (internal_cidr, ex_gw_ip))] - return rules - - def _create_agent_gateway_port(self, ri, network_id): - """Create Floating IP gateway port. - - Request port creation from Plugin then creates - Floating IP namespace and adds gateway port. 
- """ - self.agent_gateway_port = ( - self.plugin_rpc.get_agent_gateway_port( - self.context, network_id)) - if 'subnet' not in self.agent_gateway_port: - LOG.error(_('Missing subnet/agent_gateway_port')) - return - self._set_subnet_info(self.agent_gateway_port) - - # add fip-namespace and agent_gateway_port - fip_ns_name = ( - self.get_fip_ns_name(str(network_id))) - self._create_namespace(fip_ns_name) - ri.fip_iptables_manager = iptables_manager.IptablesManager( - root_helper=self.root_helper, namespace=fip_ns_name, - use_ipv6=self.use_ipv6) - # no connection tracking needed in fip namespace - ri.fip_iptables_manager.ipv4['raw'].add_rule('PREROUTING', - '-j CT --notrack') - ri.fip_iptables_manager.apply() - interface_name = ( - self.get_fip_ext_device_name(self.agent_gateway_port['id'])) - self.agent_gateway_added(fip_ns_name, self.agent_gateway_port, - interface_name) - - def create_rtr_2_fip_link(self, ri, network_id): - """Create interface between router and Floating IP namespace.""" - rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) - fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) - fip_ns_name = self.get_fip_ns_name(str(network_id)) - - # add link local IP to interface - if ri.rtr_fip_subnet is None: - ri.rtr_fip_subnet = self.local_subnets.allocate(ri.router_id) - rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair() - ip_wrapper = ip_lib.IPWrapper(self.root_helper, - namespace=ri.ns_name) - int_dev = ip_wrapper.add_veth(rtr_2_fip_name, - fip_2_rtr_name, fip_ns_name) - self.internal_ns_interface_added(str(rtr_2_fip), - rtr_2_fip_name, ri.ns_name) - self.internal_ns_interface_added(str(fip_2_rtr), - fip_2_rtr_name, fip_ns_name) - int_dev[0].link.set_up() - int_dev[1].link.set_up() - # add default route for the link local interface - device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, - namespace=ri.ns_name) - device.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL) - #setup the NAT rules and chains - self._handle_router_fip_nat_rules(ri, rtr_2_fip_name, 'add_rules') - # kicks the FW Agent to add rules for the IR namespace if configured - self.process_router_add(ri) - - def floating_ip_added_dist(self, ri, fip): - """Add floating IP to FIP namespace.""" - floating_ip = fip['floating_ip_address'] - fixed_ip = fip['fixed_ip_address'] - rule_pr = self.fip_priorities.pop() - ri.floating_ips_dict[floating_ip] = rule_pr - fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) - ip_rule = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) - ip_rule.add_rule_from(fixed_ip, FIP_RT_TBL, rule_pr) - - #Add routing rule in fip namespace - fip_cidr = str(floating_ip) + FLOATING_IP_CIDR_SUFFIX - fip_ns_name = self.get_fip_ns_name(str(fip['floating_network_id'])) - rtr_2_fip, _ = ri.rtr_fip_subnet.get_pair() - device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, - namespace=fip_ns_name) - device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) - interface_name = ( - self.get_fip_ext_device_name(self.agent_gateway_port['id'])) - self._send_gratuitous_arp_packet(fip_ns_name, - interface_name, floating_ip, - distributed=True) - # update internal structures - self.agent_fip_count = self.agent_fip_count + 1 - ri.dist_fip_count = ri.dist_fip_count + 1 - - def floating_ip_removed_dist(self, ri, fip_cidr): - """Remove floating IP from FIP namespace.""" - floating_ip = fip_cidr.split('/')[0] - rtr_2_fip_name = self.get_rtr_int_device_name(ri.router_id) - fip_2_rtr_name = self.get_fip_int_device_name(ri.router_id) - rtr_2_fip, fip_2_rtr = 
ri.rtr_fip_subnet.get_pair() - fip_ns_name = self.get_fip_ns_name(str(self._fetch_external_net_id())) - ip_rule_rtr = ip_lib.IpRule(self.root_helper, namespace=ri.ns_name) - if floating_ip in ri.floating_ips_dict: - rule_pr = ri.floating_ips_dict[floating_ip] - #TODO(rajeev): Handle else case - exception/log? - else: - rule_pr = None - - ip_rule_rtr.delete_rule_priority(rule_pr) - self.fip_priorities.add(rule_pr) - device = ip_lib.IPDevice(fip_2_rtr_name, self.root_helper, - namespace=fip_ns_name) - - device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) - # check if this is the last FIP for this router - ri.dist_fip_count = ri.dist_fip_count - 1 - if ri.dist_fip_count == 0: - #remove default route entry - device = ip_lib.IPDevice(rtr_2_fip_name, self.root_helper, - namespace=ri.ns_name) - ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=fip_ns_name) - device.route.delete_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL) - self.local_subnets.release(ri.router_id) - ri.rtr_fip_subnet = None - ns_ip.del_veth(fip_2_rtr_name) - # clean up fip-namespace if this is the last FIP - self.agent_fip_count = self.agent_fip_count - 1 - if self.agent_fip_count == 0: - self._destroy_fip_namespace(fip_ns_name) - - def floating_forward_rules(self, floating_ip, fixed_ip): - return [('PREROUTING', '-d %s -j DNAT --to %s' % - (floating_ip, fixed_ip)), - ('OUTPUT', '-d %s -j DNAT --to %s' % - (floating_ip, fixed_ip)), - ('float-snat', '-s %s -j SNAT --to %s' % - (fixed_ip, floating_ip))] - - def router_deleted(self, context, router_id): - """Deal with router deletion RPC message.""" - LOG.debug(_('Got router deleted notification for %s'), router_id) - update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER) - self._queue.add(update) - - def _update_arp_entry(self, ri, ip, mac, subnet_id, operation): - """Add or delete arp entry into router namespace for the subnet.""" - return - port = self.get_internal_port(ri, subnet_id) - # update arp entry only if the subnet is attached to the router - if port: - ip_cidr = str(ip) + '/32' - try: - # TODO(mrsmith): optimize the calls below for bulk calls - net = netaddr.IPNetwork(ip_cidr) - interface_name = self.get_internal_device_name(port['id']) - device = ip_lib.IPDevice(interface_name, self.root_helper, - namespace=ri.ns_name) - if operation == 'add': - device.neigh.add(net.version, ip, mac) - elif operation == 'delete': - device.neigh.delete(net.version, ip, mac) - except Exception: - LOG.exception(_("DVR: Failed updating arp entry")) - self.fullsync = True - - def add_arp_entry(self, context, payload): - """Add arp entry into router namespace. Called from RPC.""" - arp_table = payload['arp_table'] - router_id = payload['router_id'] - ip = arp_table['ip_address'] - mac = arp_table['mac_address'] - subnet_id = arp_table['subnet_id'] - ri = self.router_info.get(router_id) - if ri: - self._update_arp_entry(ri, ip, mac, subnet_id, 'add') - - def del_arp_entry(self, context, payload): - """Delete arp entry from router namespace. 
Called from RPC.""" - arp_table = payload['arp_table'] - router_id = payload['router_id'] - ip = arp_table['ip_address'] - mac = arp_table['mac_address'] - subnet_id = arp_table['subnet_id'] - ri = self.router_info.get(router_id) - if ri: - self._update_arp_entry(ri, ip, mac, subnet_id, 'delete') - - def routers_updated(self, context, routers): - """Deal with routers modification and creation RPC message.""" - LOG.debug(_('Got routers updated notification :%s'), routers) - if routers: - # This is needed for backward compatibility - if isinstance(routers[0], dict): - routers = [router['id'] for router in routers] - for id in routers: - update = RouterUpdate(id, PRIORITY_RPC) - self._queue.add(update) - - def router_removed_from_agent(self, context, payload): - LOG.debug(_('Got router removed from agent :%r'), payload) - router_id = payload['router_id'] - update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER) - self._queue.add(update) - - def router_added_to_agent(self, context, payload): - LOG.debug(_('Got router added to agent :%r'), payload) - self.routers_updated(context, payload) - - def _process_routers(self, routers, all_routers=False): - pool = eventlet.GreenPool() - if (self.conf.external_network_bridge and - not ip_lib.device_exists(self.conf.external_network_bridge)): - LOG.error(_("The external network bridge '%s' does not exist"), - self.conf.external_network_bridge) - return - - target_ex_net_id = self._fetch_external_net_id() - # if routers are all the routers we have (They are from router sync on - # starting or when error occurs during running), we seek the - # routers which should be removed. - # If routers are from server side notification, we seek them - # from subset of incoming routers and ones we have now. - if all_routers: - prev_router_ids = set(self.router_info) - else: - prev_router_ids = set(self.router_info) & set( - [router['id'] for router in routers]) - cur_router_ids = set() - for r in routers: - # If namespaces are disabled, only process the router associated - # with the configured agent id. - if (not self.conf.use_namespaces and - r['id'] != self.conf.router_id): - continue - ex_net_id = (r['external_gateway_info'] or {}).get('network_id') - if not ex_net_id and not self.conf.handle_internal_only_routers: - continue - if (target_ex_net_id and ex_net_id and - ex_net_id != target_ex_net_id): - # Double check that our single external_net_id has not changed - # by forcing a check by RPC. 
- if (ex_net_id != self._fetch_external_net_id(force=True)): - continue - cur_router_ids.add(r['id']) - if r['id'] not in self.router_info: - self._router_added(r['id'], r) - ri = self.router_info[r['id']] - ri.router = r - pool.spawn_n(self.process_router, ri) - # identify and remove routers that no longer exist - for router_id in prev_router_ids - cur_router_ids: - pool.spawn_n(self._router_removed, router_id) - pool.waitall() - - def _process_router_update(self): - for rp, update in self._queue.each_update_to_next_router(): - LOG.debug("Starting router update for %s", update.id) - router = update.router - if update.action != DELETE_ROUTER and not router: - try: - update.timestamp = timeutils.utcnow() - routers = self.plugin_rpc.get_routers(self.context, - [update.id]) - except Exception: - msg = _("Failed to fetch router information for '%s'") - LOG.exception(msg, update.id) - self.fullsync = True - continue - - if routers: - router = routers[0] - - if not router: - self._router_removed(update.id) - continue - - self._process_routers([router]) - LOG.debug("Finished a router update for %s", update.id) - rp.fetched_and_processed(update.timestamp) - - def _process_routers_loop(self): - LOG.debug("Starting _process_routers_loop") - pool = eventlet.GreenPool(size=8) - while True: - pool.spawn_n(self._process_router_update) - - def _router_ids(self): - if not self.conf.use_namespaces: - return [self.conf.router_id] - - @periodic_task.periodic_task - def periodic_sync_routers_task(self, context): - self._sync_routers_task(context) - - def _sync_routers_task(self, context): - if self.services_sync: - super(L3NATAgent, self).process_services_sync(context) - LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), - self.fullsync) - if not self.fullsync: - return - - # Capture a picture of namespaces *before* fetching the full list from - # the database. This is important to correctly identify stale ones. - namespaces = set() - if self._clean_stale_namespaces: - namespaces = self._list_namespaces() - prev_router_ids = set(self.router_info) - - try: - router_ids = self._router_ids() - timestamp = timeutils.utcnow() - routers = self.plugin_rpc.get_routers( - context, router_ids) - - LOG.debug(_('Processing :%r'), routers) - for r in routers: - update = RouterUpdate(r['id'], - PRIORITY_SYNC_ROUTERS_TASK, - router=r, - timestamp=timestamp) - self._queue.add(update) - self.fullsync = False - LOG.debug(_("_sync_routers_task successfully completed")) - except n_rpc.RPCException: - LOG.exception(_("Failed synchronizing routers due to RPC error")) - self.fullsync = True - except Exception: - LOG.exception(_("Failed synchronizing routers")) - self.fullsync = True - else: - # Resync is not necessary for the cleanup of stale namespaces - curr_router_ids = set([r['id'] for r in routers]) - - # Two kinds of stale routers: Routers for which info is cached in - # self.router_info and the others. First, handle the former. - for router_id in prev_router_ids - curr_router_ids: - update = RouterUpdate(router_id, - PRIORITY_SYNC_ROUTERS_TASK, - timestamp=timestamp, - action=DELETE_ROUTER) - self._queue.add(update) - - # Next, one effort to clean out namespaces for which we don't have - # a record. (i.e. 
_clean_stale_namespaces=False after one pass) - if self._clean_stale_namespaces: - ids_to_keep = curr_router_ids | prev_router_ids - self._cleanup_namespaces(namespaces, ids_to_keep) - - def after_start(self): - eventlet.spawn_n(self._process_routers_loop) - LOG.info(_("L3 agent started")) - - def _update_routing_table(self, ri, operation, route): - cmd = ['ip', 'route', operation, 'to', route['destination'], - 'via', route['nexthop']] - ip_wrapper = ip_lib.IPWrapper(self.root_helper, - namespace=ri.ns_name) - ip_wrapper.netns.execute(cmd, check_exit_code=False) - - def routes_updated(self, ri): - new_routes = ri.router['routes'] - if ri.is_ha: - self._process_virtual_routes(ri, new_routes) - return - - old_routes = ri.routes - adds, removes = common_utils.diff_list_of_dict(old_routes, - new_routes) - for route in adds: - LOG.debug(_("Added route entry is '%s'"), route) - # remove replaced route from deleted route - for del_route in removes: - if route['destination'] == del_route['destination']: - removes.remove(del_route) - #replace success even if there is no existing route - self._update_routing_table(ri, 'replace', route) - for route in removes: - LOG.debug(_("Removed route entry is '%s'"), route) - self._update_routing_table(ri, 'delete', route) - ri.routes = new_routes - - -class L3NATAgentWithStateReport(L3NATAgent): - - def __init__(self, host, conf=None): - super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - self.agent_state = { - 'binary': 'neutron-l3-agent', - 'host': host, - 'topic': topics.L3_AGENT, - 'configurations': { - 'agent_mode': self.conf.agent_mode, - 'use_namespaces': self.conf.use_namespaces, - 'router_id': self.conf.router_id, - 'handle_internal_only_routers': - self.conf.handle_internal_only_routers, - 'external_network_bridge': self.conf.external_network_bridge, - 'gateway_external_network_id': - self.conf.gateway_external_network_id, - 'interface_driver': self.conf.interface_driver}, - 'start_flag': True, - 'agent_type': l3_constants.AGENT_TYPE_L3} - report_interval = cfg.CONF.AGENT.report_interval - self.use_call = True - if report_interval: - self.heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - self.heartbeat.start(interval=report_interval) - - def _report_state(self): - LOG.debug(_("Report state task started")) - num_ex_gw_ports = 0 - num_interfaces = 0 - num_floating_ips = 0 - router_infos = self.router_info.values() - num_routers = len(router_infos) - for ri in router_infos: - ex_gw_port = self._get_ex_gw_port(ri) - if ex_gw_port: - num_ex_gw_ports += 1 - num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY, - [])) - num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY, - [])) - configurations = self.agent_state['configurations'] - configurations['routers'] = num_routers - configurations['ex_gw_ports'] = num_ex_gw_ports - configurations['interfaces'] = num_interfaces - configurations['floating_ips'] = num_floating_ips - try: - self.state_rpc.report_state(self.context, self.agent_state, - self.use_call) - self.agent_state.pop('start_flag', None) - self.use_call = False - LOG.debug(_("Report state task successfully completed")) - except AttributeError: - # This means the server does not support report_state - LOG.warn(_("Neutron server does not support state report." 
- " State report for this agent will be disabled.")) - self.heartbeat.stop() - return - except Exception: - LOG.exception(_("Failed reporting state!")) - - def agent_updated(self, context, payload): - """Handle the agent_updated notification event.""" - self.fullsync = True - LOG.info(_("agent_updated by server side %s!"), payload) - - -def _register_opts(conf): - conf.register_opts(L3NATAgent.OPTS) - conf.register_opts(l3_ha_agent.OPTS) - config.register_interface_driver_opts_helper(conf) - config.register_use_namespaces_opts_helper(conf) - config.register_agent_state_opts_helper(conf) - config.register_root_helper(conf) - conf.register_opts(interface.OPTS) - conf.register_opts(external_process.OPTS) - - -def main(manager='neutron.agent.l3_proxy.L3NATAgentWithStateReport'): - _register_opts(cfg.CONF) - common_config.init(sys.argv[1:]) - config.setup_logging() - server = neutron_service.Service.create( - binary='neutron-l3-agent', - topic=topics.L3_AGENT, - report_interval=cfg.CONF.AGENT.report_interval, - manager=manager) - service.launch(server).wait() - -if __name__ == "__main__": - sys.exit(main()) - diff --git a/novaproxy/README.md b/novaproxy/README.md deleted file mode 100644 index 6a2d42bc..00000000 --- a/novaproxy/README.md +++ /dev/null @@ -1,165 +0,0 @@ -Openstack Nova Proxy -=============================== - - Nova-Proxy acts as the same role of Nova-Compute in cascading OpenStack. - Nova-Proxy treats cascaded Nova as its hypervisor, convert the internal request message from the message bus to restful API calling to cascaded Nova. - - -Key modules ------------ - -* The new nova proxy module manager_proxy,which is configured to manage specified Availability Zone cascaded Nova. All VM in the cascaded Nova of this AZ will be bind to the manager_proxy host in the cascading level: - - nova/compute/manager_proxy.py - -* The code include clients of various component service(nova neutron cinder glance),through the client you can call cascaded various component service API by restful API - - nova/compute/clients.py - -* The solution of that clients gets token or checks token from token: - nova/compute/compute_context.py - nova/compute/compute_keystoneclient.py - -Requirements ------------- -* openstack-nova-compute-2014.2(Juno) has been installed - -Installation ------------- - -We provide two ways to install the nova proxy code. In this section, we will guide you through installing the nova proxy with the minimum configuration. - -* **Note:** - - - Make sure you have an existing installation of **Openstack Juno**. - - We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified: - $NOVA_CONFIG_PARENT_DIR/nova.conf - (replace the $... with actual directory names.) - -* **Manual Installation** - - - Make sure you have performed backups properly. - - - - Navigate to the local repository and copy the contents in 'nova' sub-directory to the corresponding places in existing nova, e.g. - ```cp -r $LOCAL_REPOSITORY_DIR/nova $NOVA_PARENT_DIR``` - (replace the $... with actual directory name.) - - - Update the nova configuration file (e.g. /etc/nova/nova.conf) with the minimum option below. If the option already exists, modify its value, otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide. - ``` - [DEFAULT] - ... 
- ###configuration for Nova cascading ### - proxy_region_name=$proxy_region_name - cascading_nova_url=$cascading_nova_url - cascaded_nova_url=$cascaded_nova_url - cascaded_neutron_url=$cascaded_neutron_url - cascaded_glance_flag=False - cascaded_glance_url=$cascaded_glance_url - os_region_name=$os_region_name - keystone_auth_url=$keystone_auth_url - cinder_endpoint_template=$cinder_endpoint_template - compute_manager=nova.compute.manager_proxy.ComputeManager - ``` - - - Restart the nova proxy. - ```service nova-compute restart``` - - - Done. The nova proxy should be working with a demo configuration. - -* **Automatic Installation** - - - Make sure you have performed backups properly. - - - run `source envrc` - - - Navigate to the installation directory and run installation script. - ``` - cd $LOCAL_REPOSITORY_DIR/installation - sudo bash ./install.sh - ``` - (replace the $... with actual directory name.) - - - Done. The installation code should setup the nova proxy with the minimum configuration below. Check the "Configurations" section for a full configuration guide. - ``` - [DEFAULT] - ... - ###cascade info ### - proxy_region_name=$proxy_region_name - cascading_nova_url=$cascading_nova_url - cascaded_nova_url=$cascaded_nova_url - cascaded_neutron_url=$cascaded_neutron_url - cascaded_glance_flag=False - cascaded_glance_url=$cascaded_glance_url - os_region_name=$os_region_name - keystone_auth_url=$keystone_auth_url - cinder_endpoint_template=$cinder_endpoint_template - compute_manager=nova.compute.manager_proxy.ComputeManager - -* **Troubleshooting** - - In case the automatic installation process is not complete, please check the followings: - - - Make sure your OpenStack version is Juno. - - - Check the variables in the beginning of the install.sh scripts. Your installation directories may be different from the default values we provide. - - - The installation code will automatically add the related codes to $NOVA_PARENT_DIR/nova and modify the related configuration. - - - In case the automatic installation does not work, try to install manually. - -Configurations --------------- - -* This is a (default) configuration sample for the nova proxy. Please add/modify these options in /etc/nova/nova.conf. -* Note: - - Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name. - - Please refer to the 'Configuration Details' section below for proper configuration and usage of costs and constraints. - -``` -[DEFAULT] - -... - -# -#Options defined in nova.compute.manager -# - -# Default driver to use for the nova proxy (string value) -compute_manager=nova.compute.manager_proxy.ComputeManager - -#The region name ,which will be set as a parameter when -#the cascaded level component services register endpoint to keystone -proxy_region_name=$proxy_region_name - -#The cascading level nova component service url, by which the nova porxy -#can access to cascading level nova service -cascading_nova_url=$cascading_nova_url - -#The cascaded level nova component service url, by which the nova porxy -#can access to cascaded level nova service -cascaded_nova_url=$cascaded_nova_url -cascaded_neutron_url=$cascaded_neutron_url - -#when cascaded_glance_flag is set to True, the cascaded nova will use casaded glance to -#provide image but not cascading level glance, if it cascaded_glance_flag is set to False -#casacaded Nova will use image from global glance. 
-cascaded_glance_flag=True or False - -#The cascaded level glance service url, by which the nova porxy -#can judge whether cascading glance has a location for this cascaded glance -cascaded_glance_url=$cascaded_glance_url - -#The region name ,which will be set as a parameter when -#the cascading level component services register endpoint to keystone -os_region_name=$os_region_name - -#The cascading level keystone component service url, by which the nova porxy -#can access to cascading level keystone service -keystone_auth_url=$keystone_auth_url - -Note: a few options that belonged to the original nova have to be configured: - -. allow_resize_to_same_host=True -. scheduler_default_filters=AvailabilityZoneFilter - diff --git a/novaproxy/installation/install.sh b/novaproxy/installation/install.sh deleted file mode 100644 index ccbfd749..00000000 --- a/novaproxy/installation/install.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -_NOVA_CONF_DIR="/etc/nova" -_NOVA_CONF_FILE="nova.conf" -_NOVA_INSTALL=${OPENSTACK_INSTALL_DIR} -if [ ! -n ${_NOVA_INSTALL} ];then - _NOVA_INSTALL="/usr/lib/python2.7/dist-packages" -fi -_NOVA_DIR="${_NOVA_INSTALL}/nova" - -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="../nova" -_BACKUP_DIR="${_NOVA_INSTALL}/.nova-proxy-installation-backup" - -_SCRIPT_LOGFILE="/var/log/nova-proxy/installation/install.log" - - -function log() -{ - log_path=`dirname ${_SCRIPT_LOGFILE}` - if [ ! -d $log_path ] ; then - mkdir -p $log_path - fi - echo "$@" - echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE -} - -if [[ ${EUID} -ne 0 ]]; then - log "Please run as root." - exit 1 -fi - - -cd `dirname $0` - -if [ ! -d "/var/log/nova-proxy/installation" ]; then - mkdir -p /var/log/nova-proxy/installation - touch _SCRIPT_LOGFILE -fi - -log "checking installation directories..." -if [ ! -d "${_NOVA_DIR}" ] ; then - log "Could not find the nova installation. Please check the variables in the beginning of the script." - log "aborted." - exit 1 -fi -if [ ! -f "${_NOVA_CONF_DIR}/${_NOVA_CONF_FILE}" ] ; then - log "Could not find nova config file. Please check the variables in the beginning of the script." - log "aborted." - exit 1 -fi - -log "checking previous installation..." -if [ -d "${_BACKUP_DIR}/nova" ] ; then - log "It seems nova-proxy has already been installed!" - log "Please check README for solution if this is not true." - exit 1 -fi - -log "backing up current files that might be overwritten..." -mkdir -p "${_BACKUP_DIR}/nova" -mkdir -p "${_BACKUP_DIR}/etc/nova" -cp "${_NOVA_CONF_DIR}/${_NOVA_CONF_FILE}" "${_BACKUP_DIR}/etc/nova/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/nova" - rm -r "${_BACKUP_DIR}/etc" - log "Error in config backup, aborted." - exit 1 -fi - -log "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_NOVA_DIR}` -if [ $? -ne 0 ] ; then - log "Error in copying, aborted." 
- log "Recovering original files..." - cp -r "${_BACKUP_DIR}/nova" `dirname ${_NOVA_DIR}` && rm -r "${_BACKUP_DIR}/nova" - if [ $? -ne 0 ] ; then - log "Recovering failed! Please install manually." - fi - exit 1 -fi - -cd `dirname $0`/../../script -python config.py nova -if [ $? -ne 0 ] ; then - log "configurate the nova options error." - exit 1 -fi -cd - - -log "restarting nova compute..." -service nova-compute restart -if [ $? -ne 0 ] ; then - log "There was an error in restarting the service, please restart nova scheduler manually." - exit 1 -fi - -log "Completed." -log "See README to get started." - -exit 0 \ No newline at end of file diff --git a/novaproxy/installation/uninstall.sh b/novaproxy/installation/uninstall.sh deleted file mode 100644 index f770fd93..00000000 --- a/novaproxy/installation/uninstall.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - - -# The uninstallation script don't had been realization, -# it will be supplied if needed. -exit 1 \ No newline at end of file diff --git a/script/README.md b/script/README.md deleted file mode 100755 index bc6a5e45..00000000 --- a/script/README.md +++ /dev/null @@ -1,44 +0,0 @@ -Tricircle Configuration Options Updating Module -=============================================== - -In Tricircle Project, we added many options in the *.conf files for cascading, these options -among nova, glance, neutron, cinder. When deploy the cascading environment, these options should -be modified based on the deployment context(IP, tenant, user, password etc.), so we have to -modify each install scripts(/installation) every time for deployment because the options is -configured by these scripts. It is inconvenient. - -This script module is created in order to managing the options in *.conf with a centralized way. -It is independent of the installation scripts, but the scripts can invoke the function in it to -finish the options' configuration. - -Composition ------- -* **config.py**: the implementation to execute options updating, using python build-in lib:ConfigParser. -* **tricircle.cfg**: the options you want to update are stored here. -* **exec.sh**: a very simple shell commend to invoke the python code. - -Usage -------- -- Format of the tricircle.cfg - - The tricircle.cfg is standard python config file(like nova.conf in /etc/nova), it contains - sections and options in each section like what the *.conf is in it. The only difference is - the **Naming Conventions** of the section: - - + Every section name start with the openstack service config-file name - (nova/neutron/glance-api/cinder); - - + If the option to be updated needs in a special section in *.conf, the special section - (keystone_authtoken e.g) should be added to the end of the section name with '_' ahead of - it. For example, if the 'auth_host' option in nova.conf need be updated, it should in - 'nova_keystone_authtoken' section in the tricircle.cfg. 
- -- Execution - - After you configured the options in tricircle.cfg, run the commend: - ```python config.py [openstack-service-name]``` - If you want update all services' options in tricircle.cfg, run ```python config.py all```. - - + **Note**: you can execute multiple times for an option with different value and do - not worry about it appears multiple times in *.conf, only the latest value in the conf - file. diff --git a/script/__init__.py b/script/__init__.py deleted file mode 100755 index 4a7de129..00000000 --- a/script/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'openstack' diff --git a/script/config.py b/script/config.py deleted file mode 100755 index b8bc5c7e..00000000 --- a/script/config.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Jia Dong, HuaWei - -import ConfigParser -import os -import sys - -DEFAULT_CFG_FILE_PATHS = { - 'nova': {'nova': '/etc/nova/nova.conf'}, - 'glance': { - 'api': '/etc/glance/glance-api.conf', - 'registry': '/etc/glance/glance-registry.conf', - 'sync': '/etc/glance/glance-sync.conf' - }, - 'cinder': {'cinder': '/etc/cinder/cinder.conf'} -} - - -class TricircleConfig(object): - CFG_FILE = "tricircle.cfg" - - def __init__(self): - self.cf = ConfigParser.ConfigParser() - self.cf.read(self.CFG_FILE) - - def update_options(self, module_name, module_cfg_file_paths=None): - """ - module_name like 'nova', 'glance-api' etc. - """ - cfg_mapping = module_cfg_file_paths if module_cfg_file_paths \ - else DEFAULT_CFG_FILE_PATHS.get(module_name, None) - - if not cfg_mapping: - print 'Abort, no cfg_file for module %s' \ - ' has configured.' % module_name - return - options = {} - for cfg_mod in cfg_mapping: - - sub_mod = cfg_mod - sub_file_path = cfg_mapping[sub_mod] - sub_module_name = module_name + '-' + sub_mod if module_name != sub_mod \ - else module_name - options[sub_module_name] = {} - - sections = filter(lambda x: x.startswith(sub_module_name), - self.cf.sections()) - for section in sections: - module_section = section[len(sub_module_name):] or 'DEFAULT' - module_section = module_section[1:] \ - if module_section[0] == '_' else module_section - - _options = {} - module_options = self.cf.items(section, raw=True) - for pair in module_options: - _options[pair[0]] = pair[1] - options[sub_module_name][module_section] = _options - - if options[sub_module_name]: - print '>>> Start updating %s config: ' % sub_module_name - TricircleConfig._replace_cfg(options[sub_module_name], sub_file_path) - print 'Finish updating %s config. 
<<< ' % sub_module_name - - @staticmethod - def _replace_cfg(options, file_path): - if not (file_path and os.path.isfile(file_path)): - print 'file_path %s not exists or not a file' % file_path - mod_cf = ConfigParser.SafeConfigParser() - mod_cf.read(file_path) - sections = mod_cf.sections() - for _section in options: - if _section not in sections and _section != 'DEFAULT': - mod_cf.add_section(_section) - - for option in options[_section]: - mod_cf.set(_section, option, options[_section][option]) - - mod_cf.write(open(file_path, 'w')) - print 'Done' - - -def main(): - module = sys.argv[1] - print module - if not module: - print 'The input parameters not exists.' - try: - config = TricircleConfig() - if module.upper() == 'ALL': - for mod in ('nova', 'glance', 'cinder', 'neutron'): - config.update_options(mod) - else: - config.update_options(module) - except Exception as e: - print e - print 'Update tricircle %s config options fails' % module - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/script/exec.sh b/script/exec.sh deleted file mode 100755 index 35cfcc07..00000000 --- a/script/exec.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -python config.py all \ No newline at end of file diff --git a/script/tricircle.cfg b/script/tricircle.cfg deleted file mode 100755 index 3fbcc95c..00000000 --- a/script/tricircle.cfg +++ /dev/null @@ -1,124 +0,0 @@ -[glance-api] -#Configured to indicate if using the glance sync manager. -sync_enabled=True - -#The glance sync manager api address(host + port). -sync_server_port=9595 -sync_server_host=127.0.0.1 - -#This option's value must be True if using glance sync feature, -#for sync using glance's multiple-locations feature. -show_multiple_locations=True - -[glance-api_keystone_authtoken] -service_host=127.0.0.1 -auth_host=127.0.0.1 -auth_uri=http://127.0.0.1:5000/ -admin_tenant_name=service -admin_user=glance -admin_password=openstack - -[glance-sync] -#How to sync the image, the value can be ["None", "ALL", "USER", "nova"] -#When "ALL" chosen, means to sync to all the cascaded glances; -#When "USER" chosen, means according to user's role, -#project, etc (not implemen tation); -#When "nova" chosen, means nova sync the image when first time it -#create a instance. -sync_strategy=All - -#What the cascading glance api endpoint is, must be same with what is -#in keystone's endpoint list. -cascading_endpoint_url=http:/// - -#When create instance's snapshot image, indicate which regions the snapshot -#should to be synced. -snapshot_region_names= - -[glance-sync_keystone_authtoken] -auth_host=127.0.0.1 -admin_tenant_name=service -admin_user=glance -admin_password=openstack - -[nova] -#Modify to not wait neutron creating the vif. -vif_plugging_timeout=0 -vif_plugging_is_fatal=False - -#Config the admin user of nova for sync info. -nova_admin_username=admin -nova_admin_password=openstack -nova_admin_tenant_name=admin - -#The underlying openstack's region name that this proxy service manages. -proxy_region_name= - -#The cascading nova api address. -cascading_nova_url=http://127.0.0.1:8774/v2 - -#The underlying nova restful api address for proxy to call. -cascaded_nova_url=http://127.0.0.1:8774/v2 - -#The underlying neutron restful api address for proxy to call. -cascaded_neutron_url=http://127.0.0.1:9696 - -#The flag to show whether using glance cascade. -cascaded_glance_flag=True - -#If using glance cascade, the underlying glance restful api address -#for proxy to call. 
-cascaded_glance_url=http://127.0.0.1:9292 - -#The region name this proxy belongs to. -os_region_name= - -#The keystone auth url -keystone_auth_url=http://127.0.0.1:5000/v2.0/ - -#The Cinder endpoint template. -cinder_endpoint_template=http://127.0.0.1:8776/v2/%(project_id)s - -#The ComputeManager implementation -compute_manager=nova.compute.manager_proxy.ComputeManager - -#The time interval to sync aggregate info from underlying to cascading. -sync_aggregate_info_interval = 1800 - -#Whether to sync resources from underlying to cascading. -resource_tracker_synced = False - -#If using the feature that syncs the image when an instance is first launched, -#these options must be configured to describe how to copy the image data from the -#source to the dest using the ssh/scp command. -image_copy_dest_location_url=file:///var/lib/glance/images -image_copy_dest_host=127.0.0.1 -image_copy_dest_user=glance -image_copy_dest_password=openstack -image_copy_source_location_url=file:///var/lib/glance/images -image_copy_source_host=127.0.0.1 -image_copy_source_user=glance -image_copy_source_password=openstack - -[nova_keystone_authtoken] -auth_uri = http://127.0.0.1:5000 -auth_host = 127.0.0.1 -admin_tenant_name = service -admin_user = nova -admin_password = openstack - -[cinder] -volume_manager=cinder.volume.cinder_proxy.CinderProxy -volume_sync_interval=5 -voltype_sync_interval=3600 -periodic_interval=5 -cinder_tenant_name=admin -cinder_username=admin -cinder_password=1234 -keystone_auth_url=http://127.0.0.1:5000/v2.0/ -glance_cascading_flag=False -cascading_glance_url=127.0.0.1:9292 -cascaded_glance_url=http://127.0.0.1:9292 -cascaded_cinder_url=http://127.0.0.1:8776/v2/%(project_id)s -cascaded_region_name=Region_AZ -cascaded_available_zone=AZ \ No newline at end of file
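One note on the endpoint options above, added for clarity rather than taken from the patch: the `%(project_id)s` placeholder in `cinder_endpoint_template` and `cascaded_cinder_url` has the shape of a Python named %-format field, so a proxy service can presumably expand it per tenant at request time. A minimal, hypothetical sketch (the helper name and the example project id are made up):

```python
# Hypothetical sketch: expand an endpoint template of the form used above.
template = 'http://127.0.0.1:8776/v2/%(project_id)s'


def expand_endpoint(template, project_id):
    """Fill a tenant/project id into a %-style endpoint template."""
    return template % {'project_id': project_id}


print(expand_endpoint(template, 'e3ad3a04d5d24e5b9c57c5b2d12e34a7'))
# -> http://127.0.0.1:8776/v2/e3ad3a04d5d24e5b9c57c5b2d12e34a7
```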