From c53d8e343e50d4cf7ea9a6a81258848c2d893bfb Mon Sep 17 00:00:00 2001 From: Jenkins Date: Thu, 3 May 2012 10:48:26 -0700 Subject: [PATCH] Initial fork out of Nova. --- .gitignore | 24 + .gitreview | 4 + .mailmap | 81 + Authors | 211 + HACKING.rst | 213 + LICENSE | 176 + MANIFEST.in | 37 + README.rst | 21 + babel.cfg | 2 + bin/cinder-all | 70 + bin/cinder-api | 47 + bin/cinder-manage | 635 + bin/cinder-rootwrap | 74 + bin/cinder-scheduler | 51 + bin/cinder-volume | 49 + bin/clear_rabbit_queues | 80 + cinder/__init__.py | 42 + cinder/api/__init__.py | 17 + cinder/api/auth.py | 103 + cinder/api/openstack/__init__.py | 143 + cinder/api/openstack/auth.py | 65 + cinder/api/openstack/common.py | 380 + cinder/api/openstack/compute/__init__.py | 23 + .../openstack/compute/schemas/atom-link.rng | 141 + .../compute/schemas/v1.1/extension.rng | 11 + .../compute/schemas/v1.1/extensions.rng | 6 + .../compute/schemas/v1.1/metadata.rng | 9 + cinder/api/openstack/compute/versions.py | 244 + .../api/openstack/compute/views/__init__.py | 0 .../api/openstack/compute/views/versions.py | 94 + cinder/api/openstack/extensions.py | 395 + cinder/api/openstack/urlmap.py | 297 + cinder/api/openstack/volume/__init__.py | 62 + .../api/openstack/volume/contrib/__init__.py | 39 + .../volume/contrib/types_extra_specs.py | 152 + .../openstack/volume/contrib/types_manage.py | 91 + cinder/api/openstack/volume/extensions.py | 33 + cinder/api/openstack/volume/snapshots.py | 170 + cinder/api/openstack/volume/types.py | 76 + cinder/api/openstack/volume/versions.py | 83 + cinder/api/openstack/volume/views/__init__.py | 16 + cinder/api/openstack/volume/views/versions.py | 36 + cinder/api/openstack/volume/volumes.py | 263 + cinder/api/openstack/wsgi.py | 1123 ++ cinder/api/openstack/xmlutil.py | 908 + cinder/api/sizelimit.py | 54 + cinder/common/__init__.py | 15 + cinder/common/memorycache.py | 64 + cinder/common/policy.py | 222 + cinder/compat/__init__.py | 15 + cinder/compat/flagfile.py | 188 + cinder/compute/__init__.py | 0 cinder/compute/aggregate_states.py | 44 + cinder/context.py | 138 + cinder/db/__init__.py | 23 + cinder/db/api.py | 1335 ++ cinder/db/base.py | 40 + cinder/db/migration.py | 35 + cinder/db/sqlalchemy/__init__.py | 17 + cinder/db/sqlalchemy/api.py | 1499 ++ cinder/db/sqlalchemy/migrate_repo/README | 4 + cinder/db/sqlalchemy/migrate_repo/__init__.py | 0 cinder/db/sqlalchemy/migrate_repo/manage.py | 4 + cinder/db/sqlalchemy/migrate_repo/migrate.cfg | 20 + .../migrate_repo/versions/001_austin.py | 627 + .../migrate_repo/versions/002_bexar.py | 236 + .../versions/002_postgresql_downgrade.sql | 20 + .../versions/002_sqlite_downgrade.sql | 388 + .../versions/003_add_label_to_networks.py | 42 + .../versions/003_sqlite_downgrade.sql | 111 + .../versions/004_add_zone_tables.py | 66 + .../versions/005_add_instance_metadata.py | 81 + .../006_add_provider_data_to_volumes.py | 54 + .../versions/006_sqlite_downgrade.sql | 113 + .../versions/007_add_ipv6_to_fixed_ips.py | 70 + .../versions/007_sqlite_downgrade.sql | 79 + .../versions/008_add_instance_types.py | 85 + .../versions/009_add_instance_migrations.py | 70 + .../versions/010_add_os_type_to_instances.py | 45 + .../versions/011_live_migration.py | 85 + .../versions/012_add_ipv6_flatmanager.py | 90 + .../versions/012_sqlite_upgrade.sql | 195 + .../versions/013_add_flavors_to_migrations.py | 43 + .../versions/013_sqlite_downgrade.sql | 69 + .../014_add_instance_type_id_to_instances.py | 74 + .../015_add_auto_assign_to_floating_ips.py | 35 + 
.../versions/015_sqlite_downgrade.sql | 62 + .../versions/016_make_quotas_key_and_value.py | 213 + .../017_make_instance_type_id_an_integer.py | 87 + .../018_rename_server_management_url.py | 35 + .../019_add_volume_snapshot_support.py | 82 + .../020_add_snapshot_id_to_volumes.py | 40 + .../versions/020_sqlite_downgrade.sql | 119 + .../versions/021_rename_image_ids.py | 38 + .../versions/022_set_engine_mysql_innodb.py | 64 + .../versions/023_add_vm_mode_to_instances.py | 42 + .../versions/024_add_block_device_mapping.py | 92 + .../versions/025_add_uuid_to_instances.py | 45 + .../versions/026_add_agent_table.py | 89 + .../027_add_provider_firewall_rules.py | 65 + .../028_add_instance_type_extra_specs.py | 76 + .../versions/029_add_zone_weight_offsets.py | 41 + .../migrate_repo/versions/030_multi_nic.py | 146 + .../versions/030_sqlite_downgrade.sql | 377 + .../031_fk_fixed_ips_virtual_interface_id.py | 59 + .../versions/031_sqlite_downgrade.sql | 48 + .../versions/031_sqlite_upgrade.sql | 48 + .../versions/032_add_root_device_name.py | 42 + .../migrate_repo/versions/033_ha_network.py | 42 + .../versions/033_sqlite_downgrade.sql | 193 + .../034_change_instance_id_in_migrations.py | 46 + .../versions/035_secondary_dns.py | 39 + .../036_change_flavor_id_in_migrations.py | 79 + .../versions/037_instances_drop_admin_pass.py | 42 + .../038_add_uuid_to_virtual_interfaces.py | 45 + .../versions/038_sqlite_downgrade.sql | 63 + .../versions/039_add_instances_accessip.py | 49 + .../versions/040_add_uuid_to_networks.py | 45 + .../041_add_config_drive_to_instances.py | 36 + .../042_add_volume_types_and_extradata.py | 122 + .../versions/042_sqlite_downgrade.sql | 129 + .../migrate_repo/versions/043_add_vsa_data.py | 84 + .../versions/044_update_instance_states.py | 52 + .../versions/045_add_network_priority.py | 44 + .../versions/046_add_instance_swap.py | 49 + .../047_remove_instances_fk_from_vif.py | 61 + .../versions/047_sqlite_downgrade.sql | 46 + .../versions/047_sqlite_upgrade.sql | 45 + .../versions/048_add_zone_name.py | 33 + .../versions/049_add_instances_progress.py | 44 + .../050_add_disk_config_to_instances.py | 37 + .../versions/050_sqlite_downgrade.sql | 207 + .../051_add_vcpu_weight_to_instance_types.py | 34 + .../versions/052_kill_export_devices.py | 65 + ...connection_info_to_block_device_mapping.py | 38 + .../versions/053_sqlite_downgrade.sql | 87 + .../versions/054_add_bw_usage_data_cache.py | 64 + .../versions/055_convert_flavor_id_to_str.py | 112 + .../versions/056_add_s3_images.py | 60 + .../versions/057_add_sm_driver_tables.py | 113 + .../versions/058_rename_managed_disk.py | 37 + .../059_split_rxtx_quota_into_network.py | 61 + .../versions/059_sqlite_downgrade.sql | 137 + .../versions/059_sqlite_upgrade.sql | 87 + .../060_remove_network_fk_from_vif.py | 62 + .../versions/060_sqlite_downgrade.sql | 45 + .../versions/060_sqlite_upgrade.sql | 44 + .../061_add_index_to_instance_uuid.py | 29 + .../062_add_instance_info_cache_table.py | 70 + .../versions/063_add_instance_faults_table.py | 60 + ...instance_id_to_uuid_in_instance_actions.py | 80 + .../065_add_index_to_instance_project_id.py | 31 + .../066_preload_instance_info_cache_table.py | 31 + ...7_add_pool_and_interface_to_floating_ip.py | 41 + .../versions/067_sqlite_downgrade.sql | 69 + .../versions/068_add_instance_attribute.py | 36 + .../versions/068_sqlite_downgrade.sql | 219 + .../versions/069_block_migration.py | 50 + .../versions/070_sqlite_downgrade.sql | 103 + .../versions/070_sqlite_upgrade.sql | 99 + 
.../versions/070_untie_nova_network_models.py | 100 + .../versions/071_add_host_aggregate_tables.py | 108 + .../versions/072_add_dns_table.py | 77 + .../versions/072_mysql_upgrade.sql | 13 + .../migrate_repo/versions/073_add_capacity.py | 49 + .../versions/074_change_flavor_local_gb.py | 130 + .../versions/074_sqlite_upgrade.sql | 313 + ...75_convert_bw_usage_to_store_network_id.py | 97 + .../versions/076_remove_unique_constraints.py | 84 + .../versions/076_sqlite_upgrade.sql | 61 + .../versions/077_convert_to_utf8.py | 61 + .../versions/078_add_rpc_info_to_zones.py | 46 + .../versions/078_sqlite_downgrade.sql | 35 + .../079_add_zone_name_to_instances.py | 30 + ...dd_hypervisor_hostname_to_compute_nodes.py | 30 + .../versions/081_drop_instance_id_bw_cache.py | 69 + .../migrate_repo/versions/082_zone_to_cell.py | 35 + .../migrate_repo/versions/083_quota_class.py | 61 + .../versions/084_quotas_unlimited.py | 43 + .../085_add_index_to_fixed_ips_by_address.py | 31 + .../versions/086_set_engine_mysql_innodb.py | 44 + .../087_add_uuid_to_bw_usage_cache.py | 56 + ...ance_id_to_uuid_in_block_device_mapping.py | 81 + .../versions/088_sqlite_downgrade.sql | 97 + .../versions/088_sqlite_upgrade.sql | 97 + .../versions/089_add_volume_id_mappings.py | 116 + .../versions/090_modify_volume_id_datatype.py | 239 + .../versions/090_sqlite_downgrade.sql | 226 + .../versions/090_sqlite_upgrade.sql | 226 + .../091_convert_volume_ids_to_uuid.py | 145 + .../migrate_repo/versions/__init__.py | 0 cinder/db/sqlalchemy/migration.py | 129 + cinder/db/sqlalchemy/models.py | 1063 + cinder/db/sqlalchemy/session.py | 156 + cinder/exception.py | 938 + cinder/flags.py | 356 + cinder/locale/bs/LC_MESSAGES/nova.po | 8201 ++++++++ cinder/locale/cs/LC_MESSAGES/nova.po | 8251 ++++++++ cinder/locale/da/LC_MESSAGES/nova.po | 8203 ++++++++ cinder/locale/de/LC_MESSAGES/nova.po | 8208 ++++++++ cinder/locale/en_AU/LC_MESSAGES/nova.po | 8209 ++++++++ cinder/locale/en_GB/LC_MESSAGES/nova.po | 8209 ++++++++ cinder/locale/es/LC_MESSAGES/nova.po | 8220 ++++++++ cinder/locale/fr/LC_MESSAGES/nova.po | 8251 ++++++++ cinder/locale/it/LC_MESSAGES/nova.po | 8210 ++++++++ cinder/locale/ja/LC_MESSAGES/nova.po | 8196 ++++++++ cinder/locale/ko/LC_MESSAGES/nova.po | 8207 ++++++++ cinder/locale/nova.pot | 7463 +++++++ cinder/locale/pt_BR/LC_MESSAGES/nova.po | 8208 ++++++++ cinder/locale/ru/LC_MESSAGES/nova.po | 8304 ++++++++ cinder/locale/tl/LC_MESSAGES/nova.po | 8200 ++++++++ cinder/locale/tr/LC_MESSAGES/nova.po | 8202 ++++++++ cinder/locale/uk/LC_MESSAGES/nova.po | 8199 ++++++++ cinder/locale/zh_CN/LC_MESSAGES/nova.po | 8064 ++++++++ cinder/locale/zh_TW/LC_MESSAGES/nova.po | 8207 ++++++++ cinder/log.py | 416 + cinder/manager.py | 205 + cinder/notifier/__init__.py | 14 + cinder/notifier/api.py | 133 + cinder/notifier/capacity_notifier.py | 81 + cinder/notifier/list_notifier.py | 71 + cinder/notifier/log_notifier.py | 34 + cinder/notifier/no_op_notifier.py | 19 + cinder/notifier/rabbit_notifier.py | 46 + cinder/notifier/test_notifier.py | 25 + cinder/openstack/__init__.py | 15 + cinder/openstack/common/README | 13 + cinder/openstack/common/__init__.py | 15 + cinder/openstack/common/cfg.py | 1298 ++ cinder/openstack/common/exception.py | 147 + cinder/openstack/common/importutils.py | 45 + cinder/openstack/common/iniparser.py | 126 + cinder/openstack/common/local.py | 37 + cinder/policy.py | 90 + cinder/quota.py | 234 + cinder/rootwrap/__init__.py | 16 + cinder/rootwrap/filters.py | 147 + cinder/rootwrap/volume.py | 45 + cinder/rootwrap/wrapper.py | 60 
+ cinder/rpc/__init__.py | 227 + cinder/rpc/amqp.py | 405 + cinder/rpc/common.py | 220 + cinder/rpc/impl_fake.py | 185 + cinder/rpc/impl_kombu.py | 713 + cinder/rpc/impl_qpid.py | 563 + cinder/scheduler/__init__.py | 27 + cinder/scheduler/api.py | 72 + cinder/scheduler/chance.py | 83 + cinder/scheduler/driver.py | 164 + cinder/scheduler/host_manager.py | 36 + cinder/scheduler/manager.py | 204 + cinder/scheduler/simple.py | 144 + cinder/service.py | 429 + cinder/test.py | 295 + cinder/testing/README.rst | 66 + cinder/testing/__init__.py | 0 cinder/testing/fake/__init__.py | 1 + cinder/testing/runner.py | 372 + cinder/tests/__init__.py | 84 + cinder/tests/api/__init__.py | 19 + cinder/tests/api/openstack/__init__.py | 19 + cinder/tests/api/openstack/common.py | 58 + cinder/tests/api/openstack/fakes.py | 234 + cinder/tests/api/openstack/test_common.py | 526 + cinder/tests/api/openstack/test_faults.py | 208 + cinder/tests/api/openstack/test_wsgi.py | 833 + cinder/tests/api/openstack/test_xmlutil.py | 722 + cinder/tests/api/openstack/volume/__init__.py | 19 + .../api/openstack/volume/contrib/__init__.py | 19 + .../volume/contrib/test_types_extra_specs.py | 202 + .../volume/contrib/test_types_manage.py | 103 + .../openstack/volume/extensions/__init__.py | 15 + .../openstack/volume/extensions/foxinsocks.py | 94 + .../api/openstack/volume/test_extensions.py | 156 + .../tests/api/openstack/volume/test_router.py | 105 + .../api/openstack/volume/test_snapshots.py | 214 + .../tests/api/openstack/volume/test_types.py | 146 + .../api/openstack/volume/test_volumes.py | 290 + cinder/tests/api/test_auth.py | 58 + cinder/tests/api/test_sizelimit.py | 51 + cinder/tests/api/test_wsgi.py | 67 + cinder/tests/db/__init__.py | 20 + cinder/tests/db/fakes.py | 47 + cinder/tests/declare_flags.py | 23 + cinder/tests/fake_flags.py | 34 + cinder/tests/fake_utils.py | 112 + cinder/tests/integrated/__init__.py | 22 + cinder/tests/integrated/api/__init__.py | 20 + cinder/tests/integrated/api/client.py | 217 + cinder/tests/integrated/integrated_helpers.py | 130 + cinder/tests/integrated/test_extensions.py | 42 + cinder/tests/integrated/test_login.py | 31 + cinder/tests/integrated/test_volumes.py | 181 + cinder/tests/integrated/test_xml.py | 52 + cinder/tests/monkey_patch_example/__init__.py | 33 + .../tests/monkey_patch_example/example_a.py | 29 + .../tests/monkey_patch_example/example_b.py | 30 + cinder/tests/notifier/__init__.py | 16 + .../tests/notifier/test_capacity_notifier.py | 59 + cinder/tests/notifier/test_list_notifier.py | 84 + cinder/tests/policy.json | 25 + cinder/tests/rpc/__init__.py | 19 + cinder/tests/rpc/common.py | 239 + cinder/tests/rpc/test_common.py | 147 + cinder/tests/rpc/test_fake.py | 33 + cinder/tests/rpc/test_kombu.py | 350 + cinder/tests/rpc/test_kombu_ssl.py | 58 + cinder/tests/rpc/test_qpid.py | 340 + cinder/tests/runtime_flags.py | 23 + cinder/tests/scheduler/__init__.py | 19 + cinder/tests/scheduler/fakes.py | 62 + cinder/tests/scheduler/test_scheduler.py | 322 + cinder/tests/test_SolidFireSanISCSIDriver.py | 186 + cinder/tests/test_api.py | 75 + cinder/tests/test_compat_flagfile.py | 175 + cinder/tests/test_context.py | 70 + cinder/tests/test_db_api.py | 331 + cinder/tests/test_exception.py | 126 + cinder/tests/test_flags.py | 146 + cinder/tests/test_iscsi.py | 116 + cinder/tests/test_log.py | 218 + cinder/tests/test_migrations.conf | 9 + cinder/tests/test_migrations.py | 296 + cinder/tests/test_misc.py | 184 + cinder/tests/test_netapp.py | 927 + cinder/tests/test_nexenta.py | 281 + 
cinder/tests/test_notifier.py | 133 + cinder/tests/test_nova_rootwrap.py | 133 + cinder/tests/test_policy.py | 189 + cinder/tests/test_quota.py | 316 + cinder/tests/test_service.py | 221 + cinder/tests/test_skip_examples.py | 47 + cinder/tests/test_test.py | 44 + cinder/tests/test_test_utils.py | 29 + cinder/tests/test_utils.py | 1188 ++ cinder/tests/test_versions.py | 59 + cinder/tests/test_volume.py | 501 + cinder/tests/test_volume_types.py | 167 + cinder/tests/test_volume_types_extra_specs.py | 130 + cinder/tests/test_wsgi.py | 92 + cinder/tests/utils.py | 25 + cinder/utils.py | 1678 ++ cinder/version.py | 38 + cinder/volume/__init__.py | 25 + cinder/volume/api.py | 371 + cinder/volume/driver.py | 709 + cinder/volume/iscsi.py | 160 + cinder/volume/manager.py | 331 + cinder/volume/netapp.py | 676 + cinder/volume/nexenta/__init__.py | 33 + cinder/volume/nexenta/jsonrpc.py | 84 + cinder/volume/nexenta/volume.py | 282 + cinder/volume/san.py | 897 + cinder/volume/volume_types.py | 125 + cinder/volume/xensm.py | 237 + cinder/wsgi.py | 374 + contrib/openstack-config | 65 + contrib/redhat-eventlet.patch | 16 + doc/.gitignore | 3 + doc/Makefile | 97 + doc/README.rst | 55 + doc/ext/__init__.py | 0 doc/ext/nova_autodoc.py | 12 + doc/ext/nova_todo.py | 101 + doc/find_autodoc_modules.sh | 20 + doc/generate_autodoc_index.sh | 46 + doc/source/_ga/layout.html | 17 + doc/source/_static/.gitignore | 0 doc/source/_static/.placeholder | 0 doc/source/_static/basic.css | 416 + doc/source/_static/default.css | 230 + doc/source/_static/jquery.tweet.js | 154 + doc/source/_static/tweaks.css | 218 + doc/source/_templates/.gitignore | 0 doc/source/_templates/.placeholder | 0 doc/source/_theme/layout.html | 95 + doc/source/_theme/theme.conf | 5 + doc/source/conf.py | 234 + doc/source/devref/addmethod.openstackapi.rst | 56 + doc/source/devref/aggregates.rst | 65 + doc/source/devref/api.rst | 270 + doc/source/devref/architecture.rst | 52 + doc/source/devref/auth.rst | 276 + doc/source/devref/cloudpipe.rst | 166 + doc/source/devref/database.rst | 63 + doc/source/devref/development.environment.rst | 152 + doc/source/devref/down.sh | 7 + doc/source/devref/fakes.rst | 85 + doc/source/devref/filter_scheduler.rst | 258 + doc/source/devref/gerrit.rst | 16 + doc/source/devref/glance.rst | 28 + doc/source/devref/il8n.rst | 34 + doc/source/devref/index.rst | 86 + doc/source/devref/interfaces | 17 + doc/source/devref/jenkins.rst | 41 + doc/source/devref/launchpad.rst | 54 + doc/source/devref/multinic.rst | 39 + doc/source/devref/network.rst | 128 + doc/source/devref/nova.rst | 215 + doc/source/devref/rc.local | 36 + doc/source/devref/rpc.rst | 151 + doc/source/devref/scheduler.rst | 71 + doc/source/devref/server.conf.template | 34 + doc/source/devref/services.rst | 55 + doc/source/devref/threading.rst | 51 + doc/source/devref/unit_tests.rst | 159 + doc/source/devref/up.sh | 7 + doc/source/devref/volume.rst | 66 + doc/source/devref/xensmvolume.rst | 88 + doc/source/image_src/multinic_1.odg | Bin 0 -> 12363 bytes doc/source/image_src/multinic_2.odg | Bin 0 -> 13425 bytes doc/source/image_src/multinic_3.odg | Bin 0 -> 13598 bytes doc/source/images/NOVA_ARCH.png | Bin 0 -> 191332 bytes doc/source/images/NOVA_ARCH.svg | 5854 ++++++ doc/source/images/NOVA_ARCH_200dpi.png | Bin 0 -> 439024 bytes doc/source/images/NOVA_ARCH_66dpi.png | Bin 0 -> 110890 bytes doc/source/images/NOVA_clouds_A_B.png | Bin 0 -> 77007 bytes doc/source/images/NOVA_clouds_A_B.svg | 16342 ++++++++++++++++ doc/source/images/NOVA_clouds_C1_C2.svg | 9763 +++++++++ 
doc/source/images/NOVA_clouds_C1_C2.svg.png | Bin 0 -> 448574 bytes doc/source/images/Novadiagram.png | Bin 0 -> 52609 bytes doc/source/images/base_scheduler.png | Bin 0 -> 17068 bytes doc/source/images/cloudpipe.png | Bin 0 -> 89812 bytes doc/source/images/fabric.png | Bin 0 -> 125915 bytes doc/source/images/filteringWorkflow1.png | Bin 0 -> 66997 bytes doc/source/images/filteringWorkflow2.png | Bin 0 -> 75288 bytes doc/source/images/multinic_dhcp.png | Bin 0 -> 54531 bytes doc/source/images/multinic_flat.png | Bin 0 -> 40871 bytes doc/source/images/multinic_vlan.png | Bin 0 -> 58552 bytes doc/source/images/nova.compute.api.create.png | Bin 0 -> 50171 bytes doc/source/images/novascreens.png | Bin 0 -> 27949 bytes doc/source/images/novashvirtually.png | Bin 0 -> 39000 bytes doc/source/images/rpc/arch.png | Bin 0 -> 26690 bytes doc/source/images/rpc/arch.svg | 292 + doc/source/images/rpc/flow1.png | Bin 0 -> 40982 bytes doc/source/images/rpc/flow1.svg | 617 + doc/source/images/rpc/flow2.png | Bin 0 -> 30650 bytes doc/source/images/rpc/flow2.svg | 423 + doc/source/images/rpc/rabt.png | Bin 0 -> 44964 bytes doc/source/images/rpc/rabt.svg | 581 + doc/source/images/rpc/state.png | Bin 0 -> 38543 bytes doc/source/images/vmwareapi_blockdiagram.jpg | Bin 0 -> 75363 bytes doc/source/images/zone_aware_overview.png | Bin 0 -> 56142 bytes doc/source/images/zone_aware_scheduler.png | Bin 0 -> 20902 bytes doc/source/images/zone_overview.png | Bin 0 -> 51587 bytes doc/source/index.rst | 67 + doc/source/man/nova-manage.rst | 281 + etc/cinder/api-paste.ini | 51 + etc/cinder/cinder.conf.sample | 673 + etc/cinder/logging_sample.conf | 76 + etc/cinder/policy.json | 15 + openstack-common.conf | 7 + pylintrc | 38 + run_tests.sh | 176 + setup.cfg | 32 + setup.py | 73 + tools/clean-vlans | 25 + tools/clean_file_locks.py | 63 + tools/conf/create_conf.py | 159 + tools/conf/generate_sample.sh | 25 + tools/enable-pre-commit-hook.sh | 42 + tools/hacking.py | 391 + tools/install_venv.py | 248 + tools/pip-requires | 22 + tools/rfc.sh | 150 + tools/test-requires | 11 + tools/with_venv.sh | 4 + tox.ini | 38 + 468 files changed, 247534 insertions(+) create mode 100644 .gitignore create mode 100644 .gitreview create mode 100644 .mailmap create mode 100644 Authors create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100755 bin/cinder-all create mode 100755 bin/cinder-api create mode 100755 bin/cinder-manage create mode 100755 bin/cinder-rootwrap create mode 100755 bin/cinder-scheduler create mode 100755 bin/cinder-volume create mode 100755 bin/clear_rabbit_queues create mode 100644 cinder/__init__.py create mode 100644 cinder/api/__init__.py create mode 100644 cinder/api/auth.py create mode 100644 cinder/api/openstack/__init__.py create mode 100644 cinder/api/openstack/auth.py create mode 100644 cinder/api/openstack/common.py create mode 100644 cinder/api/openstack/compute/__init__.py create mode 100644 cinder/api/openstack/compute/schemas/atom-link.rng create mode 100644 cinder/api/openstack/compute/schemas/v1.1/extension.rng create mode 100644 cinder/api/openstack/compute/schemas/v1.1/extensions.rng create mode 100644 cinder/api/openstack/compute/schemas/v1.1/metadata.rng create mode 100644 cinder/api/openstack/compute/versions.py create mode 100644 cinder/api/openstack/compute/views/__init__.py create mode 100644 cinder/api/openstack/compute/views/versions.py create mode 100644 cinder/api/openstack/extensions.py 
create mode 100644 cinder/api/openstack/urlmap.py create mode 100644 cinder/api/openstack/volume/__init__.py create mode 100644 cinder/api/openstack/volume/contrib/__init__.py create mode 100644 cinder/api/openstack/volume/contrib/types_extra_specs.py create mode 100644 cinder/api/openstack/volume/contrib/types_manage.py create mode 100644 cinder/api/openstack/volume/extensions.py create mode 100644 cinder/api/openstack/volume/snapshots.py create mode 100644 cinder/api/openstack/volume/types.py create mode 100644 cinder/api/openstack/volume/versions.py create mode 100644 cinder/api/openstack/volume/views/__init__.py create mode 100644 cinder/api/openstack/volume/views/versions.py create mode 100644 cinder/api/openstack/volume/volumes.py create mode 100644 cinder/api/openstack/wsgi.py create mode 100644 cinder/api/openstack/xmlutil.py create mode 100644 cinder/api/sizelimit.py create mode 100644 cinder/common/__init__.py create mode 100644 cinder/common/memorycache.py create mode 100644 cinder/common/policy.py create mode 100644 cinder/compat/__init__.py create mode 100644 cinder/compat/flagfile.py create mode 100644 cinder/compute/__init__.py create mode 100644 cinder/compute/aggregate_states.py create mode 100644 cinder/context.py create mode 100644 cinder/db/__init__.py create mode 100644 cinder/db/api.py create mode 100644 cinder/db/base.py create mode 100644 cinder/db/migration.py create mode 100644 cinder/db/sqlalchemy/__init__.py create mode 100644 cinder/db/sqlalchemy/api.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/README create mode 100644 cinder/db/sqlalchemy/migrate_repo/__init__.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/manage.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/migrate.cfg create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/011_live_migration.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql create mode 100644 
cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/033_ha_network.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py create mode 100644 
cinder/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_upgrade.sql create mode 100644 
cinder/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/074_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/076_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/080_add_hypervisor_hostname_to_compute_nodes.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/082_zone_to_cell.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/__init__.py create mode 100644 cinder/db/sqlalchemy/migration.py create mode 100644 cinder/db/sqlalchemy/models.py create mode 100644 cinder/db/sqlalchemy/session.py create mode 100644 cinder/exception.py create mode 100644 cinder/flags.py create mode 100644 cinder/locale/bs/LC_MESSAGES/nova.po create mode 100644 cinder/locale/cs/LC_MESSAGES/nova.po create mode 100644 cinder/locale/da/LC_MESSAGES/nova.po create mode 100644 cinder/locale/de/LC_MESSAGES/nova.po create mode 100644 cinder/locale/en_AU/LC_MESSAGES/nova.po create mode 100644 cinder/locale/en_GB/LC_MESSAGES/nova.po create mode 100644 cinder/locale/es/LC_MESSAGES/nova.po create mode 100644 cinder/locale/fr/LC_MESSAGES/nova.po create mode 100644 cinder/locale/it/LC_MESSAGES/nova.po create 
mode 100644 cinder/locale/ja/LC_MESSAGES/nova.po create mode 100644 cinder/locale/ko/LC_MESSAGES/nova.po create mode 100644 cinder/locale/nova.pot create mode 100644 cinder/locale/pt_BR/LC_MESSAGES/nova.po create mode 100644 cinder/locale/ru/LC_MESSAGES/nova.po create mode 100644 cinder/locale/tl/LC_MESSAGES/nova.po create mode 100644 cinder/locale/tr/LC_MESSAGES/nova.po create mode 100644 cinder/locale/uk/LC_MESSAGES/nova.po create mode 100644 cinder/locale/zh_CN/LC_MESSAGES/nova.po create mode 100644 cinder/locale/zh_TW/LC_MESSAGES/nova.po create mode 100644 cinder/log.py create mode 100644 cinder/manager.py create mode 100644 cinder/notifier/__init__.py create mode 100644 cinder/notifier/api.py create mode 100644 cinder/notifier/capacity_notifier.py create mode 100644 cinder/notifier/list_notifier.py create mode 100644 cinder/notifier/log_notifier.py create mode 100644 cinder/notifier/no_op_notifier.py create mode 100644 cinder/notifier/rabbit_notifier.py create mode 100644 cinder/notifier/test_notifier.py create mode 100644 cinder/openstack/__init__.py create mode 100644 cinder/openstack/common/README create mode 100644 cinder/openstack/common/__init__.py create mode 100644 cinder/openstack/common/cfg.py create mode 100644 cinder/openstack/common/exception.py create mode 100644 cinder/openstack/common/importutils.py create mode 100644 cinder/openstack/common/iniparser.py create mode 100644 cinder/openstack/common/local.py create mode 100644 cinder/policy.py create mode 100644 cinder/quota.py create mode 100755 cinder/rootwrap/__init__.py create mode 100755 cinder/rootwrap/filters.py create mode 100755 cinder/rootwrap/volume.py create mode 100755 cinder/rootwrap/wrapper.py create mode 100644 cinder/rpc/__init__.py create mode 100644 cinder/rpc/amqp.py create mode 100644 cinder/rpc/common.py create mode 100644 cinder/rpc/impl_fake.py create mode 100644 cinder/rpc/impl_kombu.py create mode 100644 cinder/rpc/impl_qpid.py create mode 100644 cinder/scheduler/__init__.py create mode 100644 cinder/scheduler/api.py create mode 100644 cinder/scheduler/chance.py create mode 100644 cinder/scheduler/driver.py create mode 100644 cinder/scheduler/host_manager.py create mode 100644 cinder/scheduler/manager.py create mode 100644 cinder/scheduler/simple.py create mode 100644 cinder/service.py create mode 100644 cinder/test.py create mode 100644 cinder/testing/README.rst create mode 100644 cinder/testing/__init__.py create mode 100644 cinder/testing/fake/__init__.py create mode 100644 cinder/testing/runner.py create mode 100644 cinder/tests/__init__.py create mode 100644 cinder/tests/api/__init__.py create mode 100644 cinder/tests/api/openstack/__init__.py create mode 100644 cinder/tests/api/openstack/common.py create mode 100644 cinder/tests/api/openstack/fakes.py create mode 100644 cinder/tests/api/openstack/test_common.py create mode 100644 cinder/tests/api/openstack/test_faults.py create mode 100644 cinder/tests/api/openstack/test_wsgi.py create mode 100644 cinder/tests/api/openstack/test_xmlutil.py create mode 100644 cinder/tests/api/openstack/volume/__init__.py create mode 100644 cinder/tests/api/openstack/volume/contrib/__init__.py create mode 100644 cinder/tests/api/openstack/volume/contrib/test_types_extra_specs.py create mode 100644 cinder/tests/api/openstack/volume/contrib/test_types_manage.py create mode 100644 cinder/tests/api/openstack/volume/extensions/__init__.py create mode 100644 cinder/tests/api/openstack/volume/extensions/foxinsocks.py create mode 100644 
cinder/tests/api/openstack/volume/test_extensions.py create mode 100644 cinder/tests/api/openstack/volume/test_router.py create mode 100644 cinder/tests/api/openstack/volume/test_snapshots.py create mode 100644 cinder/tests/api/openstack/volume/test_types.py create mode 100644 cinder/tests/api/openstack/volume/test_volumes.py create mode 100644 cinder/tests/api/test_auth.py create mode 100644 cinder/tests/api/test_sizelimit.py create mode 100644 cinder/tests/api/test_wsgi.py create mode 100644 cinder/tests/db/__init__.py create mode 100644 cinder/tests/db/fakes.py create mode 100644 cinder/tests/declare_flags.py create mode 100644 cinder/tests/fake_flags.py create mode 100644 cinder/tests/fake_utils.py create mode 100644 cinder/tests/integrated/__init__.py create mode 100644 cinder/tests/integrated/api/__init__.py create mode 100644 cinder/tests/integrated/api/client.py create mode 100644 cinder/tests/integrated/integrated_helpers.py create mode 100644 cinder/tests/integrated/test_extensions.py create mode 100644 cinder/tests/integrated/test_login.py create mode 100644 cinder/tests/integrated/test_volumes.py create mode 100644 cinder/tests/integrated/test_xml.py create mode 100644 cinder/tests/monkey_patch_example/__init__.py create mode 100644 cinder/tests/monkey_patch_example/example_a.py create mode 100644 cinder/tests/monkey_patch_example/example_b.py create mode 100644 cinder/tests/notifier/__init__.py create mode 100644 cinder/tests/notifier/test_capacity_notifier.py create mode 100644 cinder/tests/notifier/test_list_notifier.py create mode 100644 cinder/tests/policy.json create mode 100644 cinder/tests/rpc/__init__.py create mode 100644 cinder/tests/rpc/common.py create mode 100644 cinder/tests/rpc/test_common.py create mode 100644 cinder/tests/rpc/test_fake.py create mode 100644 cinder/tests/rpc/test_kombu.py create mode 100644 cinder/tests/rpc/test_kombu_ssl.py create mode 100644 cinder/tests/rpc/test_qpid.py create mode 100644 cinder/tests/runtime_flags.py create mode 100644 cinder/tests/scheduler/__init__.py create mode 100644 cinder/tests/scheduler/fakes.py create mode 100644 cinder/tests/scheduler/test_scheduler.py create mode 100644 cinder/tests/test_SolidFireSanISCSIDriver.py create mode 100644 cinder/tests/test_api.py create mode 100644 cinder/tests/test_compat_flagfile.py create mode 100644 cinder/tests/test_context.py create mode 100644 cinder/tests/test_db_api.py create mode 100644 cinder/tests/test_exception.py create mode 100644 cinder/tests/test_flags.py create mode 100644 cinder/tests/test_iscsi.py create mode 100644 cinder/tests/test_log.py create mode 100644 cinder/tests/test_migrations.conf create mode 100644 cinder/tests/test_migrations.py create mode 100644 cinder/tests/test_misc.py create mode 100644 cinder/tests/test_netapp.py create mode 100644 cinder/tests/test_nexenta.py create mode 100644 cinder/tests/test_notifier.py create mode 100644 cinder/tests/test_nova_rootwrap.py create mode 100644 cinder/tests/test_policy.py create mode 100644 cinder/tests/test_quota.py create mode 100644 cinder/tests/test_service.py create mode 100644 cinder/tests/test_skip_examples.py create mode 100644 cinder/tests/test_test.py create mode 100644 cinder/tests/test_test_utils.py create mode 100644 cinder/tests/test_utils.py create mode 100644 cinder/tests/test_versions.py create mode 100644 cinder/tests/test_volume.py create mode 100644 cinder/tests/test_volume_types.py create mode 100644 cinder/tests/test_volume_types_extra_specs.py create mode 100644 cinder/tests/test_wsgi.py 
create mode 100644 cinder/tests/utils.py create mode 100644 cinder/utils.py create mode 100644 cinder/version.py create mode 100644 cinder/volume/__init__.py create mode 100644 cinder/volume/api.py create mode 100644 cinder/volume/driver.py create mode 100644 cinder/volume/iscsi.py create mode 100644 cinder/volume/manager.py create mode 100644 cinder/volume/netapp.py create mode 100644 cinder/volume/nexenta/__init__.py create mode 100644 cinder/volume/nexenta/jsonrpc.py create mode 100644 cinder/volume/nexenta/volume.py create mode 100644 cinder/volume/san.py create mode 100644 cinder/volume/volume_types.py create mode 100644 cinder/volume/xensm.py create mode 100644 cinder/wsgi.py create mode 100755 contrib/openstack-config create mode 100644 contrib/redhat-eventlet.patch create mode 100644 doc/.gitignore create mode 100644 doc/Makefile create mode 100644 doc/README.rst create mode 100644 doc/ext/__init__.py create mode 100644 doc/ext/nova_autodoc.py create mode 100644 doc/ext/nova_todo.py create mode 100755 doc/find_autodoc_modules.sh create mode 100755 doc/generate_autodoc_index.sh create mode 100644 doc/source/_ga/layout.html create mode 100644 doc/source/_static/.gitignore create mode 100644 doc/source/_static/.placeholder create mode 100644 doc/source/_static/basic.css create mode 100644 doc/source/_static/default.css create mode 100644 doc/source/_static/jquery.tweet.js create mode 100644 doc/source/_static/tweaks.css create mode 100644 doc/source/_templates/.gitignore create mode 100644 doc/source/_templates/.placeholder create mode 100644 doc/source/_theme/layout.html create mode 100644 doc/source/_theme/theme.conf create mode 100644 doc/source/conf.py create mode 100644 doc/source/devref/addmethod.openstackapi.rst create mode 100644 doc/source/devref/aggregates.rst create mode 100644 doc/source/devref/api.rst create mode 100644 doc/source/devref/architecture.rst create mode 100644 doc/source/devref/auth.rst create mode 100644 doc/source/devref/cloudpipe.rst create mode 100644 doc/source/devref/database.rst create mode 100644 doc/source/devref/development.environment.rst create mode 100644 doc/source/devref/down.sh create mode 100644 doc/source/devref/fakes.rst create mode 100644 doc/source/devref/filter_scheduler.rst create mode 100644 doc/source/devref/gerrit.rst create mode 100644 doc/source/devref/glance.rst create mode 100644 doc/source/devref/il8n.rst create mode 100644 doc/source/devref/index.rst create mode 100644 doc/source/devref/interfaces create mode 100644 doc/source/devref/jenkins.rst create mode 100644 doc/source/devref/launchpad.rst create mode 100644 doc/source/devref/multinic.rst create mode 100644 doc/source/devref/network.rst create mode 100644 doc/source/devref/nova.rst create mode 100644 doc/source/devref/rc.local create mode 100644 doc/source/devref/rpc.rst create mode 100644 doc/source/devref/scheduler.rst create mode 100644 doc/source/devref/server.conf.template create mode 100644 doc/source/devref/services.rst create mode 100644 doc/source/devref/threading.rst create mode 100644 doc/source/devref/unit_tests.rst create mode 100644 doc/source/devref/up.sh create mode 100644 doc/source/devref/volume.rst create mode 100644 doc/source/devref/xensmvolume.rst create mode 100644 doc/source/image_src/multinic_1.odg create mode 100644 doc/source/image_src/multinic_2.odg create mode 100644 doc/source/image_src/multinic_3.odg create mode 100644 doc/source/images/NOVA_ARCH.png create mode 100644 doc/source/images/NOVA_ARCH.svg create mode 100644 
doc/source/images/NOVA_ARCH_200dpi.png create mode 100644 doc/source/images/NOVA_ARCH_66dpi.png create mode 100644 doc/source/images/NOVA_clouds_A_B.png create mode 100644 doc/source/images/NOVA_clouds_A_B.svg create mode 100644 doc/source/images/NOVA_clouds_C1_C2.svg create mode 100644 doc/source/images/NOVA_clouds_C1_C2.svg.png create mode 100644 doc/source/images/Novadiagram.png create mode 100644 doc/source/images/base_scheduler.png create mode 100644 doc/source/images/cloudpipe.png create mode 100644 doc/source/images/fabric.png create mode 100644 doc/source/images/filteringWorkflow1.png create mode 100644 doc/source/images/filteringWorkflow2.png create mode 100644 doc/source/images/multinic_dhcp.png create mode 100644 doc/source/images/multinic_flat.png create mode 100644 doc/source/images/multinic_vlan.png create mode 100755 doc/source/images/nova.compute.api.create.png create mode 100644 doc/source/images/novascreens.png create mode 100644 doc/source/images/novashvirtually.png create mode 100644 doc/source/images/rpc/arch.png create mode 100644 doc/source/images/rpc/arch.svg create mode 100644 doc/source/images/rpc/flow1.png create mode 100644 doc/source/images/rpc/flow1.svg create mode 100644 doc/source/images/rpc/flow2.png create mode 100644 doc/source/images/rpc/flow2.svg create mode 100644 doc/source/images/rpc/rabt.png create mode 100644 doc/source/images/rpc/rabt.svg create mode 100644 doc/source/images/rpc/state.png create mode 100644 doc/source/images/vmwareapi_blockdiagram.jpg create mode 100755 doc/source/images/zone_aware_overview.png create mode 100644 doc/source/images/zone_aware_scheduler.png create mode 100755 doc/source/images/zone_overview.png create mode 100644 doc/source/index.rst create mode 100644 doc/source/man/nova-manage.rst create mode 100644 etc/cinder/api-paste.ini create mode 100644 etc/cinder/cinder.conf.sample create mode 100644 etc/cinder/logging_sample.conf create mode 100644 etc/cinder/policy.json create mode 100644 openstack-common.conf create mode 100644 pylintrc create mode 100755 run_tests.sh create mode 100644 setup.cfg create mode 100644 setup.py create mode 100755 tools/clean-vlans create mode 100755 tools/clean_file_locks.py create mode 100644 tools/conf/create_conf.py create mode 100755 tools/conf/generate_sample.sh create mode 100755 tools/enable-pre-commit-hook.sh create mode 100755 tools/hacking.py create mode 100644 tools/install_venv.py create mode 100644 tools/pip-requires create mode 100755 tools/rfc.sh create mode 100644 tools/test-requires create mode 100755 tools/with_venv.sh create mode 100644 tox.ini diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000..97cda3c7c18 --- /dev/null +++ b/.gitignore @@ -0,0 +1,24 @@ +*.pyc +*.DS_Store +local_settings.py +CA/ +keeper +instances +keys +build/* +build-stamp +cinder.egg-info +nova.egg-info +.cinder-venv +.nova-venv +.venv +.tox +*.sqlite +*.log +*.mo +tools/conf/cinder.conf* +tools/conf/nova.conf* +cover/* +dist/* +.coverage +covhtml diff --git a/.gitreview b/.gitreview new file mode 100644 index 00000000000..eecf939449d --- /dev/null +++ b/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/cinder.git diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000000..f38c68aeb49 --- /dev/null +++ b/.mailmap @@ -0,0 +1,81 @@ +# Format is: +# <preferred e-mail> <other e-mail 1> [the .mailmap entries that followed were pairs of e-mail addresses in angle brackets; the addresses were lost in extraction, and only the name fragment "Masumoto" survives from one entry] diff
--git a/Authors b/Authors new file mode 100644 index 00000000000..f4bd7344f29 --- /dev/null +++ b/Authors @@ -0,0 +1,211 @@ +Aaron Lee +Adam Gandelman +Adam Johnson +Adrian Smith +Ahmad Hassan +Alex Meade +Alexander Sakhnov +Alexander Kovalev +Alvaro Lopez Garcia +Andrew Bogott +Andrew Clay Shafer +Andrey Brindeyev +Andy Smith +Andy Southgate +Anne Gentle +Ante Karamatić +Anthony Young +Antony Messerli +Armando Migliaccio +Arvind Somya +Asbjørn Sannes +Ben McGraw +Ben Swartzlander +Bilal Akhtar +Brad Hall +Brad McConnell +Brendan Maguire +Brian Elliott +Brian Lamar +Brian Schott +Brian Waldon +Chiradeep Vittal +Chmouel Boudjnah +Chris Behrens +Christian Berendt +Chris Fattarsi +Christopher MacGown +Chuck Short +Cole Robinson +Cor Cornelisse +Cory Wright +Dan Prince +Dan Wendlandt +Daniel P. Berrange +Dave Lapsley +Dave Walker +David Pravec +David Subiros +Dean Troyer +Deepak Garg +Derek Higgins +Devdeep Singh +Devendra Modium +Devin Carlen +Dina Belova +Donal Lafferty +Dong-In David Kang +Doug Hellmann +Duncan McGreggor +Ed Leafe +Edouard Thuleau +Eldar Nugaev +Eoghan Glynn +Eric Day +Eric Windisch +Evan Callicoat +Ewan Mellor +François Charlier +Gabe Westmaas +Gabriel Hurley +Gary Kotton +Gaurav Gupta +Greg Althaus +Hengqing Hu +Hisaharu Ishii +Hisaki Ohara +Ilya Alekseyev +Ionuț Arțăriși +Isaku Yamahata +Ivan Kolodyazhny +J. Daniel Schmidt +Jake Dahn +James E. Blair +Jason Cannavale +Jason Koelker +Jay Pipes +JC Martin +Jesse Andrews +Jimmy Bergman +Joe Gordon +Joe Heck +Joel Moore +Johannes Erdfelt +John Dewey +John Garbutt +John Griffith +John Kennedy +John Tran +Jonathan Bryce +Jordan Rinke +Joseph Suh +Joseph W. Breu +Josh Durgin +Josh Kearney +Josh Kleinpeter +Joshua Harlow +Joshua McKenty +Juan G. Hernando Rivero +Julien Danjou +Justin Santa Barbara +Justin Shepherd +Kei Masumoto +Keisuke Tagami +masumoto +masukotm +Ken Pepple +Kevin Bringard +Kevin L. 
Mitchell +Kiall Mac Innes +Kirill Shileev +Koji Iida +Liam Kelleher +Likitha Shetty +Loganathan Parthipan +Lorin Hochstein +Lvov Maxim +Mandar Vaze +Mandell Degerness +Mark McClain +Mark McLoughlin +Mark Washenberger +Maru Newby +Masanori Itoh +Matt Dietz +Matt Stephenson +Matthew Hooker +Michael Basnight +Michael Gundlach +Michael Still +Mike Lundy +Mike Milner +Mike Pittaro +Mike Scherbakov +Mikyung Kang +Mohammed Naser +Monsyne Dragon +Monty Taylor +MORITA Kazutaka +MotoKen +Muneyuki Noguchi +Nachi Ueno +Naveed Massjouni +Nick Bartos +Nikhil Komawar +Nikolay Sokolov +Nirmal Ranganathan +Ollie Leahy +Pádraig Brady +Paul McMillan +Paul Voccio +Peng Yong +Philip Knouff +Renier Morales +Renuka Apte +Ricardo Carrillo Cruz +Rick Clark +Rick Harris +Rob Kost +Robert Esker +Russell Bryant +Russell Sim +Ryan Lane +Ryan Lucio +Ryu Ishimoto +Salvatore Orlando +Sandy Walsh +Sateesh Chodapuneedi +Scott Moser +Sean Dague +Soren Hansen +Stanislaw Pitucha +Stephanie Reese +Sumit Naiksatam +Thierry Carrez +Tim Simpson +Todd Willey +Tomoe Sugihara +Tomoya Masuko +Thorsten Tarrach +Trey Morris +Troy Toman +Tushar Patil +Unmesh Gurjar +Vasiliy Shlykov +Vishvananda Ishaya +Vivek Y S +Vladimir Popovski +Vaddi kiran +William Henry +William Kelly +William Wolf +Yaguang Tang +Yoshiaki Tamura +Youcef Laribi +Yun Mao +Yun Shen +Yuriy Taraday +Zed Shaw +Zhixue Wu +Zhongyue Luo +Ziad Sawalha diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 00000000000..e9c0162f54e --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,213 @@ +Cinder Style Commandments +========================= + +- Step 1: Read http://www.python.org/dev/peps/pep-0008/ +- Step 2: Read http://www.python.org/dev/peps/pep-0008/ again +- Step 3: Read on + + +General +------- +- Put two newlines between top-level code (funcs, classes, etc.) +- Put one newline between methods in classes and anywhere else +- Do not write "except:", use "except Exception:" at the very least +- Include your name with TODOs as in "#TODO(termie)" +- Do not shadow a built-in or reserved word. Example:: + + def list(): + return [1, 2, 3] + + mylist = list() # BAD, shadows `list` built-in + + class Foo(object): + def list(self): + return [1, 2, 3] + + mylist = Foo().list() # OKAY, does not shadow built-in + + +Imports +------- +- Do not import objects, only modules (*) +- Do not import more than one module per line (*) +- Do not use relative imports +- Order your imports by the full module path +- Organize your imports according to the following template + +(*) exceptions are: + +- imports from ``migrate`` package +- imports from ``sqlalchemy`` package +- imports from ``cinder.db.sqlalchemy.session`` module + +Example:: + + # vim: tabstop=4 shiftwidth=4 softtabstop=4 + {{stdlib imports in human alphabetical order}} + \n + {{third-party lib imports in human alphabetical order}} + \n + {{cinder imports in human alphabetical order}} + \n + \n + {{begin your code}} + + +Human Alphabetical Order Examples +--------------------------------- +Example:: + + import httplib + import logging + import random + import StringIO + import time + import unittest + + import eventlet + import webob.exc + + import cinder.api.ec2 + from cinder.api import openstack + from cinder.auth import users + from cinder.endpoint import cloud + import cinder.flags + from cinder import test + + +Docstrings +---------- +Example:: + + """A one line docstring looks like this and ends in a period.""" + + + """A multi-line docstring has a one-line summary, less than 80 characters.
+ + Then a new paragraph after a newline that explains in more detail any + general information about the function, class or method. Example usages + are also great to have here if it is a complex class or function. + + When writing the docstring for a class, an extra line should be placed + after the closing quotations. For more in-depth explanations for these + decisions see http://www.python.org/dev/peps/pep-0257/ + + If you are going to describe parameters and return values, use Sphinx; the + appropriate syntax is as follows. + + :param foo: the foo parameter + :param bar: the bar parameter + :returns: return_type -- description of the return value + :returns: description of the return value + :raises: AttributeError, KeyError + """ + + +Dictionaries/Lists +------------------ +If a dictionary (dict) or list object is longer than 80 characters, its items +should be split with newlines. Embedded iterables should have their items +indented. Additionally, the last item in the dictionary should have a trailing +comma. This increases readability and simplifies future diffs. + +Example:: + + my_dictionary = { + "image": { + "name": "Just a Snapshot", + "size": 2749573, + "properties": { + "user_id": 12, + "arch": "x86_64", + }, + "things": [ + "thing_one", + "thing_two", + ], + "status": "ACTIVE", + }, + } + + +Calling Methods +--------------- +Calls to methods that run 80 characters or longer should format each argument +with newlines. This is not a requirement, but a guideline:: + + unnecessarily_long_function_name('string one', + 'string two', + kwarg1=constants.ACTIVE, + kwarg2=['a', 'b', 'c']) + + +Rather than constructing parameters inline, it is better to break things up:: + + list_of_strings = [ + 'what_a_long_string', + 'not as long', + ] + + dict_of_numbers = { + 'one': 1, + 'two': 2, + 'twenty four': 24, + } + + object_one.call_a_method('string three', + 'string four', + kwarg1=list_of_strings, + kwarg2=dict_of_numbers) + + +Internationalization (i18n) Strings +----------------------------------- +To support multiple languages, we have a mechanism for automatic +translation of exception and log strings. + +Example:: + + msg = _("An error occurred") + raise HTTPBadRequest(explanation=msg) + +If you have a variable to place within the string, first internationalize the +template string, then do the replacement. + +Example:: + + msg = _("Missing parameter: %s") % ("flavor",) + LOG.error(msg) + +If you have multiple variables to place in the string, use keyword parameters. +This helps our translators reorder parameters when needed. + +Example:: + + msg = _("The server with id %(s_id)s has no key %(m_key)s") + LOG.error(msg % {"s_id": "1234", "m_key": "imageId"}) + + +Creating Unit Tests +------------------- +For every new feature, unit tests should be created that both test and +(implicitly) document the usage of said feature. If submitting a patch for a +bug that had no unit test, a new passing unit test should be added. If a +submitted bug fix does have a unit test, be sure to add a new one that fails +without the patch and passes with the patch. A minimal sketch of such a test +appears at the end of this file. + +For more information on creating unit tests and utilizing the testing +infrastructure in OpenStack Cinder, please read cinder/testing/README.rst. + + +openstack-common +---------------- + +A number of modules from openstack-common are imported into the project. + +These modules are "incubating" in openstack-common and are kept in sync +with the help of openstack-common's update.py script. See: + + http://wiki.openstack.org/CommonLibrary#Incubation + +The copy of the code should never be directly modified here. Please +always update openstack-common first and then run the script to copy +the changes across.
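For reference, the sync is driven by the ``openstack-common.conf`` file this
patch adds at the repository root. The following is a sketch rather than the
file's verbatim contents: the module list is inferred from the
``cinder/openstack/common/`` files created above, and the key names assume
the update.py convention of the era::

    [DEFAULT]

    # Modules to copy out of openstack-common (inferred from the
    # cinder/openstack/common/ files added by this patch)
    modules=cfg,exception,importutils,iniparser,local

    # The base package that receives the copies, i.e.
    # cinder/openstack/common/
    base=cinder

The update flow is then, roughly: make the change in openstack-common first,
run update.py from an openstack-common checkout pointed at this repository,
and commit the refreshed copies here.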
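And to make the unit-test guidance above concrete, here is the promised
minimal sketch of a new test module. It assumes the ``cinder.test.TestCase``
base class in ``cinder/test.py`` (added by this patch) keeps Nova's
unittest-derived interface; the module name and the helper under test are
purely illustrative::

    # cinder/tests/test_example.py (hypothetical module name)
    from cinder import test


    def _reverse(text):
        # Stand-in for the real code a patch would exercise.
        return text[::-1]


    class ExampleTestCase(test.TestCase):
        """Tests that exercise and (implicitly) document _reverse()."""

        def test_reverse(self):
            self.assertEqual('raboof', _reverse('foobar'))

        def test_reverse_empty(self):
            self.assertEqual('', _reverse(''))

If run_tests.sh forwards its arguments to nose as Nova's did, something like
``./run_tests.sh cinder.tests.test_example`` would run only this module.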
See: + + http://wiki.openstack.org/CommonLibrary#Incubation + +The copy of the code should never be directly modified here. Please +always update openstack-common first and then run the script to copy +the changes across. diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000000..68c771a0999 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000000..97278f3f4ab --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,37 @@ +include HACKING.rst +include LICENSE run_tests.sh +include README.rst +include MANIFEST.in pylintrc Authors +include openstack-common.conf +include babel.cfg tox.ini +graft cinder/CA +graft doc +graft smoketests +graft tools +graft etc +graft contrib +graft plugins +graft cinder/api/openstack/*/schemas +include cinder/auth/*.schema +include cinder/auth/cinderrc.template +include cinder/auth/opendj.sh +include cinder/auth/slap.sh +include cinder/db/sqlalchemy/migrate_repo/migrate.cfg +include cinder/db/sqlalchemy/migrate_repo/README +include cinder/db/sqlalchemy/migrate_repo/versions/*.sql +include cinder/openstack/common/README +include cinder/virt/interfaces.template +include cinder/virt/libvirt*.xml.template +include cinder/virt/cpuinfo.xml.template +include cinder/testing/README.rst +include cinder/tests/db/cinder.austin.sqlite +include cinder/tests/image/*.tar.gz +include cinder/tests/policy.json +include cinder/tests/test_migrations.conf +include cinder/tests/xenapi/vm_rrd.xml +include plugins/xenapi/README +include plugins/xenapi/etc/xapi.d/plugins/objectstore +include plugins/xenapi/etc/xapi.d/plugins/pluginlib_cinder.py +global-exclude *.pyc + +recursive-include cinder/locale * diff --git a/README.rst b/README.rst new file mode 100644 index 00000000000..822401a80ae --- /dev/null +++ b/README.rst @@ -0,0 +1,21 @@ +The Choose Your Own Adventure README for Cinder +=============================================== + +You have come across a storage service for an open cloud computing service. +It has identified itself as "Cinder." It was abstracted from the Nova project. + +To monitor it from a distance: follow `@openstack `_ on twitter. + +To tame it for use in your own cloud: read http://docs.openstack.org + +To study its anatomy: read http://cinder.openstack.org + +To dissect it in detail: visit http://github.com/openstack/cinder + +To taunt it with its weaknesses: use http://bugs.launchpad.net/cinder + +To watch it: http://jenkins.openstack.org + +To hack at it: read HACKING + +To cry over its pylint problems: http://jenkins.openstack.org/job/cinder-pylint/violations diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 00000000000..15cd6cb76b9 --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/bin/cinder-all b/bin/cinder-all new file mode 100755 index 00000000000..8bec9bbb39b --- /dev/null +++ b/bin/cinder-all @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack, LLC +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for All cinder services. + +This script attempts to start all the cinder services in one process. Each +service is started in its own greenthread. 
Please note that exceptions and +sys.exit() on the starting of a service are logged and the script will +continue attempting to launch the rest of the services. + +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + +from cinder import flags +from cinder import log as logging +from cinder import service +from cinder import utils + + +LOG = logging.getLogger('cinder.all') + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + servers = [] + # cinder-api + for api in flags.FLAGS.enabled_apis: + try: + servers.append(service.WSGIService(api)) + except (Exception, SystemExit): + logging.exception(_('Failed to load %s') % '%s-api' % api) + + for binary in ['cinder-volume', 'cinder-scheduler']: + try: + servers.append(service.Service.create(binary=binary)) + except (Exception, SystemExit): + LOG.exception(_('Failed to load %s'), binary) + service.serve(*servers) + service.wait() diff --git a/bin/cinder-api b/bin/cinder-api new file mode 100755 index 00000000000..ba28b1a445d --- /dev/null +++ b/bin/cinder-api @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Cinder OS API.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + +from cinder import flags +from cinder import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + server = service.WSGIService('osapi_volume') + service.serve(server) + service.wait() diff --git a/bin/cinder-manage b/bin/cinder-manage new file mode 100755 index 00000000000..0cc6d82c892 --- /dev/null +++ b/bin/cinder-manage @@ -0,0 +1,635 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Interactive shell based on Django: +# +# Copyright (c) 2005, the Lawrence Journal-World +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of Django nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +""" + CLI interface for cinder management. +""" + +import ast +import errno +import gettext +import json +import math +import netaddr +import optparse +import os +import StringIO +import sys + + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +gettext.install('cinder', unicode=1) + +from cinder.compat import flagfile +from cinder import context +from cinder import db +from cinder.db import migration +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import importutils +from cinder import quota +from cinder import rpc +from cinder import utils +from cinder import version +from cinder.volume import volume_types + +FLAGS = flags.FLAGS + + +# Decorators for actions +def args(*args, **kwargs): + def _decorator(func): + func.__dict__.setdefault('options', []).insert(0, (args, kwargs)) + return func + return _decorator + + +def param2id(object_id): + """Helper function to convert various id types to internal id. + args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10' + """ + if '-' in object_id: + # FIXME(ja): mapping occurs in nova? 
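+        # NOTE: prefixed ids such as 'vol-0000000a' are not mapped here yet;
+        # this branch falls through, so the caller receives None and only
+        # plain integer ids are actually converted.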
+        pass
+    else:
+        return int(object_id)
+
+
+class ShellCommands(object):
+    def bpython(self):
+        """Runs a bpython shell.
+
+        Falls back to Ipython/python shell if unavailable"""
+        self.run('bpython')
+
+    def ipython(self):
+        """Runs an Ipython shell.
+
+        Falls back to Python shell if unavailable"""
+        self.run('ipython')
+
+    def python(self):
+        """Runs a python shell."""
+        self.run('python')
+
+    @args('--shell', dest="shell", metavar='<bpython|ipython|python>',
+          help='Python shell')
+    def run(self, shell=None):
+        """Runs a Python interactive interpreter."""
+        if not shell:
+            shell = 'bpython'
+
+        if shell == 'bpython':
+            try:
+                import bpython
+                bpython.embed()
+            except ImportError:
+                shell = 'ipython'
+        if shell == 'ipython':
+            try:
+                import IPython
+                # Explicitly pass an empty list as arguments, because
+                # otherwise IPython would use sys.argv from this script.
+                shell = IPython.Shell.IPShell(argv=[])
+                shell.mainloop()
+            except ImportError:
+                shell = 'python'
+
+        if shell == 'python':
+            import code
+            try:
+                # Try activating rlcompleter, because it's handy.
+                import readline
+            except ImportError:
+                pass
+            else:
+                # We don't have to wrap the following import in a 'try',
+                # because we already know 'readline' was imported successfully.
+                import rlcompleter
+                readline.parse_and_bind("tab:complete")
+            code.interact()
+
+    @args('--path', dest='path', metavar='<path>', help='Script path')
+    def script(self, path):
+        """Runs the script from the specified path with flags set properly.
+        arguments: path"""
+        exec(compile(open(path).read(), path, 'exec'), locals(), globals())
+
+
+def _db_error(caught_exception):
+    print caught_exception
+    print _("The above error may show that the database has not "
+            "been created.\nPlease create a database using "
+            "'cinder-manage db sync' before running this command.")
+    exit(1)
+
+
+class HostCommands(object):
+    """List hosts"""
+
+    def list(self, zone=None):
+        """Show a list of all physical hosts. Filter by zone.
+ args: [zone]""" + print "%-25s\t%-15s" % (_('host'), + _('zone')) + ctxt = context.get_admin_context() + now = utils.utcnow() + services = db.service_get_all(ctxt) + if zone: + services = [s for s in services if s['availability_zone'] == zone] + hosts = [] + for srv in services: + if not [h for h in hosts if h['host'] == srv['host']]: + hosts.append(srv) + + for h in hosts: + print "%-25s\t%-15s" % (h['host'], h['availability_zone']) + + +class DbCommands(object): + """Class for managing the database.""" + + def __init__(self): + pass + + @args('--version', dest='version', metavar='', + help='Database version') + def sync(self, version=None): + """Sync the database up to the most recent version.""" + return migration.db_sync(version) + + def version(self): + """Print the current database version.""" + print migration.db_version() + + +class VersionCommands(object): + """Class for exposing the codebase version.""" + + def __init__(self): + pass + + def list(self): + print _("%(version)s (%(vcs)s)") % \ + {'version': version.version_string(), + 'vcs': version.version_string_with_vcs()} + + def __call__(self): + self.list() + + +class VolumeCommands(object): + """Methods for dealing with a cloud in an odd state""" + + @args('--volume', dest='volume_id', metavar='', + help='Volume ID') + def delete(self, volume_id): + """Delete a volume, bypassing the check that it + must be available.""" + ctxt = context.get_admin_context() + volume = db.volume_get(ctxt, param2id(volume_id)) + host = volume['host'] + + if not host: + print "Volume not yet assigned to host." + print "Deleting volume from database and skipping rpc." + db.volume_destroy(ctxt, param2id(volume_id)) + return + + if volume['status'] == 'in-use': + print "Volume is in-use." + print "Detach volume from instance and then try again." + return + + rpc.cast(ctxt, + db.queue_get_for(ctxt, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume['id']}}) + + @args('--volume', dest='volume_id', metavar='', + help='Volume ID') + def reattach(self, volume_id): + """Re-attach a volume that has previously been attached + to an instance. 
Typically called after a compute host
+        has been rebooted."""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        if not volume['instance_id']:
+            print "volume is not attached to an instance"
+            return
+        instance = db.instance_get(ctxt, volume['instance_id'])
+        host = instance['host']
+        rpc.cast(ctxt,
+                 db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume['id'],
+                           "mountpoint": volume['mountpoint']}})
+
+
+class StorageManagerCommands(object):
+    """Class for managing Storage Backends and Flavors"""
+
+    def flavor_list(self, flavor=None):
+        ctxt = context.get_admin_context()
+
+        try:
+            if flavor is None:
+                flavors = db.sm_flavor_get_all(ctxt)
+            else:
+                flavors = db.sm_flavor_get(ctxt, flavor)
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        print "%-18s\t%-20s\t%s" % (_('id'),
+                                    _('Label'),
+                                    _('Description'))
+
+        for flav in flavors:
+            print "%-18s\t%-20s\t%s" % (
+                flav['id'],
+                flav['label'],
+                flav['description'])
+
+    def flavor_create(self, label, desc):
+        # TODO(renukaapte) flavor name must be unique
+        try:
+            db.sm_flavor_create(context.get_admin_context(),
+                                dict(label=label,
+                                     description=desc))
+        except exception.DBError, e:
+            _db_error(e)
+
+    def flavor_delete(self, label):
+        try:
+            db.sm_flavor_delete(context.get_admin_context(), label)
+
+        except exception.DBError, e:
+            _db_error(e)
+
+    def _splitfun(self, item):
+        i = item.split("=")
+        return i[0:2]
+
+    def backend_list(self, backend_conf_id=None):
+        ctxt = context.get_admin_context()
+
+        try:
+            if backend_conf_id is None:
+                backends = db.sm_backend_conf_get_all(ctxt)
+            else:
+                backends = db.sm_backend_conf_get(ctxt, backend_conf_id)
+
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
+                                                 _('Flavor id'),
+                                                 _('SR UUID'),
+                                                 _('SR Type'),
+                                                 _('Config Parameters'),)
+
+        for b in backends:
+            print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
+                                                     b['flavor_id'],
+                                                     b['sr_uuid'],
+                                                     b['sr_type'],
+                                                     b['config_params'],)
+
+    def backend_add(self, flavor_label, sr_type, *args):
+        # TODO(renukaapte) Add backend_introduce.
+        ctxt = context.get_admin_context()
+        params = dict(map(self._splitfun, args))
+        sr_uuid = utils.gen_uuid()
+
+        if flavor_label is None:
+            print "error: backend needs to be associated with flavor"
+            sys.exit(2)
+
+        try:
+            flavors = db.sm_flavor_get(ctxt, flavor_label)
+
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        config_params = " ".join(['%s=%s' %
+                                  (key, params[key]) for key in params])
+
+        if 'sr_uuid' in params:
+            sr_uuid = params['sr_uuid']
+            try:
+                backend = db.sm_backend_conf_get_by_sr(ctxt, sr_uuid)
+            except exception.DBError, e:
+                _db_error(e)
+
+            if backend:
+                print 'Backend config found. Would you like to recreate this?'
+                print '(WARNING: Recreating will destroy all VDIs on backend!!)'
+                c = raw_input('Proceed? (y/n) ')
+                if c == 'y' or c == 'Y':
+                    try:
+                        db.sm_backend_conf_update(ctxt, backend['id'],
+                                                  dict(created=False,
+                                                       flavor_id=flavors['id'],
+                                                       sr_type=sr_type,
+                                                       config_params=config_params))
+                    except exception.DBError, e:
+                        _db_error(e)
+                return
+
+        else:
+            print 'Backend config not found. Would you like to create it?'
+
+            print '(WARNING: Creating will destroy all data on backend!!!)'
+            c = raw_input('Proceed? (y/n) ')
+            if c == 'y' or c == 'Y':
+                try:
+                    db.sm_backend_conf_create(ctxt,
+                                              dict(flavor_id=flavors['id'],
+                                                   sr_uuid=sr_uuid,
+                                                   sr_type=sr_type,
+                                                   config_params=config_params))
+                except exception.DBError, e:
+                    _db_error(e)
+
+    def backend_remove(self, backend_conf_id):
+        try:
+            db.sm_backend_conf_delete(context.get_admin_context(),
+                                      backend_conf_id)
+
+        except exception.DBError, e:
+            _db_error(e)
+
+
+class ConfigCommands(object):
+    """Class for exposing the flags defined by flag_file(s)."""
+
+    def __init__(self):
+        pass
+
+    def list(self):
+        for key, value in FLAGS.iteritems():
+            if value is not None:
+                print '%s = %s' % (key, value)
+
+    @args('--infile', dest='infile', metavar='<path>',
+          help='old-style flagfile to convert to config')
+    @args('--outfile', dest='outfile', metavar='<path>',
+          help='path for output file. Writes config '
+               'to stdout if not specified.')
+    def convert(self, infile, outfile=None):
+        """Converts a flagfile and prints results to stdout."""
+        arg = '--flagfile=%s' % infile
+        with flagfile.handle_flagfiles_managed([arg]) as newargs:
+            with open(newargs[0].split('=')[1]) as configfile:
+                config = configfile.read()
+        if outfile:
+            with open(outfile, 'w') as configfile:
+                configfile.write(config)
+        else:
+            print config,
+
+
+class GetLogCommands(object):
+    """Get logging information"""
+
+    def errors(self):
+        """Get all of the errors from the log files"""
+        error_found = 0
+        if FLAGS.logdir:
+            logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
+            for log in logs:
+                log_file = os.path.join(FLAGS.logdir, log)
+                lines = [line.strip() for line in open(log_file, "r")]
+                lines.reverse()
+                print_name = 0
+                for index, line in enumerate(lines):
+                    if line.find(" ERROR ") > 0:
+                        error_found += 1
+                        if print_name == 0:
+                            print log_file + ":-"
+                            print_name = 1
+                        print "Line %d : %s" % (len(lines) - index, line)
+        if error_found == 0:
+            print "No errors in logfiles!"
+
+    def syslog(self, num_entries=10):
+        """Get <num_entries> of the cinder syslog events"""
+        entries = int(num_entries)
+        count = 0
+        log_file = ''
+        if os.path.exists('/var/log/syslog'):
+            log_file = '/var/log/syslog'
+        elif os.path.exists('/var/log/messages'):
+            log_file = '/var/log/messages'
+        else:
+            print "Unable to find system log file!"
+            sys.exit(1)
+        lines = [line.strip() for line in open(log_file, "r")]
+        lines.reverse()
+        print "Last %s cinder syslog entries:-" % (entries)
+        for line in lines:
+            if line.find("cinder") > 0:
+                count += 1
+                print "%s" % (line)
+                if count == entries:
+                    break
+
+        if count == 0:
+            print "No cinder entries in syslog!"
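+
+# Illustrative examples of the dispatch performed by main() below; each
+# public method of a command class becomes an action of its category
+# (assuming a configured cinder environment):
+#
+#   cinder-manage db sync       -> DbCommands.sync()
+#   cinder-manage logs errors   -> GetLogCommands.errors()
+#   cinder-manage volume delete --volume 10
+#                               -> VolumeCommands.delete(volume_id=u'10')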
+ + +CATEGORIES = [ + ('config', ConfigCommands), + ('db', DbCommands), + ('host', HostCommands), + ('logs', GetLogCommands), + ('shell', ShellCommands), + ('sm', StorageManagerCommands), + ('version', VersionCommands), + ('volume', VolumeCommands), +] + + +def lazy_match(name, key_value_tuples): + """Finds all objects that have a key that case insensitively contains + [name] key_value_tuples is a list of tuples of the form (key, value) + returns a list of tuples of the form (key, value)""" + result = [] + for (k, v) in key_value_tuples: + if k.lower().find(name.lower()) == 0: + result.append((k, v)) + if len(result) == 0: + print "%s does not match any options:" % name + for k, _v in key_value_tuples: + print "\t%s" % k + sys.exit(2) + if len(result) > 1: + print "%s matched multiple options:" % name + for k, _v in result: + print "\t%s" % k + sys.exit(2) + return result + + +def methods_of(obj): + """Get all callable methods of an object that don't start with underscore + returns a list of tuples of the form (method_name, method)""" + result = [] + for i in dir(obj): + if callable(getattr(obj, i)) and not i.startswith('_'): + result.append((i, getattr(obj, i))) + return result + + +def main(): + """Parse options and call the appropriate class/method.""" + flagfile = utils.default_flagfile() + + if flagfile and not os.access(flagfile, os.R_OK): + st = os.stat(flagfile) + print "Could not read %s. Re-running with sudo" % flagfile + try: + os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) + except Exception: + print 'sudo failed, continuing as if nothing happened' + + rpc.register_opts(FLAGS) + + try: + argv = FLAGS(sys.argv) + logging.setup() + except IOError, e: + if e.errno == errno.EACCES: + print _('Please re-run cinder-manage as root.') + sys.exit(2) + raise + script_name = argv.pop(0) + if len(argv) < 1: + print _("\nOpenStack Cinder version: %(version)s (%(vcs)s)\n") % \ + {'version': version.version_string(), + 'vcs': version.version_string_with_vcs()} + print script_name + " category action []" + print _("Available categories:") + for k, _v in CATEGORIES: + print "\t%s" % k + sys.exit(2) + category = argv.pop(0) + matches = lazy_match(category, CATEGORIES) + # instantiate the command group object + category, fn = matches[0] + command_object = fn() + actions = methods_of(command_object) + if len(argv) < 1: + if hasattr(command_object, '__call__'): + action = '' + fn = command_object.__call__ + else: + print script_name + " category action []" + print _("Available actions for %s category:") % category + for k, _v in actions: + print "\t%s" % k + sys.exit(2) + else: + action = argv.pop(0) + matches = lazy_match(action, actions) + action, fn = matches[0] + + # For not decorated methods + options = getattr(fn, 'options', []) + + usage = "%%prog %s %s [options]" % (category, action) + parser = optparse.OptionParser(usage=usage) + for ar, kw in options: + parser.add_option(*ar, **kw) + (opts, fn_args) = parser.parse_args(argv) + fn_kwargs = vars(opts) + + for k, v in fn_kwargs.items(): + if v is None: + del fn_kwargs[k] + elif isinstance(v, basestring): + fn_kwargs[k] = v.decode('utf-8') + else: + fn_kwargs[k] = v + + fn_args = [arg.decode('utf-8') for arg in fn_args] + + # call the action with the remaining arguments + try: + fn(*fn_args, **fn_kwargs) + rpc.cleanup() + sys.exit(0) + except TypeError: + print _("Possible wrong number of arguments supplied") + print fn.__doc__ + parser.print_help() + raise + except Exception: + print _("Command failed, please check log for 
more info") + raise + +if __name__ == '__main__': + main() diff --git a/bin/cinder-rootwrap b/bin/cinder-rootwrap new file mode 100755 index 00000000000..537324c6c10 --- /dev/null +++ b/bin/cinder-rootwrap @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Root wrapper for Cinder + + Uses modules in cinder.rootwrap containing filters for commands + that cinder is allowed to run as another user. + + To switch to using this, you should: + * Set "--root_helper=sudo cinder-rootwrap" in cinder.conf + * Allow cinder to run cinder-rootwrap as root in cinder_sudoers: + cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap + (all other commands can be removed from this file) + + To make allowed commands node-specific, your packaging should only + install cinder/rootwrap/{compute,network,volume}.py respectively on + compute, network and volume nodes (i.e. cinder-api nodes should not + have any of those files installed). +""" + +import os +import subprocess +import sys + + +RC_UNAUTHORIZED = 99 +RC_NOCOMMAND = 98 + +if __name__ == '__main__': + # Split arguments, require at least a command + execname = sys.argv.pop(0) + if len(sys.argv) == 0: + print "%s: %s" % (execname, "No command specified") + sys.exit(RC_NOCOMMAND) + + userargs = sys.argv[:] + + # Add ../ to sys.path to allow running from branch + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), + os.pardir, os.pardir)) + if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + from cinder.rootwrap import wrapper + + # Execute command if it matches any of the loaded filters + filters = wrapper.load_filters() + filtermatch = wrapper.match_filter(filters, userargs) + if filtermatch: + obj = subprocess.Popen(filtermatch.get_command(userargs), + stdin=sys.stdin, + stdout=sys.stdout, + stderr=sys.stderr, + env=filtermatch.get_environment(userargs)) + obj.wait() + sys.exit(obj.returncode) + + print "Unauthorized command: %s" % ' '.join(userargs) + sys.exit(RC_UNAUTHORIZED) diff --git a/bin/cinder-scheduler b/bin/cinder-scheduler new file mode 100755 index 00000000000..f423bef63e9 --- /dev/null +++ b/bin/cinder-scheduler @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Scheduler.""" + +import eventlet +eventlet.monkey_patch() + +import gettext +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +gettext.install('cinder', unicode=1) + +from cinder import flags +from cinder import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + server = service.Service.create(binary='cinder-scheduler') + service.serve(server) + service.wait() diff --git a/bin/cinder-volume b/bin/cinder-volume new file mode 100755 index 00000000000..53aa635a6fa --- /dev/null +++ b/bin/cinder-volume @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Volume.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + + +from cinder import flags +from cinder import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + server = service.Service.create(binary='cinder-volume') + service.serve(server) + service.wait() diff --git a/bin/clear_rabbit_queues b/bin/clear_rabbit_queues new file mode 100755 index 00000000000..d652d6e14c7 --- /dev/null +++ b/bin/clear_rabbit_queues @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Admin/debug script to wipe rabbitMQ (AMQP) queues cinder uses. + This can be used if you need to change durable options on queues, + or to wipe all messages in the queue system if things are in a + serious bad way. + +""" + +import datetime +import gettext +import os +import sys +import time + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +gettext.install('cinder', unicode=1) + + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder import rpc +from cinder import utils + + +delete_exchange_opt = \ + cfg.BoolOpt('delete_exchange', + default=False, + help='delete cinder exchange too.') + +FLAGS = flags.FLAGS +FLAGS.register_cli_opt(delete_exchange_opt) + + +def delete_exchange(exch): + conn = rpc.create_connection() + x = conn.get_channel() + x.exchange_delete(exch) + + +def delete_queues(queues): + conn = rpc.create_connection() + x = conn.get_channel() + for q in queues: + x.queue_delete(q) + +if __name__ == '__main__': + utils.default_flagfile() + args = flags.FLAGS(sys.argv) + logging.setup() + rpc.register_opts(flags.FLAGS) + delete_queues(args[1:]) + if FLAGS.delete_exchange: + delete_exchange(FLAGS.control_exchange) diff --git a/cinder/__init__.py b/cinder/__init__.py new file mode 100644 index 00000000000..238c2812e69 --- /dev/null +++ b/cinder/__init__.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder` -- Cloud IaaS Platform +=================================== + +.. automodule:: cinder + :platform: Unix + :synopsis: Infrastructure-as-a-Service Cloud platform. +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. 
moduleauthor:: Andy Smith +""" + +import gettext +import logging + + +gettext.install('cinder', unicode=1) +# NOTE(jkoelker) This configures the root logger if it is not already +# configured so messages from logging setup can be written +# to the console +logging.basicConfig(format='%(message)s') diff --git a/cinder/api/__init__.py b/cinder/api/__init__.py new file mode 100644 index 00000000000..747015af53e --- /dev/null +++ b/cinder/api/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/api/auth.py b/cinder/api/auth.py new file mode 100644 index 00000000000..1b8f303e23c --- /dev/null +++ b/cinder/api/auth.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common Auth Middleware. + +""" + +import webob.dec +import webob.exc + +from cinder import context +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder import wsgi + + +use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for', + default=False, + help='Treat X-Forwarded-For as the canonical remote address. 
' + 'Only enable this if you have a sanitizing proxy.') + +FLAGS = flags.FLAGS +FLAGS.register_opt(use_forwarded_for_opt) +LOG = logging.getLogger(__name__) + + +def pipeline_factory(loader, global_conf, **local_conf): + """A paste pipeline replica that keys off of auth_strategy.""" + pipeline = local_conf[FLAGS.auth_strategy] + if not FLAGS.api_rate_limit: + limit_name = FLAGS.auth_strategy + '_nolimit' + pipeline = local_conf.get(limit_name, pipeline) + pipeline = pipeline.split() + filters = [loader.get_filter(n) for n in pipeline[:-1]] + app = loader.get_app(pipeline[-1]) + filters.reverse() + for filter in filters: + app = filter(app) + return app + + +class InjectContext(wsgi.Middleware): + """Add a 'cinder.context' to WSGI environ.""" + + def __init__(self, context, *args, **kwargs): + self.context = context + super(InjectContext, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + req.environ['cinder.context'] = self.context + return self.application + + +class CinderKeystoneContext(wsgi.Middleware): + """Make a request context from keystone headers""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + user_id = req.headers.get('X_USER') + user_id = req.headers.get('X_USER_ID', user_id) + if user_id is None: + LOG.debug("Neither X_USER_ID nor X_USER found in request") + return webob.exc.HTTPUnauthorized() + # get the roles + roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] + if 'X_TENANT_ID' in req.headers: + # This is the new header since Keystone went to ID/Name + project_id = req.headers['X_TENANT_ID'] + else: + # This is for legacy compatibility + project_id = req.headers['X_TENANT'] + + # Get the auth token + auth_token = req.headers.get('X_AUTH_TOKEN', + req.headers.get('X_STORAGE_TOKEN')) + + # Build a context, including the auth_token... + remote_address = req.remote_addr + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + roles=roles, + auth_token=auth_token, + remote_address=remote_address) + + req.environ['cinder.context'] = ctx + return self.application diff --git a/cinder/api/openstack/__init__.py b/cinder/api/openstack/__init__.py new file mode 100644 index 00000000000..22ff5de047a --- /dev/null +++ b/cinder/api/openstack/__init__.py @@ -0,0 +1,143 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack API controllers. 
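+
+The routers defined below map project-prefixed URL paths onto wsgi.Resource
+controllers and register any optional extension resources at load time.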
+""" + +import routes +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder import log as logging +from cinder import wsgi as base_wsgi + + +LOG = logging.getLogger(__name__) + + +class FaultWrapper(base_wsgi.Middleware): + """Calls down the middleware stack, making exceptions into faults.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + try: + return req.get_response(self.application) + except Exception as ex: + LOG.exception(_("Caught error: %s"), unicode(ex)) + msg_dict = dict(url=req.url, status=500) + LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) + exc = webob.exc.HTTPInternalServerError() + # NOTE(johannes): We leave the explanation empty here on + # purpose. It could possibly have sensitive information + # that should not be returned back to the user. See + # bugs 868360 and 874472 + return wsgi.Fault(exc) + + +class APIMapper(routes.Mapper): + def routematch(self, url=None, environ=None): + if url is "": + result = self._match("", environ) + return result[0], result[1] + return routes.Mapper.routematch(self, url, environ) + + +class ProjectMapper(APIMapper): + def resource(self, member_name, collection_name, **kwargs): + if not ('parent_resource' in kwargs): + kwargs['path_prefix'] = '{project_id}/' + else: + parent_resource = kwargs['parent_resource'] + p_collection = parent_resource['collection_name'] + p_member = parent_resource['member_name'] + kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, + p_member) + routes.Mapper.resource(self, member_name, + collection_name, + **kwargs) + + +class APIRouter(base_wsgi.Router): + """ + Routes requests on the OpenStack API to the appropriate controller + and method. + """ + ExtensionManager = None # override in subclasses + + @classmethod + def factory(cls, global_config, **local_config): + """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have""" + return cls() + + def __init__(self, ext_mgr=None): + if ext_mgr is None: + if self.ExtensionManager: + ext_mgr = self.ExtensionManager() + else: + raise Exception(_("Must specify an ExtensionManager class")) + + mapper = ProjectMapper() + self.resources = {} + self._setup_routes(mapper) + self._setup_ext_routes(mapper, ext_mgr) + self._setup_extensions(ext_mgr) + super(APIRouter, self).__init__(mapper) + + def _setup_ext_routes(self, mapper, ext_mgr): + for resource in ext_mgr.get_resources(): + LOG.debug(_('Extended resource: %s'), + resource.collection) + + wsgi_resource = wsgi.Resource(resource.controller) + self.resources[resource.collection] = wsgi_resource + kargs = dict( + controller=wsgi_resource, + collection=resource.collection_actions, + member=resource.member_actions) + + if resource.parent: + kargs['parent_resource'] = resource.parent + + mapper.resource(resource.collection, resource.collection, **kargs) + + if resource.custom_routes_fn: + resource.custom_routes_fn(mapper, wsgi_resource) + + def _setup_extensions(self, ext_mgr): + for extension in ext_mgr.get_controller_extensions(): + ext_name = extension.extension.name + collection = extension.collection + controller = extension.controller + + if collection not in self.resources: + LOG.warning(_('Extension %(ext_name)s: Cannot extend ' + 'resource %(collection)s: No such resource') % + locals()) + continue + + LOG.debug(_('Extension %(ext_name)s extending resource: ' + '%(collection)s') % locals()) + + resource = self.resources[collection] + resource.register_actions(controller) + 
resource.register_extensions(controller) + + def _setup_routes(self, mapper): + raise NotImplementedError diff --git a/cinder/api/openstack/auth.py b/cinder/api/openstack/auth.py new file mode 100644 index 00000000000..cbc20843266 --- /dev/null +++ b/cinder/api/openstack/auth.py @@ -0,0 +1,65 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder import context +from cinder import flags +from cinder import log as logging +from cinder import wsgi as base_wsgi + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS +flags.DECLARE('use_forwarded_for', 'cinder.api.auth') + + +class NoAuthMiddleware(base_wsgi.Middleware): + """Return a fake token if one isn't specified.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if 'X-Auth-Token' not in req.headers: + user_id = req.headers.get('X-Auth-User', 'admin') + project_id = req.headers.get('X-Auth-Project-Id', 'admin') + os_url = os.path.join(req.url, project_id) + res = webob.Response() + # NOTE(vish): This is expecting and returning Auth(1.1), whereas + # keystone uses 2.0 auth. We should probably allow + # 2.0 auth here as well. + res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) + res.headers['X-Server-Management-Url'] = os_url + res.content_type = 'text/plain' + res.status = '204' + return res + + token = req.headers['X-Auth-Token'] + user_id, _sep, project_id = token.partition(':') + project_id = project_id or user_id + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + is_admin=True, + remote_address=remote_address) + + req.environ['cinder.context'] = ctx + return self.application diff --git a/cinder/api/openstack/common.py b/cinder/api/openstack/common.py new file mode 100644 index 00000000000..ce2d2bd934d --- /dev/null +++ b/cinder/api/openstack/common.py @@ -0,0 +1,380 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
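+
+"""Shared helpers for the OpenStack volume API controllers: pagination
+parameters, href and version manipulation, and common metadata
+serializers."""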
+ +import functools +import os +import re +import urlparse + +import webob +from xml.dom import minidom + +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import flags +from cinder import log as logging +from cinder import quota + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1' + + +def get_pagination_params(request): + """Return marker, limit tuple from request. + + :param request: `wsgi.Request` possibly containing 'marker' and 'limit' + GET variables. 'marker' is the id of the last element + the client has seen, and 'limit' is the maximum number + of items to return. If 'limit' is not specified, 0, or + > max_limit, we default to max_limit. Negative values + for either marker or limit will cause + exc.HTTPBadRequest() exceptions to be raised. + + """ + params = {} + if 'limit' in request.GET: + params['limit'] = _get_limit_param(request) + if 'marker' in request.GET: + params['marker'] = _get_marker_param(request) + return params + + +def _get_limit_param(request): + """Extract integer limit from request or fail""" + try: + limit = int(request.GET['limit']) + except ValueError: + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + if limit < 0: + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + return limit + + +def _get_marker_param(request): + """Extract marker id from request or fail""" + return request.GET['marker'] + + +def limited(items, request, max_limit=FLAGS.osapi_max_limit): + """Return a slice of items according to requested offset and limit. + + :param items: A sliceable entity + :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' + GET variables. 'offset' is where to start in the list, + and 'limit' is the maximum number of items to return. If + 'limit' is not specified, 0, or > max_limit, we default + to max_limit. Negative values for either offset or limit + will cause exc.HTTPBadRequest() exceptions to be raised. 
+    :kwarg max_limit: The maximum number of items to return from 'items'
+    """
+    try:
+        offset = int(request.GET.get('offset', 0))
+    except ValueError:
+        msg = _('offset param must be an integer')
+        raise webob.exc.HTTPBadRequest(explanation=msg)
+
+    try:
+        limit = int(request.GET.get('limit', max_limit))
+    except ValueError:
+        msg = _('limit param must be an integer')
+        raise webob.exc.HTTPBadRequest(explanation=msg)
+
+    if limit < 0:
+        msg = _('limit param must be non-negative')
+        raise webob.exc.HTTPBadRequest(explanation=msg)
+
+    if offset < 0:
+        msg = _('offset param must be non-negative')
+        raise webob.exc.HTTPBadRequest(explanation=msg)
+
+    # A limit of zero means "use the default"; never exceed max_limit
+    limit = min(max_limit, limit or max_limit)
+    range_end = offset + limit
+    return items[offset:range_end]
+
+
+def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
+    """Return a slice of items according to the requested marker and limit."""
+    params = get_pagination_params(request)
+
+    limit = params.get('limit', max_limit)
+    marker = params.get('marker')
+
+    limit = min(max_limit, limit)
+    start_index = 0
+    if marker:
+        start_index = -1
+        for i, item in enumerate(items):
+            if 'flavorid' in item:
+                if item['flavorid'] == marker:
+                    start_index = i + 1
+                    break
+            elif item['id'] == marker or item.get('uuid') == marker:
+                start_index = i + 1
+                break
+        if start_index < 0:
+            msg = _('marker [%s] not found') % marker
+            raise webob.exc.HTTPBadRequest(explanation=msg)
+    range_end = start_index + limit
+    return items[start_index:range_end]
+
+
+def get_id_from_href(href):
+    """Return the id or uuid portion of a url.
+
+    Given: 'http://www.foo.com/bar/123?q=4'
+    Returns: '123'
+
+    Given: 'http://www.foo.com/bar/abc123?q=4'
+    Returns: 'abc123'
+
+    """
+    return urlparse.urlsplit("%s" % href).path.split('/')[-1]
+
+
+def remove_version_from_href(href):
+    """Removes the first api version from the href.
+
+    Given: 'http://www.cinder.com/v1.1/123'
+    Returns: 'http://www.cinder.com/123'
+
+    Given: 'http://www.cinder.com/v1.1'
+    Returns: 'http://www.cinder.com'
+
+    """
+    parsed_url = urlparse.urlsplit(href)
+    url_parts = parsed_url.path.split('/', 2)
+
+    # NOTE: this should match vX.X or vX
+    expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
+    if expression.match(url_parts[1]):
+        del url_parts[1]
+
+    new_path = '/'.join(url_parts)
+
+    if new_path == parsed_url.path:
+        msg = _('href %s does not contain version') % href
+        LOG.debug(msg)
+        raise ValueError(msg)
+
+    parsed_url = list(parsed_url)
+    parsed_url[2] = new_path
+    return urlparse.urlunsplit(parsed_url)
+
+
+def get_version_from_href(href):
+    """Return the api version in the href, or '2' if none is found.
+
+    Given: 'http://www.cinder.com/123'
+    Returns: '2'
+
+    Given: 'http://www.cinder.com/v1.1'
+    Returns: '1.1'
+
+    """
+    try:
+        expression = r'/v([0-9]+|[0-9]+\.[0-9]+)(/|$)'
+        return re.findall(expression, href)[0][0]
+    except IndexError:
+        return '2'
+
+
+def dict_to_query_str(params):
+    # TODO(throughnothing): we should just use urllib.urlencode instead of
+    # this, but currently we don't work with urlencoded URLs
+    param_str = ""
+    for key, val in params.iteritems():
+        param_str = param_str + '='.join([str(key), str(val)]) + '&'
+
+    return param_str.rstrip('&')
+
+
+def raise_http_conflict_for_instance_invalid_state(exc, action):
+    """Raise a webob.exc.HTTPConflict with a message appropriate to
+    return via the API, based on the original InstanceInvalidState
+    exception.
+ """ + attr = exc.kwargs.get('attr') + state = exc.kwargs.get('state') + if attr and state: + msg = _("Cannot '%(action)s' while instance is in %(attr)s %(state)s") + else: + # At least give some meaningful message + msg = _("Instance is in an invalid state for '%(action)s'") + raise webob.exc.HTTPConflict(explanation=msg % locals()) + + +class MetadataDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = minidom.parseString(text) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + +class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = minidom.parseString(text) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +class MetadataXMLDeserializer(wsgi.XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + if metadata_node is None: + return {} + metadata = {} + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + def _extract_metadata_container(self, datastring): + dom = minidom.parseString(datastring) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + def create(self, datastring): + return self._extract_metadata_container(datastring) + + def update_all(self, datastring): + return self._extract_metadata_container(datastring) + + def update(self, datastring): + dom = minidom.parseString(datastring) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +metadata_nsmap = {None: xmlutil.XMLNS_V11} + + +class MetaItemTemplate(xmlutil.TemplateBuilder): + def construct(self): + sel = xmlutil.Selector('meta', xmlutil.get_items, 0) + root = xmlutil.TemplateElement('meta', selector=sel) + root.set('key', 0) + root.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) + + +class MetadataTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return True + + +class MetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = MetadataTemplateElement('metadata', selector='metadata') + elem = xmlutil.SubTemplateElement(root, 'meta', + selector=xmlutil.get_items) + elem.set('key', 0) + elem.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) + + +def check_snapshots_enabled(f): + @functools.wraps(f) + def inner(*args, **kwargs): + if not FLAGS.allow_instance_snapshots: + LOG.warn(_('Rejecting snapshot request, snapshots currently' + ' disabled')) + msg = _("Instance snapshots are not permitted at this time.") + raise webob.exc.HTTPBadRequest(explanation=msg) + return f(*args, **kwargs) + return inner + + +class ViewBuilder(object): + """Model API responses as dictionaries.""" + + _collection_name = None + + def _get_links(self, request, identifier): + return [{ + "rel": "self", + "href": self._get_href_link(request, identifier), + }, + { + "rel": "bookmark", + "href": self._get_bookmark_link(request, identifier), + }] + + def _get_next_link(self, request, identifier): + """Return href string with proper limit and marker params.""" + params = request.params.copy() + params["marker"] = identifier + prefix = self._update_link_prefix(request.application_url, + FLAGS.osapi_compute_link_prefix) + 
url = os.path.join(prefix, + request.environ["cinder.context"].project_id, + self._collection_name) + return "%s?%s" % (url, dict_to_query_str(params)) + + def _get_href_link(self, request, identifier): + """Return an href string pointing to this object.""" + prefix = self._update_link_prefix(request.application_url, + FLAGS.osapi_compute_link_prefix) + return os.path.join(prefix, + request.environ["cinder.context"].project_id, + self._collection_name, + str(identifier)) + + def _get_bookmark_link(self, request, identifier): + """Create a URL that refers to a specific resource.""" + base_url = remove_version_from_href(request.application_url) + base_url = self._update_link_prefix(base_url, + FLAGS.osapi_compute_link_prefix) + return os.path.join(base_url, + request.environ["cinder.context"].project_id, + self._collection_name, + str(identifier)) + + def _get_collection_links(self, request, items, id_key="uuid"): + """Retrieve 'next' link, if applicable.""" + links = [] + limit = int(request.params.get("limit", 0)) + if limit and limit == len(items): + last_item = items[-1] + if id_key in last_item: + last_item_id = last_item[id_key] + else: + last_item_id = last_item["id"] + links.append({ + "rel": "next", + "href": self._get_next_link(request, last_item_id), + }) + return links + + def _update_link_prefix(self, orig_url, prefix): + if not prefix: + return orig_url + url_parts = list(urlparse.urlsplit(orig_url)) + prefix_parts = list(urlparse.urlsplit(prefix)) + url_parts[0:2] = prefix_parts[0:2] + return urlparse.urlunsplit(url_parts) diff --git a/cinder/api/openstack/compute/__init__.py b/cinder/api/openstack/compute/__init__.py new file mode 100644 index 00000000000..7372b0c97ef --- /dev/null +++ b/cinder/api/openstack/compute/__init__.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Compute API. 
+"""
+
+from cinder.api.openstack.compute import versions
diff --git a/cinder/api/openstack/compute/schemas/atom-link.rng b/cinder/api/openstack/compute/schemas/atom-link.rng
new file mode 100644
index 00000000000..edba5eee6c4
--- /dev/null
+++ b/cinder/api/openstack/compute/schemas/atom-link.rng
@@ -0,0 +1,141 @@
[141 lines of RELAX NG markup defining the atom:link element; the XML tags
did not survive extraction. The only recoverable fragments are the datatype
parameters "1", "[^:]*" (a URI without a scheme), ".+/.+" (a media type),
"[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*" (a language tag), and the excluded
attribute names "xml:base" and "xml:lang".]
diff --git a/cinder/api/openstack/compute/schemas/v1.1/extension.rng b/cinder/api/openstack/compute/schemas/v1.1/extension.rng
new file mode 100644
index 00000000000..b16d8c13006
--- /dev/null
+++ b/cinder/api/openstack/compute/schemas/v1.1/extension.rng
@@ -0,0 +1,11 @@
[11 lines of RELAX NG markup for a single v1.1 extension document; the XML
tags did not survive extraction.]
diff --git a/cinder/api/openstack/compute/schemas/v1.1/extensions.rng b/cinder/api/openstack/compute/schemas/v1.1/extensions.rng
new file mode 100644
index 00000000000..8538eaf2dad
--- /dev/null
+++ b/cinder/api/openstack/compute/schemas/v1.1/extensions.rng
@@ -0,0 +1,6 @@
[6 lines of RELAX NG markup for the v1.1 extensions collection; the XML
tags did not survive extraction.]
diff --git a/cinder/api/openstack/compute/schemas/v1.1/metadata.rng b/cinder/api/openstack/compute/schemas/v1.1/metadata.rng
new file mode 100644
index 00000000000..b2f5d702a2f
--- /dev/null
+++ b/cinder/api/openstack/compute/schemas/v1.1/metadata.rng
@@ -0,0 +1,9 @@
[9 lines of RELAX NG markup for the v1.1 metadata document (a <metadata>
element containing <meta key="..."> children); the XML tags did not
survive extraction.]
diff --git a/cinder/api/openstack/compute/versions.py b/cinder/api/openstack/compute/versions.py
new file mode 100644
index 00000000000..d5108c0cfac
--- /dev/null
+++ b/cinder/api/openstack/compute/versions.py
@@ -0,0 +1,244 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
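The schema bodies above lost their XML markup in extraction, but their role
is clear from the rest of the patch: they validate the wire format of the
extension, extensions, and metadata documents. As a hedged sketch of how such
a RELAX NG file is consumed, lxml (which versions.py, starting above, already
imports) can validate a payload against it; the file path and namespace here
are assumptions based on the deserializers elsewhere in this patch:

    # Hedged sketch: validate a metadata document against metadata.rng.
    from lxml import etree

    relaxng = etree.RelaxNG(etree.parse('schemas/v1.1/metadata.rng'))
    doc = etree.fromstring(
        '<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">'
        '<meta key="purpose">test</meta>'
        '</metadata>')
    print relaxng.validate(doc)   # True when the document matches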
+ +import datetime + +from lxml import etree + +from cinder.api.openstack.compute.views import versions as views_versions +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil + + +LINKS = { + 'v2.0': { + 'pdf': 'http://docs.openstack.org/' + 'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf', + 'wadl': 'http://docs.openstack.org/' + 'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl', + }, +} + + +VERSIONS = { + "v2.0": { + "id": "v2.0", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": LINKS['v2.0']['pdf'], + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": LINKS['v2.0']['wadl'], + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute+xml;version=2", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute+json;version=2", + } + ], + } +} + + +class MediaTypesTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return 'media-types' in datum + + +def make_version(elem): + elem.set('id') + elem.set('status') + elem.set('updated') + + mts = MediaTypesTemplateElement('media-types') + elem.append(mts) + + mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') + mt.set('base') + mt.set('type') + + xmlutil.make_links(elem, 'links') + + +version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class VersionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('version', selector='version') + make_version(root) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class VersionsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('versions') + elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class ChoicesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('choices') + elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class AtomSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_ATOM} + + def __init__(self, metadata=None, xmlns=None): + self.metadata = metadata or {} + if not xmlns: + self.xmlns = wsgi.XMLNS_ATOM + else: + self.xmlns = xmlns + + def _get_most_recent_update(self, versions): + recent = None + for version in versions: + updated = datetime.datetime.strptime(version['updated'], + '%Y-%m-%dT%H:%M:%SZ') + if not recent: + recent = updated + elif updated > recent: + recent = updated + + return recent.strftime('%Y-%m-%dT%H:%M:%SZ') + + def _get_base_url(self, link_href): + # Make sure no trailing / + link_href = link_href.rstrip('/') + return link_href.rsplit('/', 1)[0] + '/' + + def _create_feed(self, versions, feed_title, feed_id): + feed = etree.Element('feed', nsmap=self.NSMAP) + title = etree.SubElement(feed, 'title') + title.set('type', 'text') + title.text = feed_title + + # Set this updated to the most recently updated version + recent = self._get_most_recent_update(versions) + etree.SubElement(feed, 'updated').text = recent + + etree.SubElement(feed, 'id').text = feed_id + + link = etree.SubElement(feed, 'link') + link.set('rel', 'self') + link.set('href', feed_id) + + author = etree.SubElement(feed, 'author') + 
etree.SubElement(author, 'name').text = 'Rackspace' + etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' + + for version in versions: + feed.append(self._create_version_entry(version)) + + return feed + + def _create_version_entry(self, version): + entry = etree.Element('entry') + etree.SubElement(entry, 'id').text = version['links'][0]['href'] + title = etree.SubElement(entry, 'title') + title.set('type', 'text') + title.text = 'Version %s' % version['id'] + etree.SubElement(entry, 'updated').text = version['updated'] + + for link in version['links']: + link_elem = etree.SubElement(entry, 'link') + link_elem.set('rel', link['rel']) + link_elem.set('href', link['href']) + if 'type' in link: + link_elem.set('type', link['type']) + + content = etree.SubElement(entry, 'content') + content.set('type', 'text') + content.text = 'Version %s %s (%s)' % (version['id'], + version['status'], + version['updated']) + return entry + + +class VersionsAtomSerializer(AtomSerializer): + def default(self, data): + versions = data['versions'] + feed_id = self._get_base_url(versions[0]['links'][0]['href']) + feed = self._create_feed(versions, 'Available API Versions', feed_id) + return self._to_xml(feed) + + +class VersionAtomSerializer(AtomSerializer): + def default(self, data): + version = data['version'] + feed_id = version['links'][0]['href'] + feed = self._create_feed([version], 'About This Version', feed_id) + return self._to_xml(feed) + + +class Versions(wsgi.Resource): + def __init__(self): + super(Versions, self).__init__(None) + + @wsgi.serializers(xml=VersionsTemplate, + atom=VersionsAtomSerializer) + def index(self, req): + """Return all versions.""" + builder = views_versions.get_view_builder(req) + return builder.build_versions(VERSIONS) + + @wsgi.serializers(xml=ChoicesTemplate) + @wsgi.response(300) + def multi(self, req): + """Return multiple choices.""" + builder = views_versions.get_view_builder(req) + return builder.build_choices(VERSIONS, req) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + args = {} + if request_environment['PATH_INFO'] == '/': + args['action'] = 'index' + else: + args['action'] = 'multi' + + return args + + +class VersionV2(object): + @wsgi.serializers(xml=VersionTemplate, + atom=VersionAtomSerializer) + def show(self, req): + builder = views_versions.get_view_builder(req) + return builder.build_version(VERSIONS['v2.0']) + + +def create_resource(): + return wsgi.Resource(VersionV2()) diff --git a/cinder/api/openstack/compute/views/__init__.py b/cinder/api/openstack/compute/views/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/api/openstack/compute/views/versions.py b/cinder/api/openstack/compute/views/versions.py new file mode 100644 index 00000000000..cb2fd9f4ad7 --- /dev/null +++ b/cinder/api/openstack/compute/views/versions.py @@ -0,0 +1,94 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import os + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + + def __init__(self, base_url): + """ + :param base_url: url of the root wsgi application + """ + self.base_url = base_url + + def build_choices(self, VERSIONS, req): + version_objs = [] + for version in VERSIONS: + version = VERSIONS[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "links": [ + { + "rel": "self", + "href": self.generate_href(req.path), + }, + ], + "media-types": version['media-types'], + }) + + return dict(choices=version_objs) + + def build_versions(self, versions): + version_objs = [] + for version in sorted(versions.keys()): + version = versions[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "updated": version['updated'], + "links": self._build_links(version), + }) + + return dict(versions=version_objs) + + def build_version(self, version): + reval = copy.deepcopy(version) + reval['links'].insert(0, { + "rel": "self", + "href": self.base_url.rstrip('/') + '/', + }) + return dict(version=reval) + + def _build_links(self, version_data): + """Generate a container of links that refer to the provided version.""" + href = self.generate_href() + + links = [ + { + "rel": "self", + "href": href, + }, + ] + + return links + + def generate_href(self, path=None): + """Create an url that refers to a specific version_number.""" + version_number = 'v2' + if path: + path = path.strip('/') + return os.path.join(self.base_url, version_number, path) + else: + return os.path.join(self.base_url, version_number) + '/' diff --git a/cinder/api/openstack/extensions.py b/cinder/api/openstack/extensions.py new file mode 100644 index 00000000000..baa9510b653 --- /dev/null +++ b/cinder/api/openstack/extensions.py @@ -0,0 +1,395 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import webob.dec +import webob.exc + +import cinder.api.openstack +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import exception as common_exception +from cinder.openstack.common import importutils +import cinder.policy + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +class ExtensionDescriptor(object): + """Base class that defines the contract for extensions. + + Note that you don't have to derive from this class to have a valid + extension; it is purely a convenience. 
+ + """ + + # The name of the extension, e.g., 'Fox In Socks' + name = None + + # The alias for the extension, e.g., 'FOXNSOX' + alias = None + + # Description comes from the docstring for the class + + # The XML namespace for the extension, e.g., + # 'http://www.fox.in.socks/api/ext/pie/v1.0' + namespace = None + + # The timestamp when the extension was last updated, e.g., + # '2011-01-22T13:25:27-06:00' + updated = None + + def __init__(self, ext_mgr): + """Register extension with the extension manager.""" + + ext_mgr.register(self) + + def get_resources(self): + """List of extensions.ResourceExtension extension objects. + + Resources define new nouns, and are accessible through URLs. + + """ + resources = [] + return resources + + def get_controller_extensions(self): + """List of extensions.ControllerExtension extension objects. + + Controller extensions are used to extend existing controllers. + """ + controller_exts = [] + return controller_exts + + @classmethod + def nsmap(cls): + """Synthesize a namespace map from extension.""" + + # Start with a base nsmap + nsmap = ext_nsmap.copy() + + # Add the namespace for the extension + nsmap[cls.alias] = cls.namespace + + return nsmap + + @classmethod + def xmlname(cls, name): + """Synthesize element and attribute names.""" + + return '{%s}%s' % (cls.namespace, name) + + +def make_ext(elem): + elem.set('name') + elem.set('namespace') + elem.set('alias') + elem.set('updated') + + desc = xmlutil.SubTemplateElement(elem, 'description') + desc.text = 'description' + + xmlutil.make_links(elem, 'links') + + +ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class ExtensionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('extension', selector='extension') + make_ext(root) + return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) + + +class ExtensionsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('extensions') + elem = xmlutil.SubTemplateElement(root, 'extension', + selector='extensions') + make_ext(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) + + +class ExtensionsResource(wsgi.Resource): + + def __init__(self, extension_manager): + self.extension_manager = extension_manager + super(ExtensionsResource, self).__init__(None) + + def _translate(self, ext): + ext_data = {} + ext_data['name'] = ext.name + ext_data['alias'] = ext.alias + ext_data['description'] = ext.__doc__ + ext_data['namespace'] = ext.namespace + ext_data['updated'] = ext.updated + ext_data['links'] = [] # TODO(dprince): implement extension links + return ext_data + + @wsgi.serializers(xml=ExtensionsTemplate) + def index(self, req): + extensions = [] + for _alias, ext in self.extension_manager.extensions.iteritems(): + extensions.append(self._translate(ext)) + return dict(extensions=extensions) + + @wsgi.serializers(xml=ExtensionTemplate) + def show(self, req, id): + try: + # NOTE(dprince): the extensions alias is used as the 'id' for show + ext = self.extension_manager.extensions[id] + except KeyError: + raise webob.exc.HTTPNotFound() + + return dict(extension=self._translate(ext)) + + def delete(self, req, id): + raise webob.exc.HTTPNotFound() + + def create(self, req): + raise webob.exc.HTTPNotFound() + + +class ExtensionManager(object): + """Load extensions from the configured extension path. + + See cinder/tests/api/openstack/extensions/foxinsocks/extension.py for an + example extension implementation. 
+ + """ + + def register(self, ext): + # Do nothing if the extension doesn't check out + if not self._check_extension(ext): + return + + alias = ext.alias + LOG.audit(_('Loaded extension: %s'), alias) + + if alias in self.extensions: + raise exception.Error("Found duplicate extension: %s" % alias) + self.extensions[alias] = ext + + def get_resources(self): + """Returns a list of ResourceExtension objects.""" + + resources = [] + resources.append(ResourceExtension('extensions', + ExtensionsResource(self))) + + for ext in self.extensions.values(): + try: + resources.extend(ext.get_resources()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have resource + # extensions + pass + return resources + + def get_controller_extensions(self): + """Returns a list of ControllerExtension objects.""" + controller_exts = [] + for ext in self.extensions.values(): + try: + controller_exts.extend(ext.get_controller_extensions()) + except AttributeError: + # NOTE(Vek): Extensions aren't required to have + # controller extensions + pass + return controller_exts + + def _check_extension(self, extension): + """Checks for required methods in extension objects.""" + try: + LOG.debug(_('Ext name: %s'), extension.name) + LOG.debug(_('Ext alias: %s'), extension.alias) + LOG.debug(_('Ext description: %s'), + ' '.join(extension.__doc__.strip().split())) + LOG.debug(_('Ext namespace: %s'), extension.namespace) + LOG.debug(_('Ext updated: %s'), extension.updated) + except AttributeError as ex: + LOG.exception(_("Exception loading extension: %s"), unicode(ex)) + return False + + return True + + def load_extension(self, ext_factory): + """Execute an extension factory. + + Loads an extension. The 'ext_factory' is the name of a + callable that will be imported and called with one + argument--the extension manager. The factory callable is + expected to call the register() method at least once. + """ + + LOG.debug(_("Loading extension %s"), ext_factory) + + # Load the factory + factory = importutils.import_class(ext_factory) + + # Call it + LOG.debug(_("Calling extension factory %s"), ext_factory) + factory(self) + + def _load_extensions(self): + """Load extensions specified on the command line.""" + + extensions = list(self.cls_list) + + for ext_factory in extensions: + try: + self.load_extension(ext_factory) + except Exception as exc: + LOG.warn(_('Failed to load extension %(ext_factory)s: ' + '%(exc)s') % locals()) + + +class ControllerExtension(object): + """Extend core controllers of cinder OpenStack API. + + Provide a way to extend existing cinder OpenStack API core + controllers. 
+ """ + + def __init__(self, extension, collection, controller): + self.extension = extension + self.collection = collection + self.controller = controller + + +class ResourceExtension(object): + """Add top level resources to the OpenStack API in cinder.""" + + def __init__(self, collection, controller, parent=None, + collection_actions=None, member_actions=None, + custom_routes_fn=None): + if not collection_actions: + collection_actions = {} + if not member_actions: + member_actions = {} + self.collection = collection + self.controller = controller + self.parent = parent + self.collection_actions = collection_actions + self.member_actions = member_actions + self.custom_routes_fn = custom_routes_fn + + +def wrap_errors(fn): + """Ensure errors are not passed along.""" + def wrapped(*args, **kwargs): + try: + return fn(*args, **kwargs) + except webob.exc.HTTPException: + raise + except Exception: + raise webob.exc.HTTPInternalServerError() + return wrapped + + +def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): + """Registers all standard API extensions.""" + + # Walk through all the modules in our directory... + our_dir = path[0] + for dirpath, dirnames, filenames in os.walk(our_dir): + # Compute the relative package name from the dirpath + relpath = os.path.relpath(dirpath, our_dir) + if relpath == '.': + relpkg = '' + else: + relpkg = '.%s' % '.'.join(relpath.split(os.sep)) + + # Now, consider each file in turn, only considering .py files + for fname in filenames: + root, ext = os.path.splitext(fname) + + # Skip __init__ and anything that's not .py + if ext != '.py' or root == '__init__': + continue + + # Try loading it + classname = "%s%s" % (root[0].upper(), root[1:]) + classpath = ("%s%s.%s.%s" % + (package, relpkg, root, classname)) + + if ext_list is not None and classname not in ext_list: + logger.debug("Skipping extension: %s" % classpath) + continue + + try: + ext_mgr.load_extension(classpath) + except Exception as exc: + logger.warn(_('Failed to load extension %(classpath)s: ' + '%(exc)s') % locals()) + + # Now, let's consider any subdirectories we may have... + subdirs = [] + for dname in dirnames: + # Skip it if it does not have __init__.py + if not os.path.exists(os.path.join(dirpath, dname, + '__init__.py')): + continue + + # If it has extension(), delegate... + ext_name = ("%s%s.%s.extension" % + (package, relpkg, dname)) + try: + ext = importutils.import_class(ext_name) + except common_exception.NotFound: + # extension() doesn't exist on it, so we'll explore + # the directory for ourselves + subdirs.append(dname) + else: + try: + ext(ext_mgr) + except Exception as exc: + logger.warn(_('Failed to load extension %(ext_name)s: ' + '%(exc)s') % locals()) + + # Update the list of directories we'll explore... 
+ dirnames[:] = subdirs + + +def extension_authorizer(api_name, extension_name): + def authorize(context, target=None): + if target is None: + target = {'project_id': context.project_id, + 'user_id': context.user_id} + action = '%s_extension:%s' % (api_name, extension_name) + cinder.policy.enforce(context, action, target) + return authorize + + +def soft_extension_authorizer(api_name, extension_name): + hard_authorize = extension_authorizer(api_name, extension_name) + + def authorize(context): + try: + hard_authorize(context) + return True + except exception.NotAuthorized: + return False + return authorize diff --git a/cinder/api/openstack/urlmap.py b/cinder/api/openstack/urlmap.py new file mode 100644 index 00000000000..ac320985405 --- /dev/null +++ b/cinder/api/openstack/urlmap.py @@ -0,0 +1,297 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import paste.urlmap +import re +import urllib2 + +from cinder import log as logging +from cinder.api.openstack import wsgi + + +_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' +_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*' + r'(?:=\s*([^;]+|%s))?\s*' % + (_quoted_string_re, _quoted_string_re)) + +LOG = logging.getLogger(__name__) + + +def unquote_header_value(value): + """Unquotes a header value. + This does not use the real unquoting but what browsers are actually + using for quoting. + + :param value: the header value to unquote. + """ + if value and value[0] == value[-1] == '"': + # this is not the real unquoting, but fixing this so that the + # RFC is met will result in bugs with internet explorer and + # probably some other browsers as well. IE for example is + # uploading files with "C:\foo\bar.txt" as filename + value = value[1:-1] + return value + + +def parse_list_header(value): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Quotes are removed automatically after parsing. + + The return value is a standard :class:`list`: + + >>> parse_list_header('token, "quoted value"') + ['token', 'quoted value'] + + :param value: a string with a list header. + :return: :class:`list` + """ + result = [] + for item in urllib2.parse_http_list(value): + if item[:1] == item[-1:] == '"': + item = unquote_header_value(item[1:-1]) + result.append(item) + return result + + +def parse_options_header(value): + """Parse a ``Content-Type`` like header into a tuple with the content + type and the options: + + >>> parse_options_header('Content-Type: text/html; mimetype=text/html') + ('Content-Type:', {'mimetype': 'text/html'}) + + :param value: the header to parse. 
+ :return: (str, options) + """ + def _tokenize(string): + for match in _option_header_piece_re.finditer(string): + key, value = match.groups() + key = unquote_header_value(key) + if value is not None: + value = unquote_header_value(value) + yield key, value + + if not value: + return '', {} + + parts = _tokenize(';' + value) + name = parts.next()[0] + extra = dict(parts) + return name, extra + + +class Accept(object): + def __init__(self, value): + self._content_types = [parse_options_header(v) for v in + parse_list_header(value)] + + def best_match(self, supported_content_types): + # FIXME: Should we have a more sophisticated matching algorithm that + # takes into account the version as well? + best_quality = -1 + best_content_type = None + best_params = {} + best_match = '*/*' + + for content_type in supported_content_types: + for content_mask, params in self._content_types: + try: + quality = float(params.get('q', 1)) + except ValueError: + continue + + if quality < best_quality: + continue + elif best_quality == quality: + if best_match.count('*') <= content_mask.count('*'): + continue + + if self._match_mask(content_mask, content_type): + best_quality = quality + best_content_type = content_type + best_params = params + best_match = content_mask + + return best_content_type, best_params + + def content_type_params(self, best_content_type): + """Find parameters in Accept header for given content type.""" + for content_type, params in self._content_types: + if best_content_type == content_type: + return params + + return {} + + def _match_mask(self, mask, content_type): + if '*' not in mask: + return content_type == mask + if mask == '*/*': + return True + mask_major = mask[:-2] + content_type_major = content_type.split('/', 1)[0] + return content_type_major == mask_major + + +def urlmap_factory(loader, global_conf, **local_conf): + if 'not_found_app' in local_conf: + not_found_app = local_conf.pop('not_found_app') + else: + not_found_app = global_conf.get('not_found_app') + if not_found_app: + not_found_app = loader.get_app(not_found_app, global_conf=global_conf) + urlmap = URLMap(not_found_app=not_found_app) + for path, app_name in local_conf.items(): + path = paste.urlmap.parse_path_expression(path) + app = loader.get_app(app_name, global_conf=global_conf) + urlmap[path] = app + return urlmap + + +class URLMap(paste.urlmap.URLMap): + def _match(self, host, port, path_info): + """Find longest match for a given URL path.""" + for (domain, app_url), app in self.applications: + if domain and domain != host and domain != host + ':' + port: + continue + if (path_info == app_url + or path_info.startswith(app_url + '/')): + return app, app_url + + return None, None + + def _set_script_name(self, app, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + return app(environ, start_response) + + return wrap + + def _munge_path(self, app, path_info, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + environ['PATH_INFO'] = path_info[len(app_url):] + return app(environ, start_response) + + return wrap + + def _path_strategy(self, host, port, path_info): + """Check path suffix for MIME type and path prefix for API version.""" + mime_type = app = app_url = None + + parts = path_info.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: + mime_type = possible_type + + parts = path_info.split('/') + if len(parts) > 1: + possible_app, possible_app_url = 
self._match(host, port, path_info) + # Don't use prefix if it ends up matching default + if possible_app and possible_app_url: + app_url = possible_app_url + app = self._munge_path(possible_app, path_info, app_url) + + return mime_type, app, app_url + + def _content_type_strategy(self, host, port, environ): + """Check Content-Type header for API version.""" + app = None + params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return app + + def _accept_strategy(self, host, port, environ, supported_content_types): + """Check Accept header for best matching MIME type and API version.""" + accept = Accept(environ.get('HTTP_ACCEPT', '')) + + app = None + + # Find the best match in the Accept header + mime_type, params = accept.best_match(supported_content_types) + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return mime_type, app + + def __call__(self, environ, start_response): + host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() + if ':' in host: + host, port = host.split(':', 1) + else: + if environ['wsgi.url_scheme'] == 'http': + port = '80' + else: + port = '443' + + path_info = environ['PATH_INFO'] + path_info = self.normalize_url(path_info, False)[1] + + # The MIME type for the response is determined in one of two ways: + # 1) URL path suffix (eg /servers/detail.json) + # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) + + # The API version is determined in one of three ways: + # 1) URL path prefix (eg /v1.1/tenant/servers/detail) + # 2) Content-Type header (eg application/json;version=1.1) + # 3) Accept header (eg application/json;q=0.8;version=1.1) + + supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) + + mime_type, app, app_url = self._path_strategy(host, port, path_info) + + # Accept application/atom+xml for the index query of each API + # version mount point as well as the root index + if (app_url and app_url + '/' == path_info) or path_info == '/': + supported_content_types.append('application/atom+xml') + + if not app: + app = self._content_type_strategy(host, port, environ) + + if not mime_type or not app: + possible_mime_type, possible_app = self._accept_strategy( + host, port, environ, supported_content_types) + if possible_mime_type and not mime_type: + mime_type = possible_mime_type + if possible_app and not app: + app = possible_app + + if not mime_type: + mime_type = 'application/json' + + if not app: + # Didn't match a particular version, probably matches default + app, app_url = self._match(host, port, path_info) + if app: + app = self._munge_path(app, path_info, app_url) + + if app: + environ['cinder.best_content_type'] = mime_type + return app(environ, start_response) + + environ['paste.urlmap_object'] = self + return self.not_found_application(environ, start_response) diff --git a/cinder/api/openstack/volume/__init__.py b/cinder/api/openstack/volume/__init__.py new file mode 100644 index 00000000000..2d9ac302bd8 --- /dev/null +++ b/cinder/api/openstack/volume/__init__.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Volume API. +""" + +import cinder.api.openstack +from cinder.api.openstack.volume import extensions +from cinder.api.openstack.volume import snapshots +from cinder.api.openstack.volume import types +from cinder.api.openstack.volume import volumes +from cinder.api.openstack.volume import versions +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +class APIRouter(cinder.api.openstack.APIRouter): + """ + Routes requests on the OpenStack API to the appropriate controller + and method. + """ + ExtensionManager = extensions.ExtensionManager + + def _setup_routes(self, mapper): + self.resources['versions'] = versions.create_resource() + mapper.connect("versions", "/", + controller=self.resources['versions'], + action='show') + + mapper.redirect("", "/") + + self.resources['volumes'] = volumes.create_resource() + mapper.resource("volume", "volumes", + controller=self.resources['volumes'], + collection={'detail': 'GET'}) + + self.resources['types'] = types.create_resource() + mapper.resource("type", "types", + controller=self.resources['types']) + + self.resources['snapshots'] = snapshots.create_resource() + mapper.resource("snapshot", "snapshots", + controller=self.resources['snapshots'], + collection={'detail': 'GET'}) diff --git a/cinder/api/openstack/volume/contrib/__init__.py b/cinder/api/openstack/volume/contrib/__init__.py new file mode 100644 index 00000000000..c49a4c6d3ad --- /dev/null +++ b/cinder/api/openstack/volume/contrib/__init__.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Contrib contains extensions that are shipped with cinder. + +It can't be called 'extensions' because that causes namespacing problems. 
+ +""" + +from cinder import flags +from cinder import log as logging +from cinder.api.openstack import extensions + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def standard_extensions(ext_mgr): + extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) + + +def select_extensions(ext_mgr): + extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, + FLAGS.osapi_volume_ext_list) diff --git a/cinder/api/openstack/volume/contrib/types_extra_specs.py b/cinder/api/openstack/volume/contrib/types_extra_specs.py new file mode 100644 index 00000000000..e0c4d595cdf --- /dev/null +++ b/cinder/api/openstack/volume/contrib/types_extra_specs.py @@ -0,0 +1,152 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volume types extra specs extension""" + +import webob + +from cinder.api.openstack import extensions +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import db +from cinder import exception +from cinder.volume import volume_types + + +authorize = extensions.extension_authorizer('volume', 'types_extra_specs') + + +class VolumeTypeExtraSpecsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypeExtraSpecTemplate(xmlutil.TemplateBuilder): + def construct(self): + tagname = xmlutil.Selector('key') + + def extraspec_sel(obj, do_raise=False): + # Have to extract the key and value for later use... 
+ key, value = obj.items()[0] + return dict(key=key, value=value) + + root = xmlutil.TemplateElement(tagname, selector=extraspec_sel) + root.text = 'value' + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypeExtraSpecsController(object): + """ The volume type extra specs API controller for the OpenStack API """ + + def _get_extra_specs(self, context, type_id): + extra_specs = db.volume_type_extra_specs_get(context, type_id) + specs_dict = {} + for key, value in extra_specs.iteritems(): + specs_dict[key] = value + return dict(extra_specs=specs_dict) + + def _check_body(self, body): + if not body: + expl = _('No Request Body') + raise webob.exc.HTTPBadRequest(explanation=expl) + + def _check_type(self, context, type_id): + try: + volume_types.get_volume_type(context, type_id) + except exception.NotFound as ex: + raise webob.exc.HTTPNotFound(explanation=unicode(ex)) + + @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) + def index(self, req, type_id): + """ Returns the list of extra specs for a given volume type """ + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + return self._get_extra_specs(context, type_id) + + @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) + def create(self, req, type_id, body=None): + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + self._check_body(body) + specs = body.get('extra_specs') + if not isinstance(specs, dict): + expl = _('Malformed extra specs') + raise webob.exc.HTTPBadRequest(explanation=expl) + db.volume_type_extra_specs_update_or_create(context, + type_id, + specs) + return body + + @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) + def update(self, req, type_id, id, body=None): + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + self._check_body(body) + if not id in body: + expl = _('Request body and URI mismatch') + raise webob.exc.HTTPBadRequest(explanation=expl) + if len(body) > 1: + expl = _('Request body contains too many items') + raise webob.exc.HTTPBadRequest(explanation=expl) + db.volume_type_extra_specs_update_or_create(context, + type_id, + body) + return body + + @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) + def show(self, req, type_id, id): + """Return a single extra spec item.""" + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + specs = self._get_extra_specs(context, type_id) + if id in specs['extra_specs']: + return {id: specs['extra_specs'][id]} + else: + raise webob.exc.HTTPNotFound() + + def delete(self, req, type_id, id): + """ Deletes an existing extra spec """ + context = req.environ['cinder.context'] + self._check_type(context, type_id) + authorize(context) + db.volume_type_extra_specs_delete(context, type_id, id) + return webob.Response(status_int=202) + + +class Types_extra_specs(extensions.ExtensionDescriptor): + """Types extra specs support""" + + name = "TypesExtraSpecs" + alias = "os-types-extra-specs" + namespace = "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1" + updated = "2011-08-24T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension('extra_specs', + VolumeTypeExtraSpecsController(), + parent=dict( + member_name='type', + collection_name='types')) + resources.append(res) + + return resources diff --git a/cinder/api/openstack/volume/contrib/types_manage.py b/cinder/api/openstack/volume/contrib/types_manage.py new file mode 100644 
index 00000000000..bb8921a0f3e --- /dev/null +++ b/cinder/api/openstack/volume/contrib/types_manage.py @@ -0,0 +1,91 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volume types manage extension.""" + +import webob + +from cinder.api.openstack import extensions +from cinder.api.openstack.volume import types +from cinder.api.openstack import wsgi +from cinder import exception +from cinder.volume import volume_types + + +authorize = extensions.extension_authorizer('volume', 'types_manage') + + +class VolumeTypesManageController(wsgi.Controller): + """ The volume types API controller for the OpenStack API """ + + @wsgi.action("create") + @wsgi.serializers(xml=types.VolumeTypeTemplate) + def _create(self, req, body): + """Creates a new volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + if not body or body == "": + raise webob.exc.HTTPUnprocessableEntity() + + vol_type = body.get('volume_type', None) + if vol_type is None or vol_type == "": + raise webob.exc.HTTPUnprocessableEntity() + + name = vol_type.get('name', None) + specs = vol_type.get('extra_specs', {}) + + if name is None or name == "": + raise webob.exc.HTTPUnprocessableEntity() + + try: + volume_types.create(context, name, specs) + vol_type = volume_types.get_volume_type_by_name(context, name) + except exception.VolumeTypeExists as err: + raise webob.exc.HTTPConflict(explanation=str(err)) + except exception.NotFound: + raise webob.exc.HTTPNotFound() + + return {'volume_type': vol_type} + + @wsgi.action("delete") + def _delete(self, req, id): + """ Deletes an existing volume type """ + context = req.environ['cinder.context'] + authorize(context) + + try: + vol_type = volume_types.get_volume_type(context, id) + volume_types.destroy(context, vol_type['name']) + except exception.NotFound: + raise webob.exc.HTTPNotFound() + + return webob.Response(status_int=202) + + +class Types_manage(extensions.ExtensionDescriptor): + """Types manage support""" + + name = "TypesManage" + alias = "os-types-manage" + namespace = "http://docs.openstack.org/volume/ext/types-manage/api/v1" + updated = "2011-08-24T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeTypesManageController() + extension = extensions.ControllerExtension(self, 'types', controller) + return [extension] diff --git a/cinder/api/openstack/volume/extensions.py b/cinder/api/openstack/volume/extensions.py new file mode 100644 index 00000000000..ffe284555e2 --- /dev/null +++ b/cinder/api/openstack/volume/extensions.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.openstack import extensions as base_extensions +from cinder import flags +from cinder import log as logging + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +class ExtensionManager(base_extensions.ExtensionManager): + def __init__(self): + LOG.audit(_('Initializing extension manager.')) + + self.cls_list = FLAGS.osapi_volume_extension + self.extensions = {} + self._load_extensions() diff --git a/cinder/api/openstack/volume/snapshots.py b/cinder/api/openstack/volume/snapshots.py new file mode 100644 index 00000000000..f6d5304ec18 --- /dev/null +++ b/cinder/api/openstack/volume/snapshots.py @@ -0,0 +1,170 @@ +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volumes snapshots api.""" + +from webob import exc +import webob + +from cinder.api.openstack import common +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import volume + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +def _translate_snapshot_detail_view(context, vol): + """Maps keys for snapshots details view.""" + + d = _translate_snapshot_summary_view(context, vol) + + # NOTE(gagupta): No additional data / lookups at the moment + return d + + +def _translate_snapshot_summary_view(context, vol): + """Maps keys for snapshots summary view.""" + d = {} + + # TODO(bcwaldon): remove str cast once we use uuids + d['id'] = str(vol['id']) + d['volume_id'] = str(vol['volume_id']) + d['status'] = vol['status'] + # NOTE(gagupta): We map volume_size as the snapshot size + d['size'] = vol['volume_size'] + d['created_at'] = vol['created_at'] + d['display_name'] = vol['display_name'] + d['display_description'] = vol['display_description'] + return d + + +def make_snapshot(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('created_at') + elem.set('display_name') + elem.set('display_description') + elem.set('volume_id') + + +class SnapshotTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshot', selector='snapshot') + make_snapshot(root) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshots') + elem = xmlutil.SubTemplateElement(root, 'snapshot', + selector='snapshots') + make_snapshot(elem) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsController(object): + """The Volumes API controller for the 
OpenStack API.""" + + def __init__(self): + self.volume_api = volume.API() + super(SnapshotsController, self).__init__() + + @wsgi.serializers(xml=SnapshotTemplate) + def show(self, req, id): + """Return data about the given snapshot.""" + context = req.environ['cinder.context'] + + try: + vol = self.volume_api.get_snapshot(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return {'snapshot': _translate_snapshot_detail_view(context, vol)} + + def delete(self, req, id): + """Delete a snapshot.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete snapshot with id: %s"), id, context=context) + + try: + snapshot = self.volume_api.get_snapshot(context, id) + self.volume_api.delete_snapshot(context, snapshot) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=SnapshotsTemplate) + def index(self, req): + """Returns a summary list of snapshots.""" + return self._items(req, entity_maker=_translate_snapshot_summary_view) + + @wsgi.serializers(xml=SnapshotsTemplate) + def detail(self, req): + """Returns a detailed list of snapshots.""" + return self._items(req, entity_maker=_translate_snapshot_detail_view) + + def _items(self, req, entity_maker): + """Returns a list of snapshots, transformed through entity_maker.""" + context = req.environ['cinder.context'] + + snapshots = self.volume_api.get_all_snapshots(context) + limited_list = common.limited(snapshots, req) + res = [entity_maker(context, snapshot) for snapshot in limited_list] + return {'snapshots': res} + + @wsgi.serializers(xml=SnapshotTemplate) + def create(self, req, body): + """Creates a new snapshot.""" + context = req.environ['cinder.context'] + + if not body: + raise exc.HTTPUnprocessableEntity() + + snapshot = body['snapshot'] + volume_id = snapshot['volume_id'] + volume = self.volume_api.get(context, volume_id) + force = snapshot.get('force', False) + msg = _("Create snapshot from volume %s") + LOG.audit(msg, volume_id, context=context) + + if force: + new_snapshot = self.volume_api.create_snapshot_force(context, + volume, + snapshot.get('display_name'), + snapshot.get('display_description')) + else: + new_snapshot = self.volume_api.create_snapshot(context, + volume, + snapshot.get('display_name'), + snapshot.get('display_description')) + + retval = _translate_snapshot_detail_view(context, new_snapshot) + + return {'snapshot': retval} + + +def create_resource(): + return wsgi.Resource(SnapshotsController()) diff --git a/cinder/api/openstack/volume/types.py b/cinder/api/openstack/volume/types.py new file mode 100644 index 00000000000..8fea061902c --- /dev/null +++ b/cinder/api/openstack/volume/types.py @@ -0,0 +1,76 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
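Before the volume-types module continues below, an editor's sketch of the request and response bodies handled by the SnapshotsController just shown (illustrative only, not part of the original patch; the concrete ids, names, and timestamps are hypothetical):

    # Request body accepted by SnapshotsController.create(); 'force'
    # routes to volume_api.create_snapshot_force() instead of
    # create_snapshot().
    create_request = {
        'snapshot': {
            'volume_id': '1',
            'force': False,
            'display_name': 'backup-before-upgrade',
            'display_description': 'nightly snapshot of volume 1',
        },
    }

    # Response shape built by _translate_snapshot_detail_view();
    # 'size' is mapped from the snapshot's volume_size column.
    create_response = {
        'snapshot': {
            'id': '5',
            'volume_id': '1',
            'status': 'creating',
            'size': 10,
            'created_at': '2012-05-03T10:48:26.000000',
            'display_name': 'backup-before-upgrade',
            'display_description': 'nightly snapshot of volume 1',
        },
    }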
+ +""" The volume type & volume types extra specs extension""" + +from webob import exc + +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import exception +from cinder.volume import volume_types + + +def make_voltype(elem): + elem.set('id') + elem.set('name') + extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + elem.append(extra_specs) + + +class VolumeTypeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_type', selector='volume_type') + make_voltype(root) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_types') + elem = xmlutil.SubTemplateElement(root, 'volume_type', + selector='volume_types') + make_voltype(elem) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesController(object): + """ The volume types API controller for the OpenStack API """ + + @wsgi.serializers(xml=VolumeTypesTemplate) + def index(self, req): + """ Returns the list of volume types """ + context = req.environ['cinder.context'] + return {'volume_types': volume_types.get_all_types(context).values()} + + @wsgi.serializers(xml=VolumeTypeTemplate) + def show(self, req, id): + """ Return a single volume type item """ + context = req.environ['cinder.context'] + + try: + vol_type = volume_types.get_volume_type(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + # TODO(bcwaldon): remove str cast once we use uuids + vol_type['id'] = str(vol_type['id']) + return {'volume_type': vol_type} + + +def create_resource(): + return wsgi.Resource(VolumeTypesController()) diff --git a/cinder/api/openstack/volume/versions.py b/cinder/api/openstack/volume/versions.py new file mode 100644 index 00000000000..7dcfdbe6c04 --- /dev/null +++ b/cinder/api/openstack/volume/versions.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from cinder.api.openstack.compute import versions +from cinder.api.openstack.volume.views import versions as views_versions +from cinder.api.openstack import wsgi + + +VERSIONS = { + "v1.0": { + "id": "v1.0", + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + #(anthony) FIXME + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.volume+xml;version=1", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=1", + } + ], + } +} + + +class Versions(versions.Versions): + @wsgi.serializers(xml=versions.VersionsTemplate, + atom=versions.VersionsAtomSerializer) + def index(self, req): + """Return all versions.""" + builder = views_versions.get_view_builder(req) + return builder.build_versions(VERSIONS) + + @wsgi.serializers(xml=versions.ChoicesTemplate) + @wsgi.response(300) + def multi(self, req): + """Return multiple choices.""" + builder = views_versions.get_view_builder(req) + return builder.build_choices(VERSIONS, req) + + +class VolumeVersionV1(object): + @wsgi.serializers(xml=versions.VersionTemplate, + atom=versions.VersionAtomSerializer) + def show(self, req): + builder = views_versions.get_view_builder(req) + return builder.build_version(VERSIONS['v1.0']) + + +def create_resource(): + return wsgi.Resource(VolumeVersionV1()) diff --git a/cinder/api/openstack/volume/views/__init__.py b/cinder/api/openstack/volume/views/__init__.py new file mode 100644 index 00000000000..d65c689a83d --- /dev/null +++ b/cinder/api/openstack/volume/views/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/api/openstack/volume/views/versions.py b/cinder/api/openstack/volume/views/versions.py new file mode 100644 index 00000000000..a4bd164b2d4 --- /dev/null +++ b/cinder/api/openstack/volume/views/versions.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
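One more editor's sketch (not part of the original patch) before the view-builder code below: the version index that Versions.index() above builds from VERSIONS. The self link is generated by the ViewBuilder in views/versions.py; the host and port are hypothetical (8776 is merely the conventional cinder-api port):

    versions_response = {
        'versions': [
            {
                'id': 'v1.0',
                'status': 'CURRENT',
                'updated': '2012-01-04T11:33:21Z',
                'links': [
                    {'rel': 'self',
                     'href': 'http://localhost:8776/v1/'},
                ],
            },
        ],
    }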
+ +import os + +from cinder.api.openstack.compute.views import versions as compute_views + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(compute_views.ViewBuilder): + def generate_href(self, path=None): + """Create an url that refers to a specific version_number.""" + version_number = 'v1' + if path: + path = path.strip('/') + return os.path.join(self.base_url, version_number, path) + else: + return os.path.join(self.base_url, version_number) + '/' diff --git a/cinder/api/openstack/volume/volumes.py b/cinder/api/openstack/volume/volumes.py new file mode 100644 index 00000000000..9d4b4b5d588 --- /dev/null +++ b/cinder/api/openstack/volume/volumes.py @@ -0,0 +1,263 @@ +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volumes api.""" + +from webob import exc +import webob + +from cinder.api.openstack import common +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import volume +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +def _translate_attachment_detail_view(_context, vol): + """Maps keys for attachment details view.""" + + d = _translate_attachment_summary_view(_context, vol) + + # No additional data / lookups at the moment + + return d + + +def _translate_attachment_summary_view(_context, vol): + """Maps keys for attachment summary view.""" + d = {} + + # TODO(bcwaldon): remove str cast once we use uuids + volume_id = str(vol['id']) + + # NOTE(justinsb): We use the volume id as the id of the attachment object + d['id'] = volume_id + + d['volume_id'] = volume_id + if vol.get('instance'): + d['server_id'] = vol['instance']['uuid'] + if vol.get('mountpoint'): + d['device'] = vol['mountpoint'] + + return d + + +def _translate_volume_detail_view(context, vol): + """Maps keys for volumes details view.""" + + d = _translate_volume_summary_view(context, vol) + + # No additional data / lookups at the moment + + return d + + +def _translate_volume_summary_view(context, vol): + """Maps keys for volumes summary view.""" + d = {} + + # TODO(bcwaldon): remove str cast once we use uuids + d['id'] = str(vol['id']) + d['status'] = vol['status'] + d['size'] = vol['size'] + d['availability_zone'] = vol['availability_zone'] + d['created_at'] = vol['created_at'] + + d['attachments'] = [] + if vol['attach_status'] == 'attached': + attachment = _translate_attachment_detail_view(context, vol) + d['attachments'].append(attachment) + + d['display_name'] = vol['display_name'] + d['display_description'] = vol['display_description'] + + if vol['volume_type_id'] and vol.get('volume_type'): + d['volume_type'] = vol['volume_type']['name'] + else: + # TODO(bcwaldon): remove str cast once we use uuids + d['volume_type'] = str(vol['volume_type_id']) + + d['snapshot_id'] = vol['snapshot_id'] + # 
TODO(bcwaldon): remove str cast once we use uuids + if d['snapshot_id'] is not None: + d['snapshot_id'] = str(d['snapshot_id']) + + LOG.audit(_("vol=%s"), vol, context=context) + + if vol.get('volume_metadata'): + meta_dict = {} + for i in vol['volume_metadata']: + meta_dict[i['key']] = i['value'] + d['metadata'] = meta_dict + else: + d['metadata'] = {} + + return d + + +def make_attachment(elem): + elem.set('id') + elem.set('server_id') + elem.set('volume_id') + elem.set('device') + + +def make_volume(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('availability_zone') + elem.set('created_at') + elem.set('display_name') + elem.set('display_description') + elem.set('volume_type') + elem.set('snapshot_id') + + attachments = xmlutil.SubTemplateElement(elem, 'attachments') + attachment = xmlutil.SubTemplateElement(attachments, 'attachment', + selector='attachments') + make_attachment(attachment) + + metadata = xmlutil.make_flat_dict('metadata') + elem.append(metadata) + + +volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V1, 'atom': xmlutil.XMLNS_ATOM} + + +class VolumeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) + + +class VolumesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) + + +class VolumeController(object): + """The Volumes API controller for the OpenStack API.""" + + def __init__(self): + self.volume_api = volume.API() + super(VolumeController, self).__init__() + + @wsgi.serializers(xml=VolumeTemplate) + def show(self, req, id): + """Return data about the given volume.""" + context = req.environ['cinder.context'] + + try: + vol = self.volume_api.get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return {'volume': _translate_volume_detail_view(context, vol)} + + def delete(self, req, id): + """Delete a volume.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete volume with id: %s"), id, context=context) + + try: + volume = self.volume_api.get(context, id) + self.volume_api.delete(context, volume) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=VolumesTemplate) + def index(self, req): + """Returns a summary list of volumes.""" + return self._items(req, entity_maker=_translate_volume_summary_view) + + @wsgi.serializers(xml=VolumesTemplate) + def detail(self, req): + """Returns a detailed list of volumes.""" + return self._items(req, entity_maker=_translate_volume_detail_view) + + def _items(self, req, entity_maker): + """Returns a list of volumes, transformed through entity_maker.""" + context = req.environ['cinder.context'] + + volumes = self.volume_api.get_all(context) + limited_list = common.limited(volumes, req) + res = [entity_maker(context, vol) for vol in limited_list] + return {'volumes': res} + + @wsgi.serializers(xml=VolumeTemplate) + def create(self, req, body): + """Creates a new volume.""" + context = req.environ['cinder.context'] + + if not body: + raise exc.HTTPUnprocessableEntity() + + volume = body['volume'] + size = volume['size'] + LOG.audit(_("Create volume of %s GB"), size, context=context) + + kwargs = {} + + req_volume_type = volume.get('volume_type', None) + if 
req_volume_type: + try: + kwargs['volume_type'] = volume_types.get_volume_type_by_name( + context, req_volume_type) + except exception.NotFound: + raise exc.HTTPNotFound() + + kwargs['metadata'] = volume.get('metadata', None) + + snapshot_id = volume.get('snapshot_id') + if snapshot_id is not None: + kwargs['snapshot'] = self.volume_api.get_snapshot(context, + snapshot_id) + else: + kwargs['snapshot'] = None + + kwargs['availability_zone'] = volume.get('availability_zone', None) + + new_volume = self.volume_api.create(context, + size, + volume.get('display_name'), + volume.get('display_description'), + **kwargs) + + # TODO(vish): Instance should be None at db layer instead of + # trying to lazy load, but for now we turn it into + # a dict to avoid an error. + retval = _translate_volume_detail_view(context, dict(new_volume)) + + return {'volume': retval} + + +def create_resource(): + return wsgi.Resource(VolumeController()) diff --git a/cinder/api/openstack/wsgi.py b/cinder/api/openstack/wsgi.py new file mode 100644 index 00000000000..bb309056ead --- /dev/null +++ b/cinder/api/openstack/wsgi.py @@ -0,0 +1,1123 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect +from xml.dom import minidom +from xml.parsers import expat +import math +import time + +from lxml import etree +import webob + +from cinder import exception +from cinder import log as logging +from cinder import utils +from cinder import wsgi + + +XMLNS_V1 = 'http://docs.openstack.org/volume/api/v1' + +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' + +LOG = logging.getLogger(__name__) + +# The vendor content types should serialize identically to the non-vendor +# content types. 
So to avoid littering the code with both options, we +# map the vendor to the other when looking up the type +_CONTENT_TYPE_MAP = { + 'application/vnd.openstack.volume+json': 'application/json', + 'application/vnd.openstack.volume+xml': 'application/xml', +} + +SUPPORTED_CONTENT_TYPES = ( + 'application/json', + 'application/vnd.openstack.volume+json', + 'application/xml', + 'application/vnd.openstack.volume+xml', +) + +_MEDIA_TYPE_MAP = { + 'application/vnd.openstack.volume+json': 'json', + 'application/json': 'json', + 'application/vnd.openstack.volume+xml': 'xml', + 'application/xml': 'xml', + 'application/atom+xml': 'atom', +} + + +class Request(webob.Request): + """Add some OpenStack API-specific logic to the base webob.Request.""" + + def best_match_content_type(self): + """Determine the requested response content-type.""" + if 'cinder.best_content_type' not in self.environ: + # Calculate the best MIME type + content_type = None + + # Check URL path suffix + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in SUPPORTED_CONTENT_TYPES: + content_type = possible_type + + if not content_type: + content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) + + self.environ['cinder.best_content_type'] = (content_type or + 'application/json') + + return self.environ['cinder.best_content_type'] + + def get_content_type(self): + """Determine content type of the request body. + + Does not do any body introspection, only checks header + + """ + if not "Content-Type" in self.headers: + return None + + allowed_types = SUPPORTED_CONTENT_TYPES + content_type = self.content_type + + if content_type not in allowed_types: + raise exception.InvalidContentType(content_type=content_type) + + return content_type + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class TextDeserializer(ActionDispatcher): + """Default request body deserialization""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) + + def default(self, datastring): + return {} + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): + try: + return utils.loads(datastring) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} + + +class XMLDeserializer(TextDeserializer): + + def __init__(self, metadata=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + + def _from_xml(self, datastring): + plurals = set(self.metadata.get('plurals', {})) + + try: + node = minidom.parseString(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + except expat.ExpatError: + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. + + :param listnames: list of XML node names whose subnodes should + be considered list items. 
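        For example (an editor's illustration, not part of the original
        patch), deserializing '<servers><server/></servers>' with
        listnames containing 'servers' yields {'servers': [{}]}: the
        'servers' node is converted to a list of its converted children.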
+ + """ + if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: + return node.childNodes[0].nodeValue + elif node.nodeName in listnames: + return [self._from_xml_node(n, listnames) for n in node.childNodes] + else: + result = dict() + for attr in node.attributes.keys(): + result[attr] = node.attributes[attr].nodeValue + for child in node.childNodes: + if child.nodeType != node.TEXT_NODE: + result[child.nodeName] = self._from_xml_node(child, + listnames) + return result + + def find_first_child_named(self, parent, name): + """Search a nodes children for the first child with a given name""" + for node in parent.childNodes: + if node.nodeName == name: + return node + return None + + def find_children_named(self, parent, name): + """Return all of a nodes children who have the given name""" + for node in parent.childNodes: + if node.nodeName == name: + yield node + + def extract_text(self, node): + """Get the text field contained by the given node""" + if len(node.childNodes) == 1: + child = node.childNodes[0] + if child.nodeType == child.TEXT_NODE: + return child.nodeValue + return "" + + def find_attribute_or_element(self, parent, name): + """Get an attribute value; fallback to an element if not found""" + if parent.hasAttribute(name): + return parent.getAttribute(name) + + node = self.find_first_child_named(parent, name) + if node: + return self.extract_text(node) + + return None + + def default(self, datastring): + return {'body': self._from_xml(datastring)} + + +class MetadataXMLDeserializer(XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + metadata = {} + if metadata_node is not None: + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + +class DictSerializer(ActionDispatcher): + """Default request body serialization""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization""" + + def default(self, data): + return utils.dumps(data) + + +class XMLDictSerializer(DictSerializer): + + def __init__(self, metadata=None, xmlns=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLDictSerializer, self).__init__() + self.metadata = metadata or {} + self.xmlns = xmlns + + def default(self, data): + # We expect data to contain a single key which is the XML root. 
+ root_key = data.keys()[0] + doc = minidom.Document() + node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) + + return self.to_xml_string(node) + + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml('UTF-8') + + #NOTE (ameade): the has_atom should be removed after all of the + # xml serializers and view builders have been updated to the current + # spec that required all responses include the xmlns:atom, the has_atom + # flag is to prevent current tests from breaking + def _add_xmlns(self, node, has_atom=False): + if self.xmlns is not None: + node.setAttribute('xmlns', self.xmlns) + if has_atom: + node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") + + def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" + result = doc.createElement(nodename) + + # Set the xml namespace if one is specified + # TODO(justinsb): We could also use prefixes on the keys + xmlns = metadata.get('xmlns', None) + if xmlns: + result.setAttribute('xmlns', xmlns) + + #TODO(bcwaldon): accomplish this without a type-check + if isinstance(data, list): + collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + #TODO(bcwaldon): accomplish this without a type-check + elif isinstance(data, dict): + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: + # Type is atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result + + def _create_link_nodes(self, xml_doc, links): + link_nodes = [] + for link in links: + link_node = xml_doc.createElement('atom:link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + if 'type' in link: + link_node.setAttribute('type', link['type']) + link_nodes.append(link_node) + return link_nodes + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8', xml_declaration=True) + + +def serializers(**serializers): + """Attaches serializers to a method. + + This decorator associates a dictionary of serializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_serializers'): + func.wsgi_serializers = {} + func.wsgi_serializers.update(serializers) + return func + return decorator + + +def deserializers(**deserializers): + """Attaches deserializers to a method. 
+ + This decorator associates a dictionary of deserializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_deserializers'): + func.wsgi_deserializers = {} + func.wsgi_deserializers.update(deserializers) + return func + return decorator + + +def response(code): + """Attaches response code to a method. + + This decorator associates a response code with a method. Note + that the function attributes are directly manipulated; the method + is not wrapped. + """ + + def decorator(func): + func.wsgi_code = code + return func + return decorator + + +class ResponseObject(object): + """Bundles a response object with appropriate serializers. + + Object that app methods may return in order to bind alternate + serializers with a response object to be serialized. Its use is + optional. + """ + + def __init__(self, obj, code=None, **serializers): + """Binds serializers with an object. + + Takes keyword arguments akin to the @serializer() decorator + for specifying serializers. Serializers specified will be + given preference over default serializers or method-specific + serializers on return. + """ + + self.obj = obj + self.serializers = serializers + self._default_code = 200 + self._code = code + self._headers = {} + self.serializer = None + self.media_type = None + + def __getitem__(self, key): + """Retrieves a header with the given name.""" + + return self._headers[key.lower()] + + def __setitem__(self, key, value): + """Sets a header with the given name to the given value.""" + + self._headers[key.lower()] = value + + def __delitem__(self, key): + """Deletes the header with the given name.""" + + del self._headers[key.lower()] + + def _bind_method_serializers(self, meth_serializers): + """Binds method serializers with the response object. + + Binds the method serializers with the response object. + Serializers specified to the constructor will take precedence + over serializers specified to this method. + + :param meth_serializers: A dictionary with keys mapping to + response types and values containing + serializer objects. + """ + + # We can't use update because that would be the wrong + # precedence + for mtype, serializer in meth_serializers.items(): + self.serializers.setdefault(mtype, serializer) + + def get_serializer(self, content_type, default_serializers=None): + """Returns the serializer for the wrapped object. + + Returns the serializer for the wrapped object subject to the + indicated content type. If no serializer matching the content + type is attached, an appropriate serializer drawn from the + default serializers will be used. If no appropriate + serializer is available, raises InvalidContentType. + """ + + default_serializers = default_serializers or {} + + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in self.serializers: + return mtype, self.serializers[mtype] + else: + return mtype, default_serializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def preserialize(self, content_type, default_serializers=None): + """Prepares the serializer that will be used to serialize. + + Determines the serializer that will be used and prepares an + instance of it for later call. This allows the serializer to + be accessed by extensions for, e.g., template extension. 
+ """ + + mtype, serializer = self.get_serializer(content_type, + default_serializers) + self.media_type = mtype + self.serializer = serializer() + + def attach(self, **kwargs): + """Attach slave templates to serializers.""" + + if self.media_type in kwargs: + self.serializer.attach(kwargs[self.media_type]) + + def serialize(self, request, content_type, default_serializers=None): + """Serializes the wrapped object. + + Utility method for serializing the wrapped object. Returns a + webob.Response object. + """ + + if self.serializer: + serializer = self.serializer + else: + _mtype, _serializer = self.get_serializer(content_type, + default_serializers) + serializer = _serializer() + + response = webob.Response() + response.status_int = self.code + for hdr, value in self._headers.items(): + response.headers[hdr] = value + response.headers['Content-Type'] = content_type + if self.obj is not None: + response.body = serializer.serialize(self.obj) + + return response + + @property + def code(self): + """Retrieve the response status.""" + + return self._code or self._default_code + + @property + def headers(self): + """Retrieve the headers.""" + + return self._headers.copy() + + +def action_peek_json(body): + """Determine action to invoke.""" + + try: + decoded = utils.loads(body) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + # Make sure there's exactly one key... + if len(decoded) != 1: + msg = _("too many body keys") + raise exception.MalformedRequestBody(reason=msg) + + # Return the action and the decoded body... + return decoded.keys()[0] + + +def action_peek_xml(body): + """Determine action to invoke.""" + + dom = minidom.parseString(body) + action_node = dom.childNodes[0] + + return action_node.tagName + + +class ResourceExceptionHandler(object): + """Context manager to handle Resource exceptions. + + Used when processing exceptions generated by API implementation + methods (or their extensions). Converts most exceptions to Fault + exceptions, with the appropriate logging. + """ + + def __enter__(self): + return None + + def __exit__(self, ex_type, ex_value, ex_traceback): + if not ex_value: + return True + + if isinstance(ex_value, exception.NotAuthorized): + msg = unicode(ex_value) + raise Fault(webob.exc.HTTPForbidden(explanation=msg)) + elif isinstance(ex_value, exception.Invalid): + raise Fault(exception.ConvertedException( + code=ex_value.code, explanation=unicode(ex_value))) + elif isinstance(ex_value, TypeError): + exc_info = (ex_type, ex_value, ex_traceback) + LOG.error(_('Exception handling resource: %s') % ex_value, + exc_info=exc_info) + raise Fault(webob.exc.HTTPBadRequest()) + elif isinstance(ex_value, Fault): + LOG.info(_("Fault thrown: %s"), unicode(ex_value)) + raise ex_value + elif isinstance(ex_value, webob.exc.HTTPException): + LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) + raise Fault(ex_value) + + # We didn't handle the exception + return False + + +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). 
+ They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + Exceptions derived from webob.exc.HTTPException will be automatically + wrapped in Fault() to provide API friendly error responses. + + """ + + def __init__(self, controller, action_peek=None, **deserializers): + """ + :param controller: object that implement methods created by routes lib + :param action_peek: dictionary of routines for peeking into an action + request body to determine the desired action + """ + + self.controller = controller + + default_deserializers = dict(xml=XMLDeserializer, + json=JSONDeserializer) + default_deserializers.update(deserializers) + + self.default_deserializers = default_deserializers + self.default_serializers = dict(xml=XMLDictSerializer, + json=JSONDictSerializer) + + self.action_peek = dict(xml=action_peek_xml, + json=action_peek_json) + self.action_peek.update(action_peek or {}) + + # Copy over the actions dictionary + self.wsgi_actions = {} + if controller: + self.register_actions(controller) + + # Save a mapping of extensions + self.wsgi_extensions = {} + self.wsgi_action_extensions = {} + + def register_actions(self, controller): + """Registers controller actions with this resource.""" + + actions = getattr(controller, 'wsgi_actions', {}) + for key, method_name in actions.items(): + self.wsgi_actions[key] = getattr(controller, method_name) + + def register_extensions(self, controller): + """Registers controller extensions with this resource.""" + + extensions = getattr(controller, 'wsgi_extensions', []) + for method_name, action_name in extensions: + # Look up the extending method + extension = getattr(controller, method_name) + + if action_name: + # Extending an action... + if action_name not in self.wsgi_action_extensions: + self.wsgi_action_extensions[action_name] = [] + self.wsgi_action_extensions[action_name].append(extension) + else: + # Extending a regular method + if method_name not in self.wsgi_extensions: + self.wsgi_extensions[method_name] = [] + self.wsgi_extensions[method_name].append(extension) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + + # NOTE(Vek): Check for get_action_args() override in the + # controller + if hasattr(self.controller, 'get_action_args'): + return self.controller.get_action_args(request_environment) + + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except (KeyError, IndexError, AttributeError): + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + def get_body(self, request): + try: + content_type = request.get_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return None, '' + + if not content_type: + LOG.debug(_("No Content-Type provided in request")) + return None, '' + + if len(request.body) <= 0: + LOG.debug(_("Empty body provided in request")) + return None, '' + + return content_type, request.body + + def deserialize(self, meth, content_type, body): + meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in meth_deserializers: + deserializer = meth_deserializers[mtype] + else: + deserializer = self.default_deserializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + return deserializer().deserialize(body) + 
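    # NOTE(editor): illustrative usage sketch, not part of the original
    # patch. deserialize() normalizes vendor content types through
    # _MEDIA_TYPE_MAP, prefers a method-specific deserializer (the
    # wsgi_deserializers attribute set by the @deserializers decorator),
    # and otherwise falls back to the defaults wired up in __init__:
    #
    #     resource = Resource(controller=None)
    #     meth = lambda req: None   # stand-in with no wsgi_deserializers
    #     resource.deserialize(meth, 'application/json',
    #                          '{"volume": {"size": 1}}')
    #     # => {'body': {'volume': {'size': 1}}}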
+ def pre_process_extensions(self, extensions, request, action_args): + # List of callables for post-processing extensions + post = [] + + for ext in extensions: + if inspect.isgeneratorfunction(ext): + response = None + + # If it's a generator function, the part before the + # yield is the preprocessing stage + try: + with ResourceExceptionHandler(): + gen = ext(req=request, **action_args) + response = gen.next() + except Fault as ex: + response = ex + + # We had a response... + if response: + return response, [] + + # No response, queue up generator for post-processing + post.append(gen) + else: + # Regular functions only perform post-processing + post.append(ext) + + # Run post-processing in the reverse order + return None, reversed(post) + + def post_process_extensions(self, extensions, resp_obj, request, + action_args): + for ext in extensions: + response = None + if inspect.isgenerator(ext): + # If it's a generator, run the second half of + # processing + try: + with ResourceExceptionHandler(): + response = ext.send(resp_obj) + except StopIteration: + # Normal exit of generator + continue + except Fault as ex: + response = ex + else: + # Regular functions get post-processing... + try: + with ResourceExceptionHandler(): + response = ext(req=request, resp_obj=resp_obj, + **action_args) + except Fault as ex: + response = ex + + # We had a response... + if response: + return response + + return None + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + LOG.info("%(method)s %(url)s" % {"method": request.method, + "url": request.url}) + + # Identify the action, its arguments, and the requested + # content type + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + content_type, body = self.get_body(request) + accept = request.best_match_content_type() + + # NOTE(Vek): Splitting the function up this way allows for + # auditing by external tools that wrap the existing + # function. If we try to audit __call__(), we can + # run into troubles due to the @webob.dec.wsgify() + # decorator. + return self._process_stack(request, action, action_args, + content_type, body, accept) + + def _process_stack(self, request, action, action_args, + content_type, body, accept): + """Implement the processing stack.""" + + # Get the implementing method + try: + meth, extensions = self.get_method(request, action, + content_type, body) + except (AttributeError, TypeError): + return Fault(webob.exc.HTTPNotFound()) + except KeyError as ex: + msg = _("There is no such action: %s") % ex.args[0] + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Now, deserialize the request body... 
+ try: + if content_type: + contents = self.deserialize(meth, content_type, body) + else: + contents = {} + except exception.InvalidContentType: + msg = _("Unsupported Content-Type") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Update the action args + action_args.update(contents) + + project_id = action_args.pop("project_id", None) + context = request.environ.get('cinder.context') + if (context and project_id and (project_id != context.project_id)): + msg = _("Malformed request url") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Run pre-processing extensions + response, post = self.pre_process_extensions(extensions, + request, action_args) + + if not response: + try: + with ResourceExceptionHandler(): + action_result = self.dispatch(meth, request, action_args) + except Fault as ex: + response = ex + + if not response: + # No exceptions; convert action_result into a + # ResponseObject + resp_obj = None + if type(action_result) is dict or action_result is None: + resp_obj = ResponseObject(action_result) + elif isinstance(action_result, ResponseObject): + resp_obj = action_result + else: + response = action_result + + # Run post-processing extensions + if resp_obj: + _set_request_id_header(request, resp_obj) + # Do a preserialize to set up the response object + serializers = getattr(meth, 'wsgi_serializers', {}) + resp_obj._bind_method_serializers(serializers) + if hasattr(meth, 'wsgi_code'): + resp_obj._default_code = meth.wsgi_code + resp_obj.preserialize(accept, self.default_serializers) + + # Process post-processing extensions + response = self.post_process_extensions(post, resp_obj, + request, action_args) + + if resp_obj and not response: + response = resp_obj.serialize(request, accept, + self.default_serializers) + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError, e: + msg_dict = dict(url=request.url, e=e) + msg = _("%(url)s returned a fault: %(e)s") % msg_dict + + LOG.info(msg) + + return response + + def get_method(self, request, action, content_type, body): + """Look up the action-specific method and its extensions.""" + + # Look up the method + try: + if not self.controller: + meth = getattr(self, action) + else: + meth = getattr(self.controller, action) + except AttributeError: + if (not self.wsgi_actions or + action not in ['action', 'create', 'delete']): + # Propagate the error + raise + else: + return meth, self.wsgi_extensions.get(action, []) + + if action == 'action': + # OK, it's an action; figure out which action... + mtype = _MEDIA_TYPE_MAP.get(content_type) + action_name = self.action_peek[mtype](body) + else: + action_name = action + + # Look up the action method + return (self.wsgi_actions[action_name], + self.wsgi_action_extensions.get(action_name, [])) + + def dispatch(self, method, request, action_args): + """Dispatch a call to the action-specific method.""" + + return method(req=request, **action_args) + + +def action(name): + """Mark a function as an action. + + The given name will be taken as the action key in the body. + + This is also overloaded to allow extensions to provide + non-extending definitions of create and delete operations. 
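    For example (an editor's illustration, not part of the original
    patch), the types-manage extension earlier in this patch uses this
    overload to supply the create operation for the types resource::

        @wsgi.action("create")
        def _create(self, req, body):
            ...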
+ """ + + def decorator(func): + func.wsgi_action = name + return func + return decorator + + +def extends(*args, **kwargs): + """Indicate a function extends an operation. + + Can be used as either:: + + @extends + def index(...): + pass + + or as:: + + @extends(action='resize') + def _action_resize(...): + pass + """ + + def decorator(func): + # Store enough information to find what we're extending + func.wsgi_extends = (func.__name__, kwargs.get('action')) + return func + + # If we have positional arguments, call the decorator + if args: + return decorator(*args) + + # OK, return the decorator instead + return decorator + + +class ControllerMetaclass(type): + """Controller metaclass. + + This metaclass automates the task of assembling a dictionary + mapping action keys to method names. + """ + + def __new__(mcs, name, bases, cls_dict): + """Adds the wsgi_actions dictionary to the class.""" + + # Find all actions + actions = {} + extensions = [] + for key, value in cls_dict.items(): + if not callable(value): + continue + if getattr(value, 'wsgi_action', None): + actions[value.wsgi_action] = key + elif getattr(value, 'wsgi_extends', None): + extensions.append(value.wsgi_extends) + + # Add the actions and extensions to the class dict + cls_dict['wsgi_actions'] = actions + cls_dict['wsgi_extensions'] = extensions + + return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, + cls_dict) + + +class Controller(object): + """Default controller.""" + + __metaclass__ = ControllerMetaclass + + _view_builder_class = None + + def __init__(self, view_builder=None): + """Initialize controller with a view builder instance.""" + if view_builder: + self._view_builder = view_builder + elif self._view_builder_class: + self._view_builder = self._view_builder_class() + else: + self._view_builder = None + + +class Fault(webob.exc.HTTPException): + """Wrap webob.exc.HTTPException to provide API friendly response.""" + + _fault_names = { + 400: "badRequest", + 401: "unauthorized", + 403: "forbidden", + 404: "itemNotFound", + 405: "badMethod", + 409: "conflictingRequest", + 413: "overLimit", + 415: "badMediaType", + 501: "notImplemented", + 503: "serviceUnavailable"} + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + self.status_int = exception.status_int + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + # Replace the body with fault details. + code = self.wrapped_exc.status_int + fault_name = self._fault_names.get(code, "computeFault") + fault_data = { + fault_name: { + 'code': code, + 'message': self.wrapped_exc.explanation}} + if code == 413: + retry = self.wrapped_exc.headers['Retry-After'] + fault_data[fault_name]['retryAfter'] = retry + + # 'code' is an attribute on the fault tag itself + metadata = {'attributes': {fault_name: 'code'}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) + + content_type = req.best_match_content_type() + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) + self.wrapped_exc.content_type = content_type + _set_request_id_header(req, self.wrapped_exc.headers) + + return self.wrapped_exc + + def __str__(self): + return self.wrapped_exc.__str__() + + +class OverLimitFault(webob.exc.HTTPException): + """ + Rate-limited request response. 
+ """ + + def __init__(self, message, details, retry_time): + """ + Initialize new `OverLimitFault` with relevant information. + """ + hdrs = OverLimitFault._retry_after(retry_time) + self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) + self.content = { + "overLimitFault": { + "code": self.wrapped_exc.status_int, + "message": message, + "details": details, + }, + } + + @staticmethod + def _retry_after(retry_time): + delay = int(math.ceil(retry_time - time.time())) + retry_after = delay if delay > 0 else 0 + headers = {'Retry-After': '%d' % retry_after} + return headers + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """ + Return the wrapped exception with a serialized body conforming to our + error format. + """ + content_type = request.best_match_content_type() + metadata = {"attributes": {"overLimitFault": "code"}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + content = serializer.serialize(self.content) + self.wrapped_exc.body = content + + return self.wrapped_exc + + +def _set_request_id_header(req, headers): + context = req.environ.get('cinder.context') + if context: + headers['x-compute-request-id'] = context.request_id diff --git a/cinder/api/openstack/xmlutil.py b/cinder/api/openstack/xmlutil.py new file mode 100644 index 00000000000..5dfe0c1220b --- /dev/null +++ b/cinder/api/openstack/xmlutil.py @@ -0,0 +1,908 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os.path + +from lxml import etree + +from cinder import utils + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' +XMLNS_VOLUME_V1 = 'http://docs.openstack.org/volume/api/v1' + + +def validate_schema(xml, schema_name): + if isinstance(xml, str): + xml = etree.fromstring(xml) + base_path = 'cinder/api/openstack/compute/schemas/v1.1/' + if schema_name in ('atom', 'atom-link'): + base_path = 'cinder/api/openstack/compute/schemas/' + schema_path = os.path.join(utils.cinderdir(), + '%s%s.rng' % (base_path, schema_name)) + schema_doc = etree.parse(schema_path) + relaxng = etree.RelaxNG(schema_doc) + relaxng.assertValid(xml) + + +class Selector(object): + """Selects datum to operate on from an object.""" + + def __init__(self, *chain): + """Initialize the selector. + + Each argument is a subsequent index into the object. + """ + + self.chain = chain + + def __repr__(self): + """Return a representation of the selector.""" + + return "Selector" + repr(self.chain) + + def __call__(self, obj, do_raise=False): + """Select a datum to operate on. + + Selects the relevant datum within the object. + + :param obj: The object from which to select the object. 
+ :param do_raise: If False (the default), return None if the + indexed datum does not exist. Otherwise, + raise a KeyError. + """ + + # Walk the selector list + for elem in self.chain: + # If it's callable, call it + if callable(elem): + obj = elem(obj) + else: + # Use indexing + try: + obj = obj[elem] + except (KeyError, IndexError): + # No sense going any further + if do_raise: + # Convert to a KeyError, for consistency + raise KeyError(elem) + return None + + # Return the finally-selected object + return obj + + +def get_items(obj): + """Get items in obj.""" + + return list(obj.items()) + + +class EmptyStringSelector(Selector): + """Returns the empty string if Selector would return None.""" + def __call__(self, obj, do_raise=False): + """Returns empty string if the selected value does not exist.""" + + try: + return super(EmptyStringSelector, self).__call__(obj, True) + except KeyError: + return "" + + +class ConstantSelector(object): + """Returns a constant.""" + + def __init__(self, value): + """Initialize the selector. + + :param value: The value to return. + """ + + self.value = value + + def __repr__(self): + """Return a representation of the selector.""" + + return repr(self.value) + + def __call__(self, _obj, _do_raise=False): + """Select a datum to operate on. + + Returns a constant value. Compatible with + Selector.__call__(). + """ + + return self.value + + +class TemplateElement(object): + """Represent an element in the template.""" + + def __init__(self, tag, attrib=None, selector=None, subselector=None, + **extra): + """Initialize an element. + + Initializes an element in the template. Keyword arguments + specify attributes to be set on the element; values must be + callables. See TemplateElement.set() for more information. + + :param tag: The name of the tag to create. + :param attrib: An optional dictionary of element attributes. + :param selector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + :param subselector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + This is used to further refine the datum + object returned by selector in the event + that it is a list of objects. 
+ """ + + # Convert selector into a Selector + if selector is None: + selector = Selector() + elif not callable(selector): + selector = Selector(selector) + + # Convert subselector into a Selector + if subselector is not None and not callable(subselector): + subselector = Selector(subselector) + + self.tag = tag + self.selector = selector + self.subselector = subselector + self.attrib = {} + self._text = None + self._children = [] + self._childmap = {} + + # Run the incoming attributes through set() so that they + # become selectorized + if not attrib: + attrib = {} + attrib.update(extra) + for k, v in attrib.items(): + self.set(k, v) + + def __repr__(self): + """Return a representation of the template element.""" + + return ('<%s.%s %r at %#x>' % + (self.__class__.__module__, self.__class__.__name__, + self.tag, id(self))) + + def __len__(self): + """Return the number of child elements.""" + + return len(self._children) + + def __contains__(self, key): + """Determine whether a child node named by key exists.""" + + return key in self._childmap + + def __getitem__(self, idx): + """Retrieve a child node by index or name.""" + + if isinstance(idx, basestring): + # Allow access by node name + return self._childmap[idx] + else: + return self._children[idx] + + def append(self, elem): + """Append a child to the element.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.append(elem) + self._childmap[elem.tag] = elem + + def extend(self, elems): + """Append children to the element.""" + + # Pre-evaluate the elements + elemmap = {} + elemlist = [] + for elem in elems: + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap or elem.tag in elemmap: + raise KeyError(elem.tag) + + elemmap[elem.tag] = elem + elemlist.append(elem) + + # Update the children + self._children.extend(elemlist) + self._childmap.update(elemmap) + + def insert(self, idx, elem): + """Insert a child element at the given index.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.insert(idx, elem) + self._childmap[elem.tag] = elem + + def remove(self, elem): + """Remove a child element.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Check if element exists + if elem.tag not in self._childmap or self._childmap[elem.tag] != elem: + raise ValueError(_('element is not a child')) + + self._children.remove(elem) + del self._childmap[elem.tag] + + def get(self, key): + """Get an attribute. + + Returns a callable which performs datum selection. + + :param key: The name of the attribute to get. + """ + + return self.attrib[key] + + def set(self, key, value=None): + """Set an attribute. + + :param key: The name of the attribute to set. + + :param value: A callable taking an object and optional boolean + do_raise indicator and returning the datum bound + to the attribute. If None, a Selector() will be + constructed from the key. If a string, a + Selector() will be constructed from the string. 
+ """ + + # Convert value to a selector + if value is None: + value = Selector(key) + elif not callable(value): + value = Selector(value) + + self.attrib[key] = value + + def keys(self): + """Return the attribute names.""" + + return self.attrib.keys() + + def items(self): + """Return the attribute names and values.""" + + return self.attrib.items() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # We are a template element + return self + + def wrap(self): + """Wraps a template element to return a template.""" + + # Wrap in a basic Template + return Template(self) + + def apply(self, elem, obj): + """Apply text and attributes to an etree.Element. + + Applies the text and attribute instructions in the template + element to an etree.Element instance. + + :param elem: An etree.Element instance. + :param obj: The base object associated with this template + element. + """ + + # Start with the text... + if self.text is not None: + elem.text = unicode(self.text(obj)) + + # Now set up all the attributes... + for key, value in self.attrib.items(): + try: + elem.set(key, unicode(value(obj, True))) + except KeyError: + # Attribute has no value, so don't include it + pass + + def _render(self, parent, datum, patches, nsmap): + """Internal rendering. + + Renders the template node into an etree.Element object. + Returns the etree.Element object. + + :param parent: The parent etree.Element instance. + :param datum: The datum associated with this template element. + :param patches: A list of other template elements that must + also be applied. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance. + """ + + # Allocate a node + if callable(self.tag): + tagname = self.tag(datum) + else: + tagname = self.tag + elem = etree.Element(tagname, nsmap=nsmap) + + # If we have a parent, append the node to the parent + if parent is not None: + parent.append(elem) + + # If the datum is None, do nothing else + if datum is None: + return elem + + # Apply this template element to the element + self.apply(elem, datum) + + # Additionally, apply the patches + for patch in patches: + patch.apply(elem, datum) + + # We have fully rendered the element; return it + return elem + + def render(self, parent, obj, patches=[], nsmap=None): + """Render an object. + + Renders an object against this template node. Returns a list + of two-item tuples, where the first item is an etree.Element + instance and the second item is the datum associated with that + instance. + + :param parent: The parent for the etree.Element instances. + :param obj: The object to render this template element + against. + :param patches: A list of other template elements to apply + when rendering this template element. + :param nsmap: An optional namespace dictionary to attach to + the etree.Element instances. 
+ """ + + # First, get the datum we're rendering + data = None if obj is None else self.selector(obj) + + # Check if we should render at all + if not self.will_render(data): + return [] + elif data is None: + return [(self._render(parent, None, patches, nsmap), None)] + + # Make the data into a list if it isn't already + if not isinstance(data, list): + data = [data] + elif parent is None: + raise ValueError(_('root element selecting a list')) + + # Render all the elements + elems = [] + for datum in data: + if self.subselector is not None: + datum = self.subselector(datum) + elems.append((self._render(parent, datum, patches, nsmap), datum)) + + # Return all the elements rendered, as well as the + # corresponding datum for the next step down the tree + return elems + + def will_render(self, datum): + """Hook method. + + An overridable hook method to determine whether this template + element will be rendered at all. By default, returns False + (inhibiting rendering) if the datum is None. + + :param datum: The datum associated with this template element. + """ + + # Don't render if datum is None + return datum is not None + + def _text_get(self): + """Template element text. + + Either None or a callable taking an object and optional + boolean do_raise indicator and returning the datum bound to + the text of the template element. + """ + + return self._text + + def _text_set(self, value): + # Convert value to a selector + if value is not None and not callable(value): + value = Selector(value) + + self._text = value + + def _text_del(self): + self._text = None + + text = property(_text_get, _text_set, _text_del) + + def tree(self): + """Return string representation of the template tree. + + Returns a representation of the template rooted at this + element as a string, suitable for inclusion in debug logs. + """ + + # Build the inner contents of the tag... + contents = [self.tag, '!selector=%r' % self.selector] + + # Add the text... + if self.text is not None: + contents.append('!text=%r' % self.text) + + # Add all the other attributes + for key, value in self.attrib.items(): + contents.append('%s=%r' % (key, value)) + + # If there are no children, return it as a closed tag + if len(self) == 0: + return '<%s/>' % ' '.join([str(i) for i in contents]) + + # OK, recurse to our children + children = [c.tree() for c in self] + + # Return the result + return ('<%s>%s' % + (' '.join(contents), ''.join(children), self.tag)) + + +def SubTemplateElement(parent, tag, attrib=None, selector=None, + subselector=None, **extra): + """Create a template element as a child of another. + + Corresponds to the etree.SubElement interface. Parameters are as + for TemplateElement, with the addition of the parent. + """ + + # Convert attributes + attrib = attrib or {} + attrib.update(extra) + + # Get a TemplateElement + elem = TemplateElement(tag, attrib=attrib, selector=selector, + subselector=subselector) + + # Append the parent safely + if parent is not None: + parent.append(elem) + + return elem + + +class Template(object): + """Represent a template.""" + + def __init__(self, root, nsmap=None): + """Initialize a template. + + :param root: The root element of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. 
+ """ + + self.root = root.unwrap() if root is not None else None + self.nsmap = nsmap or {} + self.serialize_options = dict(encoding='UTF-8', xml_declaration=True) + + def _serialize(self, parent, obj, siblings, nsmap=None): + """Internal serialization. + + Recursive routine to build a tree of etree.Element instances + from an object based on the template. Returns the first + etree.Element instance rendered, or None. + + :param parent: The parent etree.Element instance. Can be + None. + :param obj: The object to render. + :param siblings: The TemplateElement instances against which + to render the object. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance + rendered. + """ + + # First step, render the element + elems = siblings[0].render(parent, obj, siblings[1:], nsmap) + + # Now, recurse to all child elements + seen = set() + for idx, sibling in enumerate(siblings): + for child in sibling: + # Have we handled this child already? + if child.tag in seen: + continue + seen.add(child.tag) + + # Determine the child's siblings + nieces = [child] + for sib in siblings[idx + 1:]: + if child.tag in sib: + nieces.append(sib[child.tag]) + + # Now we recurse for every data element + for elem, datum in elems: + self._serialize(elem, datum, nieces) + + # Return the first element; at the top level, this will be the + # root element + if elems: + return elems[0][0] + + def serialize(self, obj, *args, **kwargs): + """Serialize an object. + + Serializes an object against the template. Returns a string + with the serialized XML. Positional and keyword arguments are + passed to etree.tostring(). + + :param obj: The object to serialize. + """ + + elem = self.make_tree(obj) + if elem is None: + return '' + + for k, v in self.serialize_options.items(): + kwargs.setdefault(k, v) + + # Serialize it into XML + return etree.tostring(elem, *args, **kwargs) + + def make_tree(self, obj): + """Create a tree. + + Serializes an object against the template. Returns an Element + node with appropriate children. + + :param obj: The object to serialize. + """ + + # If the template is empty, return the empty string + if self.root is None: + return None + + # Get the siblings and nsmap of the root element + siblings = self._siblings() + nsmap = self._nsmap() + + # Form the element tree + return self._serialize(None, obj, siblings, nsmap) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. By default, this is the root element itself. + """ + + return [self.root] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + """ + + return self.nsmap.copy() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # Return the root element + return self.root + + def wrap(self): + """Wraps a template element to return a template.""" + + # We are a template + return self + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. + + :param master: The master template to test. + """ + + return True + + def tree(self): + """Return string representation of the template tree. + + Returns a representation of the template as a string, suitable + for inclusion in debug logs. 
+ """ + + return "%r: %s" % (self, self.root.tree()) + + +class MasterTemplate(Template): + """Represent a master template. + + Master templates are versioned derivatives of templates that + additionally allow slave templates to be attached. Slave + templates allow modification of the serialized result without + directly changing the master. + """ + + def __init__(self, root, version, nsmap=None): + """Initialize a master template. + + :param root: The root element of the template. + :param version: The version number of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + super(MasterTemplate, self).__init__(root, nsmap) + self.version = version + self.slaves = [] + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object version %s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.version, id(self))) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. This is the root element plus the root elements of + all the slave templates. + """ + + return [self.root] + [slave.root for slave in self.slaves] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + The namespace dictionary is computed by taking the master + template's namespace dictionary and updating it from all the + slave templates. + """ + + nsmap = self.nsmap.copy() + for slave in self.slaves: + nsmap.update(slave._nsmap()) + return nsmap + + def attach(self, *slaves): + """Attach one or more slave templates. + + Attaches one or more slave templates to the master template. + Slave templates must have a root element with the same tag as + the master template. The slave template's apply() method will + be called to determine if the slave should be applied to this + master; if it returns False, that slave will be skipped. + (This allows filtering of slaves based on the version of the + master template.) + """ + + slave_list = [] + for slave in slaves: + slave = slave.wrap() + + # Make sure we have a tree match + if slave.root.tag != self.root.tag: + slavetag = slave.root.tag + mastertag = self.root.tag + msg = _("Template tree mismatch; adding slave %(slavetag)s " + "to master %(mastertag)s") % locals() + raise ValueError(msg) + + # Make sure slave applies to this template + if not slave.apply(self): + continue + + slave_list.append(slave) + + # Add the slaves + self.slaves.extend(slave_list) + + def copy(self): + """Return a copy of this master template.""" + + # Return a copy of the MasterTemplate + tmp = self.__class__(self.root, self.version, self.nsmap) + tmp.slaves = self.slaves[:] + return tmp + + +class SlaveTemplate(Template): + """Represent a slave template. + + Slave templates are versioned derivatives of templates. Each + slave has a minimum version and optional maximum version of the + master template to which they can be attached. + """ + + def __init__(self, root, min_vers, max_vers=None, nsmap=None): + """Initialize a slave template. + + :param root: The root element of the template. + :param min_vers: The minimum permissible version of the master + template for this slave template to apply. + :param max_vers: An optional upper bound for the master + template version. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. 
+ """ + + super(SlaveTemplate, self).__init__(root, nsmap) + self.min_vers = min_vers + self.max_vers = max_vers + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object versions %s-%s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.min_vers, self.max_vers, id(self))) + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. This + version requires the master template to have a version number + between min_vers and max_vers. + + :param master: The master template to test. + """ + + # Does the master meet our minimum version requirement? + if master.version < self.min_vers: + return False + + # How about our maximum version requirement? + if self.max_vers is not None and master.version > self.max_vers: + return False + + return True + + +class TemplateBuilder(object): + """Template builder. + + This class exists to allow templates to be lazily built without + having to build them each time they are needed. It must be + subclassed, and the subclass must implement the construct() + method, which must return a Template (or subclass) instance. The + constructor will always return the template returned by + construct(), or, if it has a copy() method, a copy of that + template. + """ + + _tmpl = None + + def __new__(cls, copy=True): + """Construct and return a template. + + :param copy: If True (the default), a copy of the template + will be constructed and returned, if possible. + """ + + # Do we need to construct the template? + if cls._tmpl is None: + tmp = super(TemplateBuilder, cls).__new__(cls) + + # Construct the template + cls._tmpl = tmp.construct() + + # If the template has a copy attribute, return the result of + # calling it + if copy and hasattr(cls._tmpl, 'copy'): + return cls._tmpl.copy() + + # Return the template + return cls._tmpl + + def construct(self): + """Construct a template. + + Called to construct a template instance, which it must return. + Only called once. + """ + + raise NotImplementedError(_("subclasses must implement construct()!")) + + +def make_links(parent, selector=None): + """ + Attach an Atom element to the parent. + """ + + elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM, + selector=selector) + elem.set('rel') + elem.set('type') + elem.set('href') + + # Just for completeness... + return elem + + +def make_flat_dict(name, selector=None, subselector=None, ns=None): + """ + Utility for simple XML templates that traditionally used + XMLDictSerializer with no metadata. Returns a template element + where the top-level element has the given tag name, and where + sub-elements have tag names derived from the object's keys and + text derived from the object's values. This only works for flat + dictionary objects, not dictionaries containing nested lists or + dictionaries. + """ + + # Set up the names we need... 
+ if ns is None: + elemname = name + tagname = Selector(0) + else: + elemname = '{%s}%s' % (ns, name) + tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0]) + + if selector is None: + selector = name + + # Build the root element + root = TemplateElement(elemname, selector=selector, + subselector=subselector) + + # Build an element to represent all the keys and values + elem = SubTemplateElement(root, tagname, selector=get_items) + elem.text = 1 + + # Return the template + return root diff --git a/cinder/api/sizelimit.py b/cinder/api/sizelimit.py new file mode 100644 index 00000000000..3bde3bf8dc0 --- /dev/null +++ b/cinder/api/sizelimit.py @@ -0,0 +1,54 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Request Body limiting middleware. + +""" + +import webob.dec +import webob.exc + +from cinder import context +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder import wsgi + + +# Default request body size limit is 112k (114688 bytes) +max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size', + default=114688, + help='Maximum size (in bytes) allowed ' + 'for the body of a request') + +FLAGS = flags.FLAGS +FLAGS.register_opt(max_request_body_size_opt) +LOG = logging.getLogger(__name__) + + +class RequestBodySizeLimiter(wsgi.Middleware): + """Reject requests whose body exceeds the configured size limit.""" + + def __init__(self, *args, **kwargs): + super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if (req.content_length > FLAGS.osapi_max_request_body_size + or len(req.body) > FLAGS.osapi_max_request_body_size): + msg = _("Request is too large.") + raise webob.exc.HTTPBadRequest(explanation=msg) + else: + return self.application diff --git a/cinder/common/__init__.py b/cinder/common/__init__.py new file mode 100644 index 00000000000..0a3b98867a2 --- /dev/null +++ b/cinder/common/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/common/memorycache.py b/cinder/common/memorycache.py new file mode 100644 index 00000000000..564526092d0 --- /dev/null +++ b/cinder/common/memorycache.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Super simple fake memcache client.""" + +from cinder import utils + + +class Client(object): + """Replicates a tiny subset of memcached client interface.""" + + def __init__(self, *args, **kwargs): + """Ignores the passed in args.""" + self.cache = {} + + def get(self, key): + """Retrieves the value for a key or None. + + This expunges expired keys during each get.""" + + for k in self.cache.keys(): + (timeout, _value) = self.cache[k] + if timeout and utils.utcnow_ts() >= timeout: + del self.cache[k] + + return self.cache.get(key, (0, None))[1] + + def set(self, key, value, time=0, min_compress_len=0): + """Sets the value for a key.""" + timeout = 0 + if time != 0: + timeout = utils.utcnow_ts() + time + self.cache[key] = (timeout, value) + return True + + def add(self, key, value, time=0, min_compress_len=0): + """Sets the value for a key if it doesn't exist.""" + if self.get(key) is not None: + return False + return self.set(key, value, time, min_compress_len) + + def incr(self, key, delta=1): + """Increments the value for a key.""" + value = self.get(key) + if value is None: + return None + new_value = int(value) + delta + self.cache[key] = (self.cache[key][0], str(new_value)) + return new_value diff --git a/cinder/common/policy.py b/cinder/common/policy.py new file mode 100644 index 00000000000..ec944a1ccb6 --- /dev/null +++ b/cinder/common/policy.py @@ -0,0 +1,222 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Common Policy Engine Implementation""" + +import json +import urllib +import urllib2 + + +class NotAuthorized(Exception): + pass + + +_BRAIN = None + + +def set_brain(brain): + """Set the brain used by enforce(). + + Defaults to Brain() if not set. + + """ + global _BRAIN + _BRAIN = brain + + +def reset(): + """Clear the brain used by enforce().""" + global _BRAIN + _BRAIN = None + + +def enforce(match_list, target_dict, credentials_dict): + """Enforces authorization of some rules against credentials. + + :param match_list: nested tuples of data to match against + + The basic brain supports three types of match lists: + + 1) rules + + looks like: ``('rule:compute:get_instance',)`` + + Retrieves the named rule from the rules dict and recursively + checks against the contents of the rule. + + 2) roles + + looks like: ``('role:compute:admin',)`` + + Matches if the specified role is in credentials_dict['roles'].
+ + 3) generic + + looks like: ``('tenant_id:%(tenant_id)s',)`` + + Substitutes values from the target dict into the match using + the % operator and matches them against the creds dict. + + Combining rules: + + The brain returns True if any of the outer tuple of rules + match and also True if all of the inner tuples match. You + can use this to perform simple boolean logic. For + example, the following rule would return True if the creds + contain the role 'admin' OR if the tenant_id matches + the target dict AND the creds contain the role + 'compute_sysadmin': + + :: + + { + "rule:combined": ( + 'role:admin', + ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin') + ) + } + + Note that rule and role are reserved words in the credentials match, so + you can't match against properties with those names. Custom brains may + also add new reserved words. For example, the HttpBrain adds http as a + reserved word. + + :param target_dict: dict of object properties + + Target dicts contain as much information as we can about the object being + operated on. + + :param credentials_dict: dict of actor properties + + Credentials dicts contain as much information as we can about the user + performing the action. + + :raises NotAuthorized: if the check fails + + """ + global _BRAIN + if not _BRAIN: + _BRAIN = Brain() + if not _BRAIN.check(match_list, target_dict, credentials_dict): + raise NotAuthorized() + + +class Brain(object): + """Implements policy checking.""" + @classmethod + def load_json(cls, data, default_rule=None): + """Init a brain using json instead of a rules dictionary.""" + rules_dict = json.loads(data) + return cls(rules=rules_dict, default_rule=default_rule) + + def __init__(self, rules=None, default_rule=None): + self.rules = rules or {} + self.default_rule = default_rule + + def add_rule(self, key, match): + self.rules[key] = match + + def _check(self, match, target_dict, cred_dict): + match_kind, match_value = match.split(':', 1) + try: + f = getattr(self, '_check_%s' % match_kind) + except AttributeError: + if not self._check_generic(match, target_dict, cred_dict): + return False + else: + if not f(match_value, target_dict, cred_dict): + return False + return True + + def check(self, match_list, target_dict, cred_dict): + """Checks authorization of some rules against credentials. + + Detailed description of the check with examples in policy.enforce(). + + :param match_list: nested tuples of data to match against + :param target_dict: dict of object properties + :param credentials_dict: dict of actor properties + + :returns: True if the check passes + + """ + if not match_list: + return True + for and_list in match_list: + if isinstance(and_list, basestring): + and_list = (and_list,) + if all([self._check(item, target_dict, cred_dict) + for item in and_list]): + return True + return False + + def _check_rule(self, match, target_dict, cred_dict): + """Recursively checks credentials based on the brain's rules.""" + try: + new_match_list = self.rules[match] + except KeyError: + if self.default_rule and match != self.default_rule: + new_match_list = ('rule:%s' % self.default_rule,) + else: + return False + + return self.check(new_match_list, target_dict, cred_dict) + + def _check_role(self, match, target_dict, cred_dict): + """Check that there is a matching role in the cred dict.""" + return match.lower() in [x.lower() for x in cred_dict['roles']] + + def _check_generic(self, match, target_dict, cred_dict): + """Check an individual match.
+ + Matches look like: + + tenant:%(tenant_id)s + role:compute:admin + + """ + + # TODO(termie): do dict inspection via dot syntax + match = match % target_dict + key, value = match.split(':', 1) + if key in cred_dict: + return value == cred_dict[key] + return False + + +class HttpBrain(Brain): + """A brain that can check external urls for policy. + + Posts json blobs for target and credentials. + + """ + + def _check_http(self, match, target_dict, cred_dict): + """Check http: rules by calling to a remote server. + + This example implementation simply verifies that the response is + exactly 'True'. A custom brain using response codes could easily + be implemented. + + """ + url = match % target_dict + data = {'target': json.dumps(target_dict), + 'credentials': json.dumps(cred_dict)} + post_data = urllib.urlencode(data) + f = urllib2.urlopen(url, post_data) + return f.read() == "True" diff --git a/cinder/compat/__init__.py b/cinder/compat/__init__.py new file mode 100644 index 00000000000..8f085d939ff --- /dev/null +++ b/cinder/compat/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/compat/flagfile.py b/cinder/compat/flagfile.py new file mode 100644 index 00000000000..9690217b29d --- /dev/null +++ b/cinder/compat/flagfile.py @@ -0,0 +1,188 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib +import os +import shutil +import tempfile + +''' +Compatibility code for handling the deprecated --flagfile option. + +gflags style configuration files are deprecated and will be removed in the +future. + +The code in this module translates --flagfile options into --config-file and +can be removed when support for --flagfile is removed. +''' + + +def _get_flagfile(argp): + '''Parse the filename from a --flagfile argument. + + The current and next arguments are passed as a 2 item list. If the + flagfile filename is in the next argument, the two arguments are + joined into the first item while the second item is set to None.
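+ + For example, the hypothetical pair ['--flagfile', 'nova.conf'] + is first rewritten to ['--flagfile=nova.conf', None], after + which 'nova.conf' is returned.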
+ ''' + i = argp[0].find('-flagfile') + if i < 0: + return None + + # Accept -flagfile or --flagfile + if i != 0 and (i != 1 or argp[0][0] != '-'): + return None + + i += len('-flagfile') + if i == len(argp[0]): # Accept [-]-flagfile foo + argp[0] += '=' + argp[1] + argp[1] = None + + if argp[0][i] != '=': # Accept [-]-flagfile=foo + return None + + return argp[0][i + 1:] + + +def _open_file_for_reading(path): + '''Helper method which test code may stub out.''' + return open(path, 'r') + + +def _open_fd_for_writing(fd, _path): + '''Helper method which test code may stub out.''' + return os.fdopen(fd, 'w') + + +def _read_lines(flagfile): + '''Read a flag file, returning all lines with comments stripped.''' + with _open_file_for_reading(flagfile) as f: + lines = f.readlines() + ret = [] + for l in lines: + if l.isspace() or l.startswith('#') or l.startswith('//'): + continue + ret.append(l.strip()) + return ret + + +def _read_flagfile(arg, next_arg, tempdir=None): + '''Convert a --flagfile argument to --config-file. + + If the supplied argument is a --flagfile argument, read the contents + of the file and convert it to a .ini format config file. Return a + --config-file argument with the converted file. + + If the flag file contains more --flagfile arguments, multiple + --config-file arguments will be returned. + + The returned argument list may also contain None values which should + be filtered out later. + ''' + argp = [arg, next_arg] + flagfile = _get_flagfile(argp) + if not flagfile: + return argp + + args = _read_lines(flagfile) + + if args and not args[0].startswith('--'): + # This is a config file, not a flagfile, so return it. + return ['--config-file=' + flagfile] + argp[1:] + + # + # We're recursing here to convert any --flagfile arguments + # read from this flagfile into --config-file arguments + # + # We don't actually include those --config-file arguments + # in the generated config file; instead we include all those + # --config-file args in the final command line + # + args = _iterate_args(args, _read_flagfile, tempdir=tempdir) + + config_file_args = [] + + (fd, tmpconf) = tempfile.mkstemp(suffix='.conf', dir=tempdir) + + with _open_fd_for_writing(fd, tmpconf) as f: + f.write('[DEFAULT]\n') + for arg in args: + if arg.startswith('--config-file='): + config_file_args.append(arg) + continue + if '=' in arg: + f.write(arg[2:] + '\n') + elif arg[2:].startswith('no'): + f.write(arg[4:] + '=false\n') + else: + f.write(arg[2:] + '=true\n') + + return ['--config-file=' + tmpconf] + argp[1:] + config_file_args + + +def _iterate_args(args, iterator, **kwargs): + '''Run an iterator function on the supplied args list. + + The iterator is passed the current arg and next arg and returns a + list of args. The returned args replace the supplied args in the + resulting args list. + + The iterator will be passed None for the next arg when processing + the last arg. + ''' + args.append(None) + + ret = [] + for i in range(len(args)): + if args[i] is None: # last item, or consumed file name + continue + + modified = iterator(args[i], args[i + 1], **kwargs) + args[i], args[i + 1] = modified[:2] + + ret.extend(modified[:1] + modified[2:]) # don't append next arg + + return filter(None, ret) + + +def handle_flagfiles(args, tempdir=None): + '''Replace --flagfile arguments with --config-file arguments. + + Replace any --flagfile argument in the supplied list with a --config-file + argument containing a temporary config file with the contents of the flag + file translated to .ini format.
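+ + For example, a flag file line --volume_group=cinder-volumes (a + hypothetical option) is written to the temporary file as + volume_group=cinder-volumes under a [DEFAULT] section, and the + original argument is replaced by a --config-file= argument + pointing at that file.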
+ + The tempdir argument is a directory which will be used to create temporary + files. + ''' + return _iterate_args(args[:], _read_flagfile, tempdir=tempdir) + + +@contextlib.contextmanager +def handle_flagfiles_managed(args): + '''A context manager for handle_flagfiles() which removes temp files. + + For use with the 'with' statement, i.e.:: + + with handle_flagfiles_managed(args) as args: + # Do stuff + # Any temporary files have been removed + ''' + # NOTE(johannes): Would be nice to use utils.tempdir(), but it + # causes an import loop + tempdir = tempfile.mkdtemp(prefix='cinder-conf-') + try: + yield handle_flagfiles(args, tempdir=tempdir) + finally: + shutil.rmtree(tempdir) diff --git a/cinder/compute/__init__.py b/cinder/compute/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/compute/aggregate_states.py b/cinder/compute/aggregate_states.py new file mode 100644 index 00000000000..92e19402776 --- /dev/null +++ b/cinder/compute/aggregate_states.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible states for host aggregates. + +An aggregate may be 'created', in which case the admin has triggered its +creation, but the underlying hypervisor pool has not actually been set up +yet. An aggregate may be 'changing', meaning that the underlying hypervisor +pool is being set up. An aggregate may be 'active', in which case the underlying +hypervisor pool is up and running. An aggregate may be 'dismissed' when it has +no hosts and it has been deleted. An aggregate may be in 'error' in all other +cases. +A 'created' aggregate becomes 'changing' during the first request of +adding a host. During a 'changing' status no other requests will be accepted; +this is to allow the hypervisor layer to instantiate the underlying pool +without any potential race condition that may occur in master/slave-based +configurations. The aggregate goes into the 'active' state when the underlying +pool has been correctly instantiated. +All other operations (e.g. add/remove hosts) that succeed will keep the +aggregate in the 'active' state. If a number of consecutive requests fail, +an 'active' aggregate goes into an 'error' state. To recover from such a state, +admin intervention is required. Currently an error state is irreversible, +that is, in order to recover from it an aggregate must be deleted. +""" + +CREATED = 'created' +CHANGING = 'changing' +ACTIVE = 'active' +ERROR = 'error' +DISMISSED = 'dismissed' diff --git a/cinder/context.py b/cinder/context.py new file mode 100644 index 00000000000..a9b5519c2ab --- /dev/null +++ b/cinder/context.py @@ -0,0 +1,138 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""RequestContext: context for requests that persist through all of cinder.""" + +import copy + +from cinder import log as logging +from cinder.openstack.common import local +from cinder import utils + + +LOG = logging.getLogger(__name__) + + +def generate_request_id(): + return 'req-' + str(utils.gen_uuid()) + + +class RequestContext(object): + """Security context and request information. + + Represents the user taking a given action within the system. + + """ + + def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", + roles=None, remote_address=None, timestamp=None, + request_id=None, auth_token=None, overwrite=True, + quota_class=None, **kwargs): + """ + :param read_deleted: 'no' indicates deleted records are hidden, 'yes' + indicates deleted records are visible, 'only' indicates that + *only* deleted records are visible. + + :param overwrite: Set to False to ensure that the greenthread local + copy of the context is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + if kwargs: + LOG.warn(_('Arguments dropped when creating context: %s') % + str(kwargs)) + + self.user_id = user_id + self.project_id = project_id + self.roles = roles or [] + self.is_admin = is_admin + if self.is_admin is None: + self.is_admin = 'admin' in [x.lower() for x in self.roles] + elif self.is_admin and 'admin' not in self.roles: + self.roles.append('admin') + self.read_deleted = read_deleted + self.remote_address = remote_address + if not timestamp: + timestamp = utils.utcnow() + if isinstance(timestamp, basestring): + timestamp = utils.parse_strtime(timestamp) + self.timestamp = timestamp + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + self.auth_token = auth_token + self.quota_class = quota_class + if overwrite or not hasattr(local.store, 'context'): + self.update_store() + + def _get_read_deleted(self): + return self._read_deleted + + def _set_read_deleted(self, read_deleted): + if read_deleted not in ('no', 'yes', 'only'): + raise ValueError(_("read_deleted can only be one of 'no', " + "'yes' or 'only', not %r") % read_deleted) + self._read_deleted = read_deleted + + def _del_read_deleted(self): + del self._read_deleted + + read_deleted = property(_get_read_deleted, _set_read_deleted, + _del_read_deleted) + + def update_store(self): + local.store.context = self + + def to_dict(self): + return {'user_id': self.user_id, + 'project_id': self.project_id, + 'is_admin': self.is_admin, + 'read_deleted': self.read_deleted, + 'roles': self.roles, + 'remote_address': self.remote_address, + 'timestamp': utils.strtime(self.timestamp), + 'request_id': self.request_id, + 'auth_token': self.auth_token, + 'quota_class': self.quota_class} + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def elevated(self, read_deleted=None, overwrite=False): + """Return a version of this context with admin flag
set.""" + context = copy.copy(self) + context.is_admin = True + + if 'admin' not in context.roles: + context.roles.append('admin') + + if read_deleted is not None: + context.read_deleted = read_deleted + + return context + + +def get_admin_context(read_deleted="no"): + return RequestContext(user_id=None, + project_id=None, + is_admin=True, + read_deleted=read_deleted, + overwrite=False) diff --git a/cinder/db/__init__.py b/cinder/db/__init__.py new file mode 100644 index 00000000000..f4eb417ec9f --- /dev/null +++ b/cinder/db/__init__.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +DB abstraction for Cinder +""" + +from cinder.db.api import * diff --git a/cinder/db/api.py b/cinder/db/api.py new file mode 100644 index 00000000000..1e39531fda6 --- /dev/null +++ b/cinder/db/api.py @@ -0,0 +1,1335 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Defines interface for DB access. + +The underlying driver is loaded as a :class:`LazyPluggable`. + +Functions in this module are imported into the cinder.db namespace. Call these +functions from cinder.db namespace, not the cinder.db.api namespace. + +All functions in this module return objects that implement a dictionary-like +interface. Currently, many of these objects are sqlalchemy objects that +implement a dictionary interface. However, a future goal is to have all of +these objects be simple dictionaries. + + +**Related Flags** + +:db_backend: string to lookup in the list of LazyPluggable backends. + `sqlalchemy` is the only supported backend right now. + +:sql_connection: string specifying the sqlalchemy connection to use, like: + `sqlite:///var/lib/cinder/cinder.sqlite`. 
+ + :enable_new_services: when adding a new service to the database, whether it + is immediately placed in the pool of available hardware (Default: True) + + """ + +from cinder import exception +from cinder import flags +from cinder.openstack.common import cfg +from cinder import utils + + +db_opts = [ + cfg.StrOpt('db_backend', + default='sqlalchemy', + help='The backend to use for db'), + cfg.BoolOpt('enable_new_services', + default=True, + help='Services to be added to the available pool on create'), + cfg.StrOpt('instance_name_template', + default='instance-%08x', + help='Template string to be used to generate instance names'), + cfg.StrOpt('volume_name_template', + default='volume-%s', + help='Template string to be used to generate volume names'), + cfg.StrOpt('snapshot_name_template', + default='snapshot-%s', + help='Template string to be used to generate snapshot names'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(db_opts) + +IMPL = utils.LazyPluggable('db_backend', + sqlalchemy='cinder.db.sqlalchemy.api') + + +class NoMoreNetworks(exception.Error): + """No more available networks.""" + pass + + +class NoMoreTargets(exception.Error): + """No more available targets.""" + pass + + +################### + + +def service_destroy(context, service_id): + """Destroy the service or raise if it does not exist.""" + return IMPL.service_destroy(context, service_id) + + +def service_get(context, service_id): + """Get a service or raise if it does not exist.""" + return IMPL.service_get(context, service_id) + + +def service_get_by_host_and_topic(context, host, topic): + """Get a service by host it's on and topic it listens to.""" + return IMPL.service_get_by_host_and_topic(context, host, topic) + + +def service_get_all(context, disabled=None): + """Get all services.""" + return IMPL.service_get_all(context, disabled) + + +def service_get_all_by_topic(context, topic): + """Get all services for a given topic.""" + return IMPL.service_get_all_by_topic(context, topic) + + +def service_get_all_by_host(context, host): + """Get all services for a given host.""" + return IMPL.service_get_all_by_host(context, host) + + +def service_get_all_compute_by_host(context, host): + """Get all compute services for a given host.""" + return IMPL.service_get_all_compute_by_host(context, host) + + +def service_get_all_compute_sorted(context): + """Get all compute services sorted by instance count. + + :returns: a list of (Service, instance_count) tuples. + + """ + return IMPL.service_get_all_compute_sorted(context) + + +def service_get_all_volume_sorted(context): + """Get all volume services sorted by volume count. + + :returns: a list of (Service, volume_count) tuples. + + """ + return IMPL.service_get_all_volume_sorted(context) + + +def service_get_by_args(context, host, binary): + """Get the state of a service by node name and binary.""" + return IMPL.service_get_by_args(context, host, binary) + + +def service_create(context, values): + """Create a service from the values dictionary.""" + return IMPL.service_create(context, values) + + +def service_update(context, service_id, values): + """Set the given properties on a service and update it. + + Raises NotFound if service does not exist.
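+ + For example, a hypothetical ``db.service_update(ctxt, + service_id, {'disabled': True})`` dispatches straight to + ``IMPL.service_update``, i.e. to the backend selected by the + ``db_backend`` flag.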
+ + """ + return IMPL.service_update(context, service_id, values) + + +################### + + +def compute_node_get(context, compute_id): + """Get an computeNode or raise if it does not exist.""" + return IMPL.compute_node_get(context, compute_id) + + +def compute_node_get_all(context): + """Get all computeNodes.""" + return IMPL.compute_node_get_all(context) + + +def compute_node_create(context, values): + """Create a computeNode from the values dictionary.""" + return IMPL.compute_node_create(context, values) + + +def compute_node_update(context, compute_id, values, auto_adjust=True): + """Set the given properties on an computeNode and update it. + + Raises NotFound if computeNode does not exist. + """ + return IMPL.compute_node_update(context, compute_id, values, auto_adjust) + + +def compute_node_get_by_host(context, host): + return IMPL.compute_node_get_by_host(context, host) + + +def compute_node_utilization_update(context, host, free_ram_mb_delta=0, + free_disk_gb_delta=0, work_delta=0, vm_delta=0): + return IMPL.compute_node_utilization_update(context, host, + free_ram_mb_delta, free_disk_gb_delta, work_delta, + vm_delta) + + +def compute_node_utilization_set(context, host, free_ram_mb=None, + free_disk_gb=None, work=None, vms=None): + return IMPL.compute_node_utilization_set(context, host, free_ram_mb, + free_disk_gb, work, vms) + +################### + + +def certificate_create(context, values): + """Create a certificate from the values dictionary.""" + return IMPL.certificate_create(context, values) + + +def certificate_get_all_by_project(context, project_id): + """Get all certificates for a project.""" + return IMPL.certificate_get_all_by_project(context, project_id) + + +def certificate_get_all_by_user(context, user_id): + """Get all certificates for a user.""" + return IMPL.certificate_get_all_by_user(context, user_id) + + +def certificate_get_all_by_user_and_project(context, user_id, project_id): + """Get all certificates for a user and project.""" + return IMPL.certificate_get_all_by_user_and_project(context, + user_id, + project_id) + + +################### + +def floating_ip_get(context, id): + return IMPL.floating_ip_get(context, id) + + +def floating_ip_get_pools(context): + """Returns a list of floating ip pools""" + return IMPL.floating_ip_get_pools(context) + + +def floating_ip_allocate_address(context, project_id, pool): + """Allocate free floating ip from specified pool and return the address. + + Raises if one is not available. + + """ + return IMPL.floating_ip_allocate_address(context, project_id, pool) + + +def floating_ip_create(context, values): + """Create a floating ip from the values dictionary.""" + return IMPL.floating_ip_create(context, values) + + +def floating_ip_count_by_project(context, project_id): + """Count floating ips used by project.""" + return IMPL.floating_ip_count_by_project(context, project_id) + + +def floating_ip_deallocate(context, address): + """Deallocate an floating ip by address.""" + return IMPL.floating_ip_deallocate(context, address) + + +def floating_ip_destroy(context, address): + """Destroy the floating_ip or raise if it does not exist.""" + return IMPL.floating_ip_destroy(context, address) + + +def floating_ip_disassociate(context, address): + """Disassociate an floating ip from a fixed ip by address. + + :returns: the address of the existing fixed ip. 
+ + """ + return IMPL.floating_ip_disassociate(context, address) + + +def floating_ip_fixed_ip_associate(context, floating_address, + fixed_address, host): + """Associate an floating ip to a fixed_ip by address.""" + return IMPL.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address, + host) + + +def floating_ip_get_all(context): + """Get all floating ips.""" + return IMPL.floating_ip_get_all(context) + + +def floating_ip_get_all_by_host(context, host): + """Get all floating ips by host.""" + return IMPL.floating_ip_get_all_by_host(context, host) + + +def floating_ip_get_all_by_project(context, project_id): + """Get all floating ips by project.""" + return IMPL.floating_ip_get_all_by_project(context, project_id) + + +def floating_ip_get_by_address(context, address): + """Get a floating ip by address or raise if it doesn't exist.""" + return IMPL.floating_ip_get_by_address(context, address) + + +def floating_ip_get_by_fixed_address(context, fixed_address): + """Get a floating ips by fixed address""" + return IMPL.floating_ip_get_by_fixed_address(context, fixed_address) + + +def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): + """Get a floating ips by fixed address""" + return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id) + + +def floating_ip_update(context, address, values): + """Update a floating ip by address or raise if it doesn't exist.""" + return IMPL.floating_ip_update(context, address, values) + + +def floating_ip_set_auto_assigned(context, address): + """Set auto_assigned flag to floating ip""" + return IMPL.floating_ip_set_auto_assigned(context, address) + + +def dnsdomain_list(context): + """Get a list of all zones in our database, public and private.""" + return IMPL.dnsdomain_list(context) + + +def dnsdomain_register_for_zone(context, fqdomain, zone): + """Associated a DNS domain with an availability zone""" + return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone) + + +def dnsdomain_register_for_project(context, fqdomain, project): + """Associated a DNS domain with a project id""" + return IMPL.dnsdomain_register_for_project(context, fqdomain, project) + + +def dnsdomain_unregister(context, fqdomain): + """Purge associations for the specified DNS zone""" + return IMPL.dnsdomain_unregister(context, fqdomain) + + +def dnsdomain_get(context, fqdomain): + """Get the db record for the specified domain.""" + return IMPL.dnsdomain_get(context, fqdomain) + + +#################### + + +def migration_update(context, id, values): + """Update a migration instance.""" + return IMPL.migration_update(context, id, values) + + +def migration_create(context, values): + """Create a migration record.""" + return IMPL.migration_create(context, values) + + +def migration_get(context, migration_id): + """Finds a migration by the id.""" + return IMPL.migration_get(context, migration_id) + + +def migration_get_by_instance_and_status(context, instance_uuid, status): + """Finds a migration by the instance uuid its migrating.""" + return IMPL.migration_get_by_instance_and_status(context, instance_uuid, + status) + + +def migration_get_all_unconfirmed(context, confirm_window): + """Finds all unconfirmed migrations within the confirmation window.""" + return IMPL.migration_get_all_unconfirmed(context, confirm_window) + + +################### + + +def queue_get_for(context, topic, physical_node_id): + """Return a channel to send a message to a node with a topic.""" + return IMPL.queue_get_for(context, topic, physical_node_id) + + +################### + + +def 
iscsi_target_count_by_host(context, host): + """Return count of iscsi targets for the given host.""" + return IMPL.iscsi_target_count_by_host(context, host) + + +def iscsi_target_create_safe(context, values): + """Create an iscsi_target from the values dictionary. + + The device is not returned. If the create violates the unique + constraints because the iscsi_target and host already exist, + no exception is raised. + + """ + return IMPL.iscsi_target_create_safe(context, values) + + +############### + + +def auth_token_destroy(context, token_id): + """Destroy an auth token.""" + return IMPL.auth_token_destroy(context, token_id) + + +def auth_token_get(context, token_hash): + """Retrieves a token given the hash representing it.""" + return IMPL.auth_token_get(context, token_hash) + + +def auth_token_update(context, token_hash, values): + """Updates a token given the hash representing it.""" + return IMPL.auth_token_update(context, token_hash, values) + + +def auth_token_create(context, token): + """Creates a new token.""" + return IMPL.auth_token_create(context, token) + + +################### + + +def quota_create(context, project_id, resource, limit): + """Create a quota for the given project and resource.""" + return IMPL.quota_create(context, project_id, resource, limit) + + +def quota_get(context, project_id, resource): + """Retrieve a quota or raise if it does not exist.""" + return IMPL.quota_get(context, project_id, resource) + + +def quota_get_all_by_project(context, project_id): + """Retrieve all quotas associated with a given project.""" + return IMPL.quota_get_all_by_project(context, project_id) + + +def quota_update(context, project_id, resource, limit): + """Update a quota or raise if it does not exist.""" + return IMPL.quota_update(context, project_id, resource, limit) + + +def quota_destroy(context, project_id, resource): + """Destroy the quota or raise if it does not exist.""" + return IMPL.quota_destroy(context, project_id, resource) + + +def quota_destroy_all_by_project(context, project_id): + """Destroy all quotas associated with a given project.""" + return IMPL.quota_destroy_all_by_project(context, project_id) + + +################### + + +def quota_class_create(context, class_name, resource, limit): + """Create a quota class for the given name and resource.""" + return IMPL.quota_class_create(context, class_name, resource, limit) + + +def quota_class_get(context, class_name, resource): + """Retrieve a quota class or raise if it does not exist.""" + return IMPL.quota_class_get(context, class_name, resource) + + +def quota_class_get_all_by_name(context, class_name): + """Retrieve all quotas associated with a given quota class.""" + return IMPL.quota_class_get_all_by_name(context, class_name) + + +def quota_class_update(context, class_name, resource, limit): + """Update a quota class or raise if it does not exist.""" + return IMPL.quota_class_update(context, class_name, resource, limit) + + +def quota_class_destroy(context, class_name, resource): + """Destroy the quota class or raise if it does not exist.""" + return IMPL.quota_class_destroy(context, class_name, resource) + + +def quota_class_destroy_all_by_name(context, class_name): + """Destroy all quotas associated with a given quota class.""" + return IMPL.quota_class_destroy_all_by_name(context, class_name) + + +################### + + +def volume_allocate_iscsi_target(context, volume_id, host): + """Atomically allocate a free iscsi_target from the pool.""" + return IMPL.volume_allocate_iscsi_target(context, volume_id, host) + + +def
+
+
+def volume_attached(context, volume_id, instance_id, mountpoint):
+    """Ensure that a volume is set as attached."""
+    return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
+
+
+def volume_create(context, values):
+    """Create a volume from the values dictionary."""
+    return IMPL.volume_create(context, values)
+
+
+def volume_data_get_for_project(context, project_id):
+    """Get (volume_count, gigabytes) for project."""
+    return IMPL.volume_data_get_for_project(context, project_id)
+
+
+def volume_destroy(context, volume_id):
+    """Destroy the volume or raise if it does not exist."""
+    return IMPL.volume_destroy(context, volume_id)
+
+
+def volume_detached(context, volume_id):
+    """Ensure that a volume is set as detached."""
+    return IMPL.volume_detached(context, volume_id)
+
+
+def volume_get(context, volume_id):
+    """Get a volume or raise if it does not exist."""
+    return IMPL.volume_get(context, volume_id)
+
+
+def volume_get_all(context):
+    """Get all volumes."""
+    return IMPL.volume_get_all(context)
+
+
+def volume_get_all_by_host(context, host):
+    """Get all volumes belonging to a host."""
+    return IMPL.volume_get_all_by_host(context, host)
+
+
+def volume_get_all_by_instance(context, instance_id):
+    """Get all volumes belonging to an instance."""
+    return IMPL.volume_get_all_by_instance(context, instance_id)
+
+
+def volume_get_all_by_project(context, project_id):
+    """Get all volumes belonging to a project."""
+    return IMPL.volume_get_all_by_project(context, project_id)
+
+
+def volume_get_instance(context, volume_id):
+    """Get the instance that a volume is attached to."""
+    return IMPL.volume_get_instance(context, volume_id)
+
+
+def volume_get_iscsi_target_num(context, volume_id):
+    """Get the target num (tid) allocated to the volume."""
+    return IMPL.volume_get_iscsi_target_num(context, volume_id)
+
+
+def volume_update(context, volume_id, values):
+    """Set the given properties on a volume and update it.
+
+    Raises NotFound if volume does not exist.
+
+    """
+    return IMPL.volume_update(context, volume_id, values)
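+
+# A typical attach flow, sketched with hypothetical values:
+#
+#     volume = db.volume_create(ctxt, {'size': 1, 'display_name': 'vol1'})
+#     db.volume_attached(ctxt, volume['id'], instance_id, '/dev/vdb')
+#     ...
+#     db.volume_detached(ctxt, volume['id'])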
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +#################### + + +def block_device_mapping_create(context, values): + """Create an entry of block device mapping""" + return IMPL.block_device_mapping_create(context, values) + + +def block_device_mapping_update(context, bdm_id, values): + """Update an entry of block device mapping""" + return IMPL.block_device_mapping_update(context, bdm_id, values) + + +def block_device_mapping_update_or_create(context, values): + """Update an entry of block device mapping. + If not existed, create a new entry""" + return IMPL.block_device_mapping_update_or_create(context, values) + + +def block_device_mapping_get_all_by_instance(context, instance_uuid): + """Get all block device mapping belonging to a instance""" + return IMPL.block_device_mapping_get_all_by_instance(context, + instance_uuid) + + +def block_device_mapping_destroy(context, bdm_id): + """Destroy the block device mapping.""" + return IMPL.block_device_mapping_destroy(context, bdm_id) + + +def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, + volume_id): + """Destroy the block device mapping or raise if it does not exist.""" + return IMPL.block_device_mapping_destroy_by_instance_and_volume( + context, instance_uuid, volume_id) + + +#################### + + +def security_group_get_all(context): + """Get all security groups.""" + return IMPL.security_group_get_all(context) + + +def security_group_get(context, security_group_id): + """Get security group by its id.""" + return IMPL.security_group_get(context, security_group_id) + + +def security_group_get_by_name(context, project_id, group_name): + """Returns a security group with the specified name from a project.""" + return IMPL.security_group_get_by_name(context, project_id, group_name) + + +def security_group_get_by_project(context, project_id): + """Get all security groups belonging to a project.""" + return IMPL.security_group_get_by_project(context, project_id) + + +def security_group_get_by_instance(context, instance_id): + """Get security groups to which the instance is assigned.""" + return IMPL.security_group_get_by_instance(context, instance_id) + + +def security_group_exists(context, project_id, group_name): + """Indicates if a group name exists in a project.""" + return IMPL.security_group_exists(context, project_id, group_name) + + +def security_group_in_use(context, group_id): + """Indicates if a security group is currently in use.""" + return IMPL.security_group_in_use(context, group_id) + + +def security_group_create(context, values): + """Create a new security group.""" + return IMPL.security_group_create(context, values) + + +def security_group_destroy(context, security_group_id): + """Deletes a security group.""" + return IMPL.security_group_destroy(context, security_group_id) + + +def security_group_count_by_project(context, project_id): + """Count number of security groups in a project.""" + return IMPL.security_group_count_by_project(context, project_id) + + +#################### + + +def security_group_rule_create(context, values): + """Create a new security group.""" + return IMPL.security_group_rule_create(context, values) + + +def security_group_rule_get_by_security_group(context, security_group_id): + """Get all rules for a a given security group.""" + return IMPL.security_group_rule_get_by_security_group(context, + security_group_id) + + +def security_group_rule_get_by_security_group_grantee(context, + security_group_id): + """Get all rules that grant 
+
+
+####################
+
+
+def security_group_rule_create(context, values):
+    """Create a new security group rule."""
+    return IMPL.security_group_rule_create(context, values)
+
+
+def security_group_rule_get_by_security_group(context, security_group_id):
+    """Get all rules for a given security group."""
+    return IMPL.security_group_rule_get_by_security_group(context,
+                                                          security_group_id)
+
+
+def security_group_rule_get_by_security_group_grantee(context,
+                                                      security_group_id):
+    """Get all rules that grant access to the given security group."""
+    return IMPL.security_group_rule_get_by_security_group_grantee(context,
+                                                          security_group_id)
+
+
+def security_group_rule_destroy(context, security_group_rule_id):
+    """Deletes a security group rule."""
+    return IMPL.security_group_rule_destroy(context, security_group_rule_id)
+
+
+def security_group_rule_get(context, security_group_rule_id):
+    """Gets a security group rule."""
+    return IMPL.security_group_rule_get(context, security_group_rule_id)
+
+
+def security_group_rule_count_by_group(context, security_group_id):
+    """Count rules in a given security group."""
+    return IMPL.security_group_rule_count_by_group(context, security_group_id)
+
+
+###################
+
+
+def provider_fw_rule_create(context, rule):
+    """Add a firewall rule at the provider level (all hosts & instances)."""
+    return IMPL.provider_fw_rule_create(context, rule)
+
+
+def provider_fw_rule_get_all(context):
+    """Get all provider-level firewall rules."""
+    return IMPL.provider_fw_rule_get_all(context)
+
+
+def provider_fw_rule_destroy(context, rule_id):
+    """Delete a provider firewall rule from the database."""
+    return IMPL.provider_fw_rule_destroy(context, rule_id)
+
+
+###################
+
+
+def user_get(context, id):
+    """Get user by id."""
+    return IMPL.user_get(context, id)
+
+
+def user_get_by_uid(context, uid):
+    """Get user by uid."""
+    return IMPL.user_get_by_uid(context, uid)
+
+
+def user_get_by_access_key(context, access_key):
+    """Get user by access key."""
+    return IMPL.user_get_by_access_key(context, access_key)
+
+
+def user_create(context, values):
+    """Create a new user."""
+    return IMPL.user_create(context, values)
+
+
+def user_delete(context, id):
+    """Delete a user."""
+    return IMPL.user_delete(context, id)
+
+
+def user_get_all(context):
+    """Get all users."""
+    return IMPL.user_get_all(context)
+
+
+def user_add_role(context, user_id, role):
+    """Add another global role for user."""
+    return IMPL.user_add_role(context, user_id, role)
+
+
+def user_remove_role(context, user_id, role):
+    """Remove global role from user."""
+    return IMPL.user_remove_role(context, user_id, role)
+
+
+def user_get_roles(context, user_id):
+    """Get global roles for user."""
+    return IMPL.user_get_roles(context, user_id)
+
+
+def user_add_project_role(context, user_id, project_id, role):
+    """Add project role for user."""
+    return IMPL.user_add_project_role(context, user_id, project_id, role)
+
+
+def user_remove_project_role(context, user_id, project_id, role):
+    """Remove project role from user."""
+    return IMPL.user_remove_project_role(context, user_id, project_id, role)
+
+
+def user_get_roles_for_project(context, user_id, project_id):
+    """Return list of roles a user holds on project."""
+    return IMPL.user_get_roles_for_project(context, user_id, project_id)
+
+
+def user_update(context, user_id, values):
+    """Update user."""
+    return IMPL.user_update(context, user_id, values)
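+
+# Sketch (hypothetical ids and role names): grant a global role and a
+# per-project role, then read them back:
+#
+#     db.user_add_role(ctxt, 'user1', 'netadmin')
+#     db.user_add_project_role(ctxt, 'user1', 'proj1', 'sysadmin')
+#     roles = db.user_get_roles(ctxt, 'user1')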
member.""" + return IMPL.project_get_by_user(context, user_id) + + +def project_remove_member(context, project_id, user_id): + """Remove the given user from the given project.""" + return IMPL.project_remove_member(context, project_id, user_id) + + +def project_update(context, project_id, values): + """Update Remove the given user from the given project.""" + return IMPL.project_update(context, project_id, values) + + +def project_delete(context, project_id): + """Delete project.""" + return IMPL.project_delete(context, project_id) + + +def project_get_networks(context, project_id, associate=True): + """Return the network associated with the project. + + If associate is true, it will attempt to associate a new + network if one is not found, otherwise it returns None. + + """ + return IMPL.project_get_networks(context, project_id, associate) + + +################### + + +def console_pool_create(context, values): + """Create console pool.""" + return IMPL.console_pool_create(context, values) + + +def console_pool_get(context, pool_id): + """Get a console pool.""" + return IMPL.console_pool_get(context, pool_id) + + +def console_pool_get_by_host_type(context, compute_host, proxy_host, + console_type): + """Fetch a console pool for a given proxy host, compute host, and type.""" + return IMPL.console_pool_get_by_host_type(context, + compute_host, + proxy_host, + console_type) + + +def console_pool_get_all_by_host_type(context, host, console_type): + """Fetch all pools for given proxy host and type.""" + return IMPL.console_pool_get_all_by_host_type(context, + host, + console_type) + + +def console_create(context, values): + """Create a console.""" + return IMPL.console_create(context, values) + + +def console_delete(context, console_id): + """Delete a console.""" + return IMPL.console_delete(context, console_id) + + +def console_get_by_pool_instance(context, pool_id, instance_id): + """Get console entry for a given instance and pool.""" + return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) + + +def console_get_all_by_instance(context, instance_id): + """Get consoles for a given instance.""" + return IMPL.console_get_all_by_instance(context, instance_id) + + +def console_get(context, console_id, instance_id=None): + """Get a specific console (possibly on a given instance).""" + return IMPL.console_get(context, console_id, instance_id) + + + ################## + + +def instance_type_create(context, values): + """Create a new instance type.""" + return IMPL.instance_type_create(context, values) + + +def instance_type_get_all(context, inactive=False, filters=None): + """Get all instance types.""" + return IMPL.instance_type_get_all( + context, inactive=inactive, filters=filters) + + +def instance_type_get(context, id): + """Get instance type by id.""" + return IMPL.instance_type_get(context, id) + + +def instance_type_get_by_name(context, name): + """Get instance type by name.""" + return IMPL.instance_type_get_by_name(context, name) + + +def instance_type_get_by_flavor_id(context, id): + """Get instance type by name.""" + return IMPL.instance_type_get_by_flavor_id(context, id) + + +def instance_type_destroy(context, name): + """Delete a instance type.""" + return IMPL.instance_type_destroy(context, name) + + +#################### + + +def cell_create(context, values): + """Create a new child Cell entry.""" + return IMPL.cell_create(context, values) + + +def cell_update(context, cell_id, values): + """Update a child Cell entry.""" + return IMPL.cell_update(context, cell_id, 
+
+
+####################
+
+
+def cell_create(context, values):
+    """Create a new child Cell entry."""
+    return IMPL.cell_create(context, values)
+
+
+def cell_update(context, cell_id, values):
+    """Update a child Cell entry."""
+    return IMPL.cell_update(context, cell_id, values)
+
+
+def cell_delete(context, cell_id):
+    """Delete a child Cell."""
+    return IMPL.cell_delete(context, cell_id)
+
+
+def cell_get(context, cell_id):
+    """Get a specific child Cell."""
+    return IMPL.cell_get(context, cell_id)
+
+
+def cell_get_all(context):
+    """Get all child Cells."""
+    return IMPL.cell_get_all(context)
+
+
+####################
+
+
+def instance_metadata_get(context, instance_id):
+    """Get all metadata for an instance."""
+    return IMPL.instance_metadata_get(context, instance_id)
+
+
+def instance_metadata_delete(context, instance_id, key):
+    """Delete the given metadata item."""
+    IMPL.instance_metadata_delete(context, instance_id, key)
+
+
+def instance_metadata_update(context, instance_id, metadata, delete):
+    """Update metadata if it exists, otherwise create it."""
+    IMPL.instance_metadata_update(context, instance_id, metadata, delete)
+
+
+####################
+
+
+def agent_build_create(context, values):
+    """Create a new agent build entry."""
+    return IMPL.agent_build_create(context, values)
+
+
+def agent_build_get_by_triple(context, hypervisor, os, architecture):
+    """Get agent build by hypervisor/OS/architecture triple."""
+    return IMPL.agent_build_get_by_triple(context, hypervisor, os,
+                                          architecture)
+
+
+def agent_build_get_all(context):
+    """Get all agent builds."""
+    return IMPL.agent_build_get_all(context)
+
+
+def agent_build_destroy(context, agent_update_id):
+    """Destroy agent build entry."""
+    IMPL.agent_build_destroy(context, agent_update_id)
+
+
+def agent_build_update(context, agent_build_id, values):
+    """Update agent build entry."""
+    IMPL.agent_build_update(context, agent_build_id, values)
+
+
+####################
+
+
+def bw_usage_get_by_uuids(context, uuids, start_period):
+    """Return bw usages for instance(s) in a given audit period."""
+    return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
+
+
+def bw_usage_update(context,
+                    uuid,
+                    mac,
+                    start_period,
+                    bw_in, bw_out):
+    """Update cached bw usage for an instance and network.
+
+    Creates a new record if needed.
+    """
+    return IMPL.bw_usage_update(context,
+                                uuid,
+                                mac,
+                                start_period,
+                                bw_in, bw_out)
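+
+# Sketch (hypothetical values): cache one NIC's bandwidth counters for an
+# audit period; a record is created on first call, updated afterwards:
+#
+#     db.bw_usage_update(ctxt, instance_uuid, 'aa:bb:cc:dd:ee:ff',
+#                        audit_start, 2048, 4096)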
+
+
+####################
+
+
+def instance_type_extra_specs_get(context, instance_type_id):
+    """Get all extra specs for an instance type."""
+    return IMPL.instance_type_extra_specs_get(context, instance_type_id)
+
+
+def instance_type_extra_specs_delete(context, instance_type_id, key):
+    """Delete the given extra specs item."""
+    IMPL.instance_type_extra_specs_delete(context, instance_type_id, key)
+
+
+def instance_type_extra_specs_update_or_create(context, instance_type_id,
+                                               extra_specs):
+    """Create or update instance type extra specs.
+
+    This adds or modifies the key/value pairs specified in the
+    extra specs dict argument.
+    """
+    IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
+                                                    extra_specs)
+
+
+##################
+
+
+def volume_metadata_get(context, volume_id):
+    """Get all metadata for a volume."""
+    return IMPL.volume_metadata_get(context, volume_id)
+
+
+def volume_metadata_delete(context, volume_id, key):
+    """Delete the given metadata item."""
+    IMPL.volume_metadata_delete(context, volume_id, key)
+
+
+def volume_metadata_update(context, volume_id, metadata, delete):
+    """Update metadata if it exists, otherwise create it."""
+    IMPL.volume_metadata_update(context, volume_id, metadata, delete)
+
+
+##################
+
+
+def volume_type_create(context, values):
+    """Create a new volume type."""
+    return IMPL.volume_type_create(context, values)
+
+
+def volume_type_get_all(context, inactive=False):
+    """Get all volume types."""
+    return IMPL.volume_type_get_all(context, inactive)
+
+
+def volume_type_get(context, id):
+    """Get volume type by id."""
+    return IMPL.volume_type_get(context, id)
+
+
+def volume_type_get_by_name(context, name):
+    """Get volume type by name."""
+    return IMPL.volume_type_get_by_name(context, name)
+
+
+def volume_type_destroy(context, name):
+    """Delete a volume type."""
+    return IMPL.volume_type_destroy(context, name)
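+
+# Sketch (hypothetical type name and specs): the sqlalchemy backend lets
+# extra specs ride along in the values dict at creation time:
+#
+#     db.volume_type_create(ctxt, {'name': 'gold',
+#                                  'extra_specs': {'qos': 'high'}})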
+
+
+####################
+
+
+def volume_type_extra_specs_get(context, volume_type_id):
+    """Get all extra specs for a volume type."""
+    return IMPL.volume_type_extra_specs_get(context, volume_type_id)
+
+
+def volume_type_extra_specs_delete(context, volume_type_id, key):
+    """Delete the given extra specs item."""
+    IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
+
+
+def volume_type_extra_specs_update_or_create(context, volume_type_id,
+                                             extra_specs):
+    """Create or update volume type extra specs.
+
+    This adds or modifies the key/value pairs specified in the
+    extra specs dict argument.
+    """
+    IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
+                                                  extra_specs)
+
+
+###################
+
+
+def s3_image_get(context, image_id):
+    """Find local s3 image represented by the provided id."""
+    return IMPL.s3_image_get(context, image_id)
+
+
+def s3_image_get_by_uuid(context, image_uuid):
+    """Find local s3 image represented by the provided uuid."""
+    return IMPL.s3_image_get_by_uuid(context, image_uuid)
+
+
+def s3_image_create(context, image_uuid):
+    """Create local s3 image represented by provided uuid."""
+    return IMPL.s3_image_create(context, image_uuid)
+
+
+####################
+
+
+def sm_backend_conf_create(context, values):
+    """Create a new SM Backend Config entry."""
+    return IMPL.sm_backend_conf_create(context, values)
+
+
+def sm_backend_conf_update(context, sm_backend_conf_id, values):
+    """Update a SM Backend Config entry."""
+    return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values)
+
+
+def sm_backend_conf_delete(context, sm_backend_conf_id):
+    """Delete a SM Backend Config."""
+    return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)
+
+
+def sm_backend_conf_get(context, sm_backend_conf_id):
+    """Get a specific SM Backend Config."""
+    return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)
+
+
+def sm_backend_conf_get_by_sr(context, sr_uuid):
+    """Get a specific SM Backend Config by SR uuid."""
+    return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid)
+
+
+def sm_backend_conf_get_all(context):
+    """Get all SM Backend Configs."""
+    return IMPL.sm_backend_conf_get_all(context)
+
+
+####################
+
+
+def sm_flavor_create(context, values):
+    """Create a new SM Flavor entry."""
+    return IMPL.sm_flavor_create(context, values)
+
+
+def sm_flavor_update(context, sm_flavor_id, values):
+    """Update a SM Flavor entry."""
+    return IMPL.sm_flavor_update(context, sm_flavor_id, values)
+
+
+def sm_flavor_delete(context, sm_flavor_id):
+    """Delete a SM Flavor."""
+    return IMPL.sm_flavor_delete(context, sm_flavor_id)
+
+
+def sm_flavor_get(context, sm_flavor):
+    """Get a specific SM Flavor."""
+    return IMPL.sm_flavor_get(context, sm_flavor)
+
+
+def sm_flavor_get_all(context):
+    """Get all SM Flavors."""
+    return IMPL.sm_flavor_get_all(context)
+
+
+####################
+
+
+def sm_volume_create(context, values):
+    """Create a new SM volume entry."""
+    return IMPL.sm_volume_create(context, values)
+
+
+def sm_volume_update(context, volume_id, values):
+    """Update a SM volume entry."""
+    return IMPL.sm_volume_update(context, volume_id, values)
+
+
+def sm_volume_delete(context, volume_id):
+    """Delete a SM volume."""
+    return IMPL.sm_volume_delete(context, volume_id)
+
+
+def sm_volume_get(context, volume_id):
+    """Get a specific SM volume."""
+    return IMPL.sm_volume_get(context, volume_id)
+
+
+def sm_volume_get_all(context):
+    """Get all SM volumes."""
+    return IMPL.sm_volume_get_all(context)
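+
+# Sketch (hypothetical values): SM volume records are plain key/value
+# updates by volume id, like the other sm_* tables:
+#
+#     db.sm_volume_update(ctxt, vol_id, {'vdi_uuid': '...'})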
+
+
+####################
+
+
+def aggregate_create(context, values, metadata=None):
+    """Create a new aggregate with metadata."""
+    return IMPL.aggregate_create(context, values, metadata)
+
+
+def aggregate_get(context, aggregate_id):
+    """Get a specific aggregate by id."""
+    return IMPL.aggregate_get(context, aggregate_id)
+
+
+def aggregate_get_by_host(context, host):
+    """Get a specific aggregate by host."""
+    return IMPL.aggregate_get_by_host(context, host)
+
+
+def aggregate_update(context, aggregate_id, values):
+    """Update the attributes of an aggregate. If values contains a metadata
+    key, it updates the aggregate metadata too."""
+    return IMPL.aggregate_update(context, aggregate_id, values)
+
+
+def aggregate_delete(context, aggregate_id):
+    """Delete an aggregate."""
+    return IMPL.aggregate_delete(context, aggregate_id)
+
+
+def aggregate_get_all(context):
+    """Get all aggregates."""
+    return IMPL.aggregate_get_all(context)
+
+
+def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
+    """Add/update metadata. If set_delete=True, existing metadata not in
+    the given dict is removed."""
+    IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)
+
+
+def aggregate_metadata_get(context, aggregate_id):
+    """Get metadata for the specified aggregate."""
+    return IMPL.aggregate_metadata_get(context, aggregate_id)
+
+
+def aggregate_metadata_delete(context, aggregate_id, key):
+    """Delete the given metadata key."""
+    IMPL.aggregate_metadata_delete(context, aggregate_id, key)
+
+
+def aggregate_host_add(context, aggregate_id, host):
+    """Add host to the aggregate."""
+    IMPL.aggregate_host_add(context, aggregate_id, host)
+
+
+def aggregate_host_get_all(context, aggregate_id):
+    """Get hosts for the specified aggregate."""
+    return IMPL.aggregate_host_get_all(context, aggregate_id)
+
+
+def aggregate_host_delete(context, aggregate_id, host):
+    """Delete the given host from the aggregate."""
+    IMPL.aggregate_host_delete(context, aggregate_id, host)
+
+
+####################
+
+
+def instance_fault_create(context, values):
+    """Create a new Instance Fault."""
+    return IMPL.instance_fault_create(context, values)
+
+
+def instance_fault_get_by_instance_uuids(context, instance_uuids):
+    """Get all instance faults for the provided instance_uuids."""
+    return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
diff --git a/cinder/db/base.py b/cinder/db/base.py
new file mode 100644
index 00000000000..8b9d437c6dd
--- /dev/null
+++ b/cinder/db/base.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +"""Base class for classes that need modular database access.""" + +from cinder import flags +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils + + +db_driver_opt = cfg.StrOpt('db_driver', + default='cinder.db', + help='driver to use for database access') + +FLAGS = flags.FLAGS +FLAGS.register_opt(db_driver_opt) + + +class Base(object): + """DB driver is injected in the init method.""" + + def __init__(self, db_driver=None): + if not db_driver: + db_driver = FLAGS.db_driver + self.db = importutils.import_module(db_driver) # pylint: disable=C0103 diff --git a/cinder/db/migration.py b/cinder/db/migration.py new file mode 100644 index 00000000000..87147ce926c --- /dev/null +++ b/cinder/db/migration.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Database setup and migration commands.""" + +from cinder import utils + + +IMPL = utils.LazyPluggable('db_backend', + sqlalchemy='cinder.db.sqlalchemy.migration') + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() diff --git a/cinder/db/sqlalchemy/__init__.py b/cinder/db/sqlalchemy/__init__.py new file mode 100644 index 00000000000..747015af53e --- /dev/null +++ b/cinder/db/sqlalchemy/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py new file mode 100644 index 00000000000..2d40b30467d --- /dev/null +++ b/cinder/db/sqlalchemy/api.py @@ -0,0 +1,1499 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of SQLAlchemy backend.""" + +import datetime +import functools +import warnings + +from cinder import db +from cinder import exception +from cinder import flags +from cinder import utils +from cinder import log as logging +from cinder.compute import aggregate_states +from cinder.db.sqlalchemy import models +from cinder.db.sqlalchemy.session import get_session +from sqlalchemy.exc import IntegrityError +from sqlalchemy.orm import joinedload +from sqlalchemy.sql import func +from sqlalchemy.sql.expression import literal_column + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +def is_admin_context(context): + """Indicates if the request context is an administrator.""" + if not context: + warnings.warn(_('Use of empty request context is deprecated'), + DeprecationWarning) + raise Exception('die') + return context.is_admin + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True + + +def authorize_project_context(context, project_id): + """Ensures a request has permission to access the given project.""" + if is_user_context(context): + if not context.project_id: + raise exception.NotAuthorized() + elif context.project_id != project_id: + raise exception.NotAuthorized() + + +def authorize_user_context(context, user_id): + """Ensures a request has permission to access the given user.""" + if is_user_context(context): + if not context.user_id: + raise exception.NotAuthorized() + elif context.user_id != user_id: + raise exception.NotAuthorized() + + +def authorize_quota_class_context(context, class_name): + """Ensures a request has permission to access the given quota class.""" + if is_user_context(context): + if not context.quota_class: + raise exception.NotAuthorized() + elif context.quota_class != class_name: + raise exception.NotAuthorized() + + +def require_admin_context(f): + """Decorator to require admin request context. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]): + raise exception.AdminRequired() + return f(*args, **kwargs) + return wrapper + + +def require_context(f): + """Decorator to require *any* user or admin context. + + This does no authorization for user or project access matching, see + :py:func:`authorize_project_context` and + :py:func:`authorize_user_context`. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]) and not is_user_context(args[0]): + raise exception.NotAuthorized() + return f(*args, **kwargs) + return wrapper + + +def require_volume_exists(f): + """Decorator to require the specified volume to exist. + + Requires the wrapped function to use context and volume_id as + their first two arguments. 
+ """ + + def wrapper(context, volume_id, *args, **kwargs): + db.volume_get(context, volume_id) + return f(context, volume_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + +def require_aggregate_exists(f): + """Decorator to require the specified aggregate to exist. + + Requires the wrapped function to use context and aggregate_id as + their first two arguments. + """ + + @functools.wraps(f) + def wrapper(context, aggregate_id, *args, **kwargs): + db.aggregate_get(context, aggregate_id) + return f(context, aggregate_id, *args, **kwargs) + return wrapper + + +def model_query(context, *args, **kwargs): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + :param session: if present, the session to use + :param read_deleted: if present, overrides context's read_deleted field. + :param project_only: if present and context is user-type, then restrict + query to match the context's project_id. + """ + session = kwargs.get('session') or get_session() + read_deleted = kwargs.get('read_deleted') or context.read_deleted + project_only = kwargs.get('project_only') + + query = session.query(*args) + + if read_deleted == 'no': + query = query.filter_by(deleted=False) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter_by(deleted=True) + else: + raise Exception( + _("Unrecognized read_deleted value '%s'") % read_deleted) + + if project_only and is_user_context(context): + query = query.filter_by(project_id=context.project_id) + + return query + + +def exact_filter(query, model, filters, legal_keys): + """Applies exact match filtering to a query. + + Returns the updated query. Modifies filters argument to remove + filters consumed. + + :param query: query to apply filters to + :param model: model object the query applies to, for IN-style + filtering + :param filters: dictionary of filters; values that are lists, + tuples, sets, or frozensets cause an 'IN' test to + be performed, while exact matching ('==' operator) + is used for other values + :param legal_keys: list of keys to apply exact filtering to + """ + + filter_dict = {} + + # Walk through all the keys + for key in legal_keys: + # Skip ones we're not filtering on + if key not in filters: + continue + + # OK, filtering on this key; what value do we search for? 
+ value = filters.pop(key) + + if isinstance(value, (list, tuple, set, frozenset)): + # Looking for values in a list; apply to query directly + column_attr = getattr(model, key) + query = query.filter(column_attr.in_(value)) + else: + # OK, simple exact match; save for later + filter_dict[key] = value + + # Apply simple exact matches + if filter_dict: + query = query.filter_by(**filter_dict) + + return query + + +################### + + +@require_admin_context +def service_destroy(context, service_id): + session = get_session() + with session.begin(): + service_ref = service_get(context, service_id, session=session) + service_ref.delete(session=session) + + +@require_admin_context +def service_get(context, service_id, session=None): + result = model_query(context, models.Service, session=session).\ + filter_by(id=service_id).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=service_id) + + return result + + +@require_admin_context +def service_get_all(context, disabled=None): + query = model_query(context, models.Service) + + if disabled is not None: + query = query.filter_by(disabled=disabled) + + return query.all() + + +@require_admin_context +def service_get_all_by_topic(context, topic): + return model_query(context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(topic=topic).\ + all() + + +@require_admin_context +def service_get_by_host_and_topic(context, host, topic): + return model_query(context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + first() + + +@require_admin_context +def service_get_all_by_host(context, host): + return model_query(context, models.Service, read_deleted="no").\ + filter_by(host=host).\ + all() + + +@require_admin_context +def _service_get_all_topic_subquery(context, session, topic, subq, label): + sort_value = getattr(subq.c, label) + return model_query(context, models.Service, + func.coalesce(sort_value, 0), + session=session, read_deleted="no").\ + filter_by(topic=topic).\ + filter_by(disabled=False).\ + outerjoin((subq, models.Service.host == subq.c.host)).\ + order_by(sort_value).\ + all() + + +@require_admin_context +def service_get_all_volume_sorted(context): + session = get_session() + with session.begin(): + topic = 'volume' + label = 'volume_gigabytes' + subq = model_query(context, models.Volume.host, + func.sum(models.Volume.size).label(label), + session=session, read_deleted="no").\ + group_by(models.Volume.host).\ + subquery() + return _service_get_all_topic_subquery(context, + session, + topic, + subq, + label) + + +@require_admin_context +def service_get_by_args(context, host, binary): + result = model_query(context, models.Service).\ + filter_by(host=host).\ + filter_by(binary=binary).\ + first() + + if not result: + raise exception.HostBinaryNotFound(host=host, binary=binary) + + return result + + +@require_admin_context +def service_create(context, values): + service_ref = models.Service() + service_ref.update(values) + if not FLAGS.enable_new_services: + service_ref.disabled = True + service_ref.save() + return service_ref + + +@require_admin_context +def service_update(context, service_id, values): + session = get_session() + with session.begin(): + service_ref = service_get(context, service_id, session=session) + service_ref.update(values) + service_ref.save(session=session) + + +################### + + +def _metadata_refs(metadata_dict, meta_class): + metadata_refs = [] + if metadata_dict: + for k, v in 
metadata_dict.iteritems(): + metadata_ref = meta_class() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs + + +def _dict_with_extra_specs(inst_type_query): + """Takes an instance, volume, or instance type query returned + by sqlalchemy and returns it as a dictionary, converting the + extra_specs entry from a list of dicts: + + 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] + + to a single dict: + + 'extra_specs' : {'k1': 'v1'} + + """ + inst_type_dict = dict(inst_type_query) + extra_specs = dict([(x['key'], x['value']) + for x in inst_type_query['extra_specs']]) + inst_type_dict['extra_specs'] = extra_specs + return inst_type_dict + + +################### + + +def queue_get_for(context, topic, physical_node_id): + # FIXME(ja): this should be servername? + return "%s.%s" % (topic, physical_node_id) + + +################### + + +@require_admin_context +def iscsi_target_count_by_host(context, host): + return model_query(context, models.IscsiTarget).\ + filter_by(host=host).\ + count() + + +@require_admin_context +def iscsi_target_create_safe(context, values): + iscsi_target_ref = models.IscsiTarget() + + for (key, value) in values.iteritems(): + iscsi_target_ref[key] = value + try: + iscsi_target_ref.save() + return iscsi_target_ref + except IntegrityError: + return None + + +################### + + +@require_context +def quota_get(context, project_id, resource, session=None): + result = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.ProjectQuotaNotFound(project_id=project_id) + + return result + + +@require_context +def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.Quota, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + result = {'project_id': project_id} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_create(context, project_id, resource, limit): + quota_ref = models.Quota() + quota_ref.project_id = project_id + quota_ref.resource = resource + quota_ref.hard_limit = limit + quota_ref.save() + return quota_ref + + +@require_admin_context +def quota_update(context, project_id, resource, limit): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.hard_limit = limit + quota_ref.save(session=session) + + +@require_admin_context +def quota_destroy(context, project_id, resource): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.delete(session=session) + + +@require_admin_context +def quota_destroy_all_by_project(context, project_id): + session = get_session() + with session.begin(): + quotas = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + for quota_ref in quotas: + quota_ref.delete(session=session) + + +################### + + +@require_context +def quota_class_get(context, class_name, resource, session=None): + result = model_query(context, models.QuotaClass, session=session, + read_deleted="no").\ + filter_by(class_name=class_name).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.QuotaClassNotFound(class_name=class_name) + + return 
result + + +@require_context +def quota_class_get_all_by_name(context, class_name): + authorize_quota_class_context(context, class_name) + + rows = model_query(context, models.QuotaClass, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + result = {'class_name': class_name} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_class_create(context, class_name, resource, limit): + quota_class_ref = models.QuotaClass() + quota_class_ref.class_name = class_name + quota_class_ref.resource = resource + quota_class_ref.hard_limit = limit + quota_class_ref.save() + return quota_class_ref + + +@require_admin_context +def quota_class_update(context, class_name, resource, limit): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.hard_limit = limit + quota_class_ref.save(session=session) + + +@require_admin_context +def quota_class_destroy(context, class_name, resource): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.delete(session=session) + + +@require_admin_context +def quota_class_destroy_all_by_name(context, class_name): + session = get_session() + with session.begin(): + quota_classes = model_query(context, models.QuotaClass, + session=session, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + for quota_class_ref in quota_classes: + quota_class_ref.delete(session=session) + + +################### + + +@require_admin_context +def volume_allocate_iscsi_target(context, volume_id, host): + session = get_session() + with session.begin(): + iscsi_target_ref = model_query(context, models.IscsiTarget, + session=session, read_deleted="no").\ + filter_by(volume=None).\ + filter_by(host=host).\ + with_lockmode('update').\ + first() + + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not iscsi_target_ref: + raise db.NoMoreTargets() + + iscsi_target_ref.volume_id = volume_id + session.add(iscsi_target_ref) + + return iscsi_target_ref.target_num + + +@require_admin_context +def volume_attached(context, volume_id, instance_id, mountpoint): + session = get_session() + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref['status'] = 'in-use' + volume_ref['mountpoint'] = mountpoint + volume_ref['attach_status'] = 'attached' + volume_ref['instance_id'] = instance_id + volume_ref.save(session=session) + + +@require_context +def volume_create(context, values): + values['volume_metadata'] = _metadata_refs(values.get('metadata'), + models.VolumeMetadata) + volume_ref = models.Volume() + if not values.get('id'): + values['id'] = str(utils.gen_uuid()) + volume_ref.update(values) + + session = get_session() + with session.begin(): + volume_ref.save(session=session) + + return volume_ref + + +@require_admin_context +def volume_data_get_for_project(context, project_id): + result = model_query(context, + func.count(models.Volume.id), + func.sum(models.Volume.size), + read_deleted="no").\ + filter_by(project_id=project_id).\ + first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_admin_context +def volume_destroy(context, volume_id): + session = get_session() + with session.begin(): + session.query(models.Volume).\ + filter_by(id=volume_id).\ + update({'deleted': True, + 
'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + session.query(models.IscsiTarget).\ + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) + session.query(models.VolumeMetadata).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +def volume_detached(context, volume_id): + session = get_session() + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref['status'] = 'available' + volume_ref['mountpoint'] = None + volume_ref['attach_status'] = 'detached' + volume_ref.instance = None + volume_ref.save(session=session) + + +@require_context +def _volume_get_query(context, session=None, project_only=False): + return model_query(context, models.Volume, session=session, + project_only=project_only).\ + options(joinedload('instance')).\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_type')) + + +@require_context +def volume_get(context, volume_id, session=None): + result = _volume_get_query(context, session=session, project_only=True).\ + filter_by(id=volume_id).\ + first() + + if not result: + raise exception.VolumeNotFound(volume_id=volume_id) + + return result + + +@require_admin_context +def volume_get_all(context): + return _volume_get_query(context).all() + + +@require_admin_context +def volume_get_all_by_host(context, host): + return _volume_get_query(context).filter_by(host=host).all() + + +@require_admin_context +def volume_get_all_by_instance(context, instance_id): + result = model_query(context, models.Volume, read_deleted="no").\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_type')).\ + filter_by(instance_id=instance_id).\ + all() + + return result + + +@require_context +def volume_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + return _volume_get_query(context).filter_by(project_id=project_id).all() + + +@require_admin_context +def volume_get_instance(context, volume_id): + result = _volume_get_query(context).filter_by(id=volume_id).first() + + if not result: + raise exception.VolumeNotFound(volume_id=volume_id) + + return result.instance + + +@require_admin_context +def volume_get_iscsi_target_num(context, volume_id): + result = model_query(context, models.IscsiTarget, read_deleted="yes").\ + filter_by(volume_id=volume_id).\ + first() + + if not result: + raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) + + return result.target_num + + +@require_context +def volume_update(context, volume_id, values): + session = get_session() + metadata = values.get('metadata') + if metadata is not None: + volume_metadata_update(context, + volume_id, + values.pop('metadata'), + delete=True) + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref.update(values) + volume_ref.save(session=session) + + +#################### + +def _volume_metadata_get_query(context, volume_id, session=None): + return model_query(context, models.VolumeMetadata, + session=session, read_deleted="no").\ + filter_by(volume_id=volume_id) + + +@require_context +@require_volume_exists +def volume_metadata_get(context, volume_id): + rows = _volume_metadata_get_query(context, volume_id).all() + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +@require_volume_exists +def volume_metadata_delete(context, volume_id, key): + 
_volume_metadata_get_query(context, volume_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +@require_volume_exists +def volume_metadata_get_item(context, volume_id, key, session=None): + result = _volume_metadata_get_query(context, volume_id, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.VolumeMetadataNotFound(metadata_key=key, + volume_id=volume_id) + return result + + +@require_context +@require_volume_exists +def volume_metadata_update(context, volume_id, metadata, delete): + session = get_session() + + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = volume_metadata_get(context, volume_id) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = volume_metadata_get_item(context, volume_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + # Now update all existing items with new values, or create new meta objects + for meta_key, meta_value in metadata.iteritems(): + + # update the value whether it exists or not + item = {"value": meta_value} + + try: + meta_ref = volume_metadata_get_item(context, volume_id, + meta_key, session) + except exception.VolumeMetadataNotFound, e: + meta_ref = models.VolumeMetadata() + item.update({"key": meta_key, "volume_id": volume_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return metadata + + +################### + + +@require_context +def snapshot_create(context, values): + snapshot_ref = models.Snapshot() + if not values.get('id'): + values['id'] = str(utils.gen_uuid()) + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + return snapshot_ref + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + result = model_query(context, models.Snapshot, session=session, + project_only=True).\ + filter_by(id=snapshot_id).\ + first() + + if not result: + raise exception.SnapshotNotFound(snapshot_id=snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + return model_query(context, models.Snapshot).all() + + +@require_context +def snapshot_get_all_for_volume(context, volume_id): + return model_query(context, models.Snapshot, read_deleted='no', + project_only=True).\ + filter_by(volume_id=volume_id).all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + return model_query(context, models.Snapshot).\ + filter_by(project_id=project_id).\ + all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + + +################### + + +@require_admin_context +def migration_create(context, values): + migration = models.Migration() + migration.update(values) + migration.save() + return migration + + +@require_admin_context +def migration_update(context, 
id, values):
+    session = get_session()
+    with session.begin():
+        migration = migration_get(context, id, session=session)
+        migration.update(values)
+        migration.save(session=session)
+        return migration
+
+
+@require_admin_context
+def migration_get(context, id, session=None):
+    result = model_query(context, models.Migration, session=session,
+                         read_deleted="yes").\
+                     filter_by(id=id).\
+                     first()
+
+    if not result:
+        raise exception.MigrationNotFound(migration_id=id)
+
+    return result
+
+
+@require_admin_context
+def migration_get_by_instance_and_status(context, instance_uuid, status):
+    result = model_query(context, models.Migration, read_deleted="yes").\
+                     filter_by(instance_uuid=instance_uuid).\
+                     filter_by(status=status).\
+                     first()
+
+    if not result:
+        raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
+                                                  status=status)
+
+    return result
+
+
+@require_admin_context
+def migration_get_all_unconfirmed(context, confirm_window, session=None):
+    confirm_window = datetime.datetime.utcnow() - datetime.timedelta(
+        seconds=confirm_window)
+
+    return model_query(context, models.Migration, session=session,
+                       read_deleted="yes").\
+                     filter(models.Migration.updated_at <= confirm_window).\
+                     filter_by(status="finished").\
+                     all()
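+
+# Sketch (hypothetical one-hour window, in seconds): find resize migrations
+# still awaiting confirmation:
+#
+#     stale = migration_get_all_unconfirmed(admin_ctxt, 3600)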
+ """ + filters = filters or {} + + read_deleted = "yes" if inactive else "no" + rows = model_query(context, models.VolumeTypes, + read_deleted=read_deleted).\ + options(joinedload('extra_specs')).\ + order_by("name").\ + all() + + # TODO(sirp): this patern of converting rows to a result with extra_specs + # is repeated quite a bit, might be worth creating a method for it + result = {} + for row in rows: + result[row['name']] = _dict_with_extra_specs(row) + + return result + + +@require_context +def volume_type_get(context, id, session=None): + """Returns a dict describing specific volume_type""" + result = model_query(context, models.VolumeTypes, session=session).\ + options(joinedload('extra_specs')).\ + filter_by(id=id).\ + first() + + if not result: + raise exception.VolumeTypeNotFound(volume_type_id=id) + + return _dict_with_extra_specs(result) + + +@require_context +def volume_type_get_by_name(context, name, session=None): + """Returns a dict describing specific volume_type""" + result = model_query(context, models.VolumeTypes, session=session).\ + options(joinedload('extra_specs')).\ + filter_by(name=name).\ + first() + + if not result: + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + else: + return _dict_with_extra_specs(result) + + +@require_admin_context +def volume_type_destroy(context, name): + session = get_session() + with session.begin(): + volume_type_ref = volume_type_get_by_name(context, name, + session=session) + volume_type_id = volume_type_ref['id'] + session.query(models.VolumeTypes).\ + filter_by(id=volume_type_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + session.query(models.VolumeTypeExtraSpecs).\ + filter_by(volume_type_id=volume_type_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +#################### + + +def _volume_type_extra_specs_query(context, volume_type_id, session=None): + return model_query(context, models.VolumeTypeExtraSpecs, session=session, + read_deleted="no").\ + filter_by(volume_type_id=volume_type_id) + + +@require_context +def volume_type_extra_specs_get(context, volume_type_id): + rows = _volume_type_extra_specs_query(context, volume_type_id).\ + all() + + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +def volume_type_extra_specs_delete(context, volume_type_id, key): + _volume_type_extra_specs_query(context, volume_type_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def volume_type_extra_specs_get_item(context, volume_type_id, key, + session=None): + result = _volume_type_extra_specs_query( + context, volume_type_id, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.VolumeTypeExtraSpecsNotFound( + extra_specs_key=key, volume_type_id=volume_type_id) + + return result + + +@require_context +def volume_type_extra_specs_update_or_create(context, volume_type_id, + specs): + session = get_session() + spec_ref = None + for key, value in specs.iteritems(): + try: + spec_ref = volume_type_extra_specs_get_item( + context, volume_type_id, key, session) + except exception.VolumeTypeExtraSpecsNotFound, e: + spec_ref = models.VolumeTypeExtraSpecs() + spec_ref.update({"key": key, "value": value, + "volume_type_id": volume_type_id, + "deleted": 0}) + spec_ref.save(session=session) + return specs + 
+
+####################
+
+
+@require_admin_context
+def sm_backend_conf_create(context, values):
+    backend_conf = models.SMBackendConf()
+    backend_conf.update(values)
+    backend_conf.save()
+    return backend_conf
+
+
+@require_admin_context
+def sm_backend_conf_update(context, sm_backend_id, values):
+    session = get_session()
+    with session.begin():
+        backend_conf = model_query(context, models.SMBackendConf,
+                                   session=session,
+                                   read_deleted="yes").\
+                                   filter_by(id=sm_backend_id).\
+                                   first()
+
+        if not backend_conf:
+            raise exception.NotFound(
+                _("No backend config with id %(sm_backend_id)s") % locals())
+
+        backend_conf.update(values)
+        backend_conf.save(session=session)
+    return backend_conf
+
+
+@require_admin_context
+def sm_backend_conf_delete(context, sm_backend_id):
+    # FIXME(sirp): for consistency, shouldn't this just mark as deleted with
+    # `purge` actually deleting the record?
+    session = get_session()
+    with session.begin():
+        model_query(context, models.SMBackendConf, session=session,
+                    read_deleted="yes").\
+            filter_by(id=sm_backend_id).\
+            delete()
+
+
+@require_admin_context
+def sm_backend_conf_get(context, sm_backend_id):
+    result = model_query(context, models.SMBackendConf, read_deleted="yes").\
+        filter_by(id=sm_backend_id).\
+        first()
+
+    if not result:
+        raise exception.NotFound(_("No backend config with id "
+                                   "%(sm_backend_id)s") % locals())
+
+    return result
+
+
+@require_admin_context
+def sm_backend_conf_get_by_sr(context, sr_uuid):
+    session = get_session()
+    return model_query(context, models.SMBackendConf, read_deleted="yes").\
+        filter_by(sr_uuid=sr_uuid).\
+        first()
+
+
+@require_admin_context
+def sm_backend_conf_get_all(context):
+    return model_query(context, models.SMBackendConf, read_deleted="yes").\
+        all()
+
+
+####################
+
+
+def _sm_flavor_get_query(context, sm_flavor_label, session=None):
+    return model_query(context, models.SMFlavors, session=session,
+                       read_deleted="yes").\
+        filter_by(label=sm_flavor_label)
+
+
+@require_admin_context
+def sm_flavor_create(context, values):
+    sm_flavor = models.SMFlavors()
+    sm_flavor.update(values)
+    sm_flavor.save()
+    return sm_flavor
+
+
+@require_admin_context
+def sm_flavor_update(context, sm_flavor_label, values):
+    sm_flavor = sm_flavor_get(context, sm_flavor_label)
+    sm_flavor.update(values)
+    sm_flavor.save()
+    return sm_flavor
+
+
+@require_admin_context
+def sm_flavor_delete(context, sm_flavor_label):
+    session = get_session()
+    with session.begin():
+        _sm_flavor_get_query(context, sm_flavor_label).delete()
+
+
+@require_admin_context
+def sm_flavor_get(context, sm_flavor_label):
+    result = _sm_flavor_get_query(context, sm_flavor_label).first()
+
+    if not result:
+        raise exception.NotFound(
+            _("No sm_flavor called %(sm_flavor_label)s") % locals())
+
+    return result
+
+
+@require_admin_context
+def sm_flavor_get_all(context):
+    return model_query(context, models.SMFlavors, read_deleted="yes").all()
+
+
+###############################
+
+
+def _sm_volume_get_query(context, volume_id, session=None):
+    return model_query(context, models.SMVolume, session=session,
+                       read_deleted="yes").\
+        filter_by(id=volume_id)
+
+
+def sm_volume_create(context, values):
+    sm_volume = models.SMVolume()
+    sm_volume.update(values)
+    sm_volume.save()
+    return sm_volume
+
+
+def sm_volume_update(context, volume_id, values):
+    sm_volume = sm_volume_get(context, volume_id)
+    sm_volume.update(values)
+    sm_volume.save()
+    return sm_volume
+
+
+def sm_volume_delete(context, volume_id):
+    session = get_session()
+    with
session.begin(): + _sm_volume_get_query(context, volume_id, session=session).delete() + + +def sm_volume_get(context, volume_id): + result = _sm_volume_get_query(context, volume_id).first() + + if not result: + raise exception.NotFound( + _("No sm_volume with id %(volume_id)s") % locals()) + + return result + + +def sm_volume_get_all(context): + return model_query(context, models.SMVolume, read_deleted="yes").all() + + +################ + + +def _aggregate_get_query(context, model_class, id_field, id, + session=None, read_deleted=None): + return model_query(context, model_class, session=session, + read_deleted=read_deleted).filter(id_field == id) + + +@require_admin_context +def aggregate_create(context, values, metadata=None): + session = get_session() + aggregate = _aggregate_get_query(context, + models.Aggregate, + models.Aggregate.name, + values['name'], + session=session, + read_deleted='yes').first() + values.setdefault('operational_state', aggregate_states.CREATED) + if not aggregate: + aggregate = models.Aggregate() + aggregate.update(values) + aggregate.save(session=session) + elif aggregate.deleted: + values['deleted'] = False + values['deleted_at'] = None + aggregate.update(values) + aggregate.save(session=session) + else: + raise exception.AggregateNameExists(aggregate_name=values['name']) + if metadata: + aggregate_metadata_add(context, aggregate.id, metadata) + return aggregate + + +@require_admin_context +def aggregate_get(context, aggregate_id): + aggregate = _aggregate_get_query(context, + models.Aggregate, + models.Aggregate.id, + aggregate_id).first() + + if not aggregate: + raise exception.AggregateNotFound(aggregate_id=aggregate_id) + + return aggregate + + +@require_admin_context +def aggregate_get_by_host(context, host): + aggregate_host = _aggregate_get_query(context, + models.AggregateHost, + models.AggregateHost.host, + host).first() + + if not aggregate_host: + raise exception.AggregateHostNotFound(host=host) + + return aggregate_get(context, aggregate_host.aggregate_id) + + +@require_admin_context +def aggregate_update(context, aggregate_id, values): + session = get_session() + aggregate = _aggregate_get_query(context, + models.Aggregate, + models.Aggregate.id, + aggregate_id, + session=session).first() + if aggregate: + metadata = values.get('metadata') + if metadata is not None: + aggregate_metadata_add(context, + aggregate_id, + values.pop('metadata'), + set_delete=True) + with session.begin(): + aggregate.update(values) + aggregate.save(session=session) + values['metadata'] = metadata + return aggregate + else: + raise exception.AggregateNotFound(aggregate_id=aggregate_id) + + +@require_admin_context +def aggregate_delete(context, aggregate_id): + query = _aggregate_get_query(context, + models.Aggregate, + models.Aggregate.id, + aggregate_id) + if query.first(): + query.update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'operational_state': aggregate_states.DISMISSED, + 'updated_at': literal_column('updated_at')}) + else: + raise exception.AggregateNotFound(aggregate_id=aggregate_id) + + +@require_admin_context +def aggregate_get_all(context): + return model_query(context, models.Aggregate).all() + + +@require_admin_context +@require_aggregate_exists +def aggregate_metadata_get(context, aggregate_id): + rows = model_query(context, + models.AggregateMetadata).\ + filter_by(aggregate_id=aggregate_id).all() + + return dict([(r['key'], r['value']) for r in rows]) + + +@require_admin_context +@require_aggregate_exists +def 
aggregate_metadata_delete(context, aggregate_id, key): + query = _aggregate_get_query(context, + models.AggregateMetadata, + models.AggregateMetadata.aggregate_id, + aggregate_id).\ + filter_by(key=key) + if query.first(): + query.update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + else: + raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id, + metadata_key=key) + + +@require_admin_context +@require_aggregate_exists +def aggregate_metadata_get_item(context, aggregate_id, key, session=None): + result = _aggregate_get_query(context, + models.AggregateMetadata, + models.AggregateMetadata.aggregate_id, + aggregate_id, session=session, + read_deleted='yes').\ + filter_by(key=key).first() + + if not result: + raise exception.AggregateMetadataNotFound(metadata_key=key, + aggregate_id=aggregate_id) + + return result + + +@require_admin_context +@require_aggregate_exists +def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): + session = get_session() + + if set_delete: + original_metadata = aggregate_metadata_get(context, aggregate_id) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = aggregate_metadata_get_item(context, aggregate_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + for meta_key, meta_value in metadata.iteritems(): + item = {"value": meta_value} + try: + meta_ref = aggregate_metadata_get_item(context, aggregate_id, + meta_key, session) + if meta_ref.deleted: + item.update({'deleted': False, 'deleted_at': None}) + except exception.AggregateMetadataNotFound: + meta_ref = models.AggregateMetadata() + item.update({"key": meta_key, "aggregate_id": aggregate_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return metadata + + +@require_admin_context +@require_aggregate_exists +def aggregate_host_get_all(context, aggregate_id): + rows = model_query(context, + models.AggregateHost).\ + filter_by(aggregate_id=aggregate_id).all() + + return [r.host for r in rows] + + +@require_admin_context +@require_aggregate_exists +def aggregate_host_delete(context, aggregate_id, host): + query = _aggregate_get_query(context, + models.AggregateHost, + models.AggregateHost.aggregate_id, + aggregate_id).filter_by(host=host) + if query.first(): + query.update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + else: + raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, + host=host) + + +@require_admin_context +@require_aggregate_exists +def aggregate_host_add(context, aggregate_id, host): + session = get_session() + host_ref = _aggregate_get_query(context, + models.AggregateHost, + models.AggregateHost.aggregate_id, + aggregate_id, + session=session, + read_deleted='yes').\ + filter_by(host=host).first() + if not host_ref: + try: + host_ref = models.AggregateHost() + values = {"host": host, "aggregate_id": aggregate_id, } + host_ref.update(values) + host_ref.save(session=session) + except exception.DBError: + raise exception.AggregateHostConflict(host=host) + elif host_ref.deleted: + host_ref.update({'deleted': False, 'deleted_at': None}) + host_ref.save(session=session) + else: + raise exception.AggregateHostExists(host=host, + aggregate_id=aggregate_id) + return host_ref diff --git a/cinder/db/sqlalchemy/migrate_repo/README b/cinder/db/sqlalchemy/migrate_repo/README new file mode 100644 index 00000000000..6218f8cac42 
--- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. + +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/cinder/db/sqlalchemy/migrate_repo/__init__.py b/cinder/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/db/sqlalchemy/migrate_repo/manage.py b/cinder/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 00000000000..09e340f44f9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +from migrate.versioning.shell import main +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/cinder/db/sqlalchemy/migrate_repo/migrate.cfg b/cinder/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 00000000000..10c685c0e50 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=cinder + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py b/cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py new file mode 100644 index 00000000000..2e21685f8f5 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -0,0 +1,627 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +## Table code mostly autogenerated by genmodel.py +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import ForeignKeyConstraint, Integer, MetaData, String +from sqlalchemy import Table, Text +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_manageent_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + fixed_ips = Table('fixed_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('network_id', + Integer(), + ForeignKey('networks.id'), + nullable=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('allocated', Boolean(create_constraint=True, name=None)), + Column('leased', Boolean(create_constraint=True, name=None)), + Column('reserved', Boolean(create_constraint=True, name=None)), + ) + + floating_ips = Table('floating_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fixed_ip_id', + Integer(), + ForeignKey('fixed_ips.id'), + nullable=True), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + instances = Table('instances', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('internal_id', Integer()), + 
Column('admin_pass', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('image_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('kernel_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('ramdisk_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('server_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('launch_index', Integer()), + Column('key_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('key_data', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('state', Integer()), + Column('state_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('memory_mb', Integer()), + Column('vcpus', Integer()), + Column('local_gb', Integer()), + Column('hostname', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_data', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('reservation_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('mac_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + key_pairs = Table('key_pairs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fingerprint', + String(length=255, convert_unicode=False, + assert_unicode=None, + 
unicode_error=None, _warn_on_bytestring=False)), + Column('public_key', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + networks = Table('networks', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('injected', Boolean(create_constraint=True, name=None)), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('netmask', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('bridge', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('gateway', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('broadcast', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dns', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vlan', Integer()), + Column('vpn_public_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vpn_public_port', Integer()), + Column('vpn_private_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dhcp_start', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + projects = Table('projects', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_manager', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id')), + ) + + quotas = Table('quotas', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instances', Integer()), + 
Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + ) + + security_groups = Table('security_groups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + security_group_inst_assoc = Table('security_group_instance_association', + meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('security_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('instance_id', Integer(), ForeignKey('instances.id')), + ) + + security_group_rules = Table('security_group_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('parent_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('protocol', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('group_id', + Integer(), + ForeignKey('security_groups.id')), + ) + + services = Table('services', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('binary', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('topic', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('report_count', Integer(), nullable=False), + Column('disabled', Boolean(create_constraint=True, name=None)), + ) + + users = Table('users', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, + assert_unicode=None, + 
unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('access_key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('secret_key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('is_admin', Boolean(create_constraint=True, name=None)), + ) + + user_project_association = Table('user_project_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id'), + primary_key=True, + nullable=False), + ) + + user_project_role_association = Table('user_project_role_association', + meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']), + ) + + user_role_association = Table('user_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ) + + volumes = Table('volumes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('ec2_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + 
assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size', Integer()), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('mountpoint', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_time', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + tables = [auth_tokens, + instances, key_pairs, networks, fixed_ips, floating_ips, + quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, projects, + user_project_association, user_project_role_association, + user_role_association, volumes, export_devices] + + for table in tables: + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=tables) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
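+    # Tables are autoloaded from the live schema and dropped children
+    # first (see the ordering note below): tables holding foreign keys,
+    # e.g. fixed_ips and volumes referencing instances, are dropped before
+    # the tables they point at, or FK-enforcing backends reject the DROP.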
+ meta = MetaData() + meta.bind = migrate_engine + + auth_tokens = Table('auth_tokens', meta, autoload=True) + export_devices = Table('export_devices', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + floating_ips = Table('floating_ips', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + key_pairs = Table('key_pairs', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + projects = Table('projects', meta, autoload=True) + quotas = Table('quotas', meta, autoload=True) + security_groups = Table('security_groups', meta, autoload=True) + security_group_inst_assoc = Table('security_group_instance_association', + meta, autoload=True) + security_group_rules = Table('security_group_rules', meta, autoload=True) + services = Table('services', meta, autoload=True) + users = Table('users', meta, autoload=True) + user_project_association = Table('user_project_association', meta, + autoload=True) + user_project_role_association = Table('user_project_role_association', + meta, + autoload=True) + user_role_association = Table('user_role_association', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + # table order matters, don't change + for table in (auth_tokens, export_devices, floating_ips, fixed_ips, + key_pairs, networks, + quotas, security_group_inst_assoc, + security_group_rules, security_groups, services, + user_project_role_association, user_project_association, + user_role_association, + projects, users, volumes, instances): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py new file mode 100644 index 00000000000..ba1576b7b87 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -0,0 +1,236 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String, Table, Text +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + instances = Table('instances', meta, autoload=True) + services = Table('services', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + auth_tokens = Table('auth_tokens', meta, autoload=True) + + # + # New Tables + # + certificates = Table('certificates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('file_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + consoles = Table('consoles', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', Integer()), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('port', Integer(), nullable=True), + Column('pool_id', + Integer(), + ForeignKey('console_pools.id')), + ) + + console_pools = Table('console_pools', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('console_type', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_hostname', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('compute_host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + instance_actions = Table('instance_actions', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), 
primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id')), + Column('action', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('error', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + iscsi_targets = Table('iscsi_targets', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('target_num', Integer()), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + tables = [certificates, console_pools, consoles, instance_actions, + iscsi_targets] + for table in tables: + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=tables) + raise + + auth_tokens.c.user_id.alter(type=String(length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + # + # New Columns + # + instances_availability_zone = Column( + 'availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + instances_locked = Column('locked', + Boolean(create_constraint=True, name=None)) + + networks_cidr_v6 = Column( + 'cidr_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + networks_ra_server = Column( + 'ra_server', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + services_availability_zone = Column( + 'availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + instances.create_column(instances_availability_zone) + instances.create_column(instances_locked) + networks.create_column(networks_cidr_v6) + networks.create_column(networks_ra_server) + services.create_column(services_availability_zone) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + instances = Table('instances', meta, autoload=True) + services = Table('services', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + auth_tokens = Table('auth_tokens', meta, autoload=True) + + certificates = Table('certificates', meta, autoload=True) + consoles = Table('consoles', meta, autoload=True) + console_pools = Table('console_pools', meta, autoload=True) + instance_actions = Table('instance_actions', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + + # table order matters, don't change + tables = [certificates, consoles, console_pools, instance_actions, + iscsi_targets] + for table in tables: + table.drop() + + auth_tokens.c.user_id.alter(type=Integer()) + + instances.drop_column('availability_zone') + instances.drop_column('locked') + networks.drop_column('cidr_v6') + networks.drop_column('ra_server') + services.drop_column('availability_zone') diff --git 
a/cinder/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql new file mode 100644 index 00000000000..cf5c1a20854 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql @@ -0,0 +1,20 @@ +BEGIN; + + DROP TABLE certificates; + DROP TABLE consoles; + DROP TABLE console_pools; + DROP TABLE instance_actions; + DROP TABLE iscsi_targets; + + ALTER TABLE auth_tokens ADD COLUMN user_id_backup INTEGER; + UPDATE auth_tokens SET user_id_backup = CAST(user_id AS INTEGER); + ALTER TABLE auth_tokens DROP COLUMN user_id; + ALTER TABLE auth_tokens RENAME COLUMN user_id_backup TO user_id; + + ALTER TABLE instances DROP COLUMN availability_zone; + ALTER TABLE instances DROP COLUMN locked; + ALTER TABLE networks DROP COLUMN cidr_v6; + ALTER TABLE networks DROP COLUMN ra_server; + ALTER TABLE services DROP COLUMN availability_zone; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql new file mode 100644 index 00000000000..8c6a5becaac --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql @@ -0,0 +1,388 @@ +BEGIN TRANSACTION; + + DROP TABLE certificates; + + DROP TABLE console_pools; + + DROP TABLE consoles; + + DROP TABLE instance_actions; + + DROP TABLE iscsi_targets; + + CREATE TEMPORARY TABLE auth_tokens_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + token_hash VARCHAR(255) NOT NULL, + user_id VARCHAR(255), + server_manageent_url VARCHAR(255), + storage_url VARCHAR(255), + cdn_management_url VARCHAR(255), + PRIMARY KEY (token_hash), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO auth_tokens_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + token_hash, + user_id, + server_manageent_url, + storage_url, + cdn_management_url + FROM auth_tokens; + + DROP TABLE auth_tokens; + + CREATE TABLE auth_tokens ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + token_hash VARCHAR(255) NOT NULL, + user_id INTEGER, + server_manageent_url VARCHAR(255), + storage_url VARCHAR(255), + cdn_management_url VARCHAR(255), + PRIMARY KEY (token_hash), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO auth_tokens + SELECT created_at, + updated_at, + deleted_at, + deleted, + token_hash, + user_id, + server_manageent_url, + storage_url, + cdn_management_url + FROM auth_tokens_backup; + + DROP TABLE auth_tokens_backup; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_id VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + instance_type VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + mac_address VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)) + ); + 
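+    -- SQLite's ALTER TABLE cannot drop columns, so each table touched by
+    -- the upgrade is rebuilt: rows are copied into a temporary *_backup
+    -- table, the table is recreated without the new columns, and the
+    -- rows are copied back.
+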
+ INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_id, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + instance_type, + user_data, + reservation_id, + mac_address, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_id VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + instance_type VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + mac_address VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_id, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + instance_type, + user_data, + reservation_id, + mac_address, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description + FROM instances_backup; + + DROP TABLE instances_backup; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + 
updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host + FROM networks_backup; + + DROP TABLE networks_backup; + + CREATE TEMPORARY TABLE services_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + host VARCHAR(255), + binary VARCHAR(255), + topic VARCHAR(255), + report_count INTEGER NOT NULL, + disabled BOOLEAN, + availability_zone VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (disabled IN (0, 1)) + ); + + INSERT INTO services_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + host, + binary, + topic, + report_count, + disabled, + availability_zone + FROM services; + + DROP TABLE services; + + CREATE TABLE services ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + host VARCHAR(255), + binary VARCHAR(255), + topic VARCHAR(255), + report_count INTEGER NOT NULL, + disabled BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (disabled IN (0, 1)) + ); + + INSERT INTO services + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + host, + binary, + topic, + report_count, + disabled + FROM services_backup; + + DROP TABLE services_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/cinder/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py new file mode 100644 index 00000000000..668b77f0ffc --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks_label = Column( + 'label', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + networks.create_column(networks_label) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.drop_column('label') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql new file mode 100644 index 00000000000..01601cac07b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql @@ -0,0 +1,111 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + label VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server, + label + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server + FROM networks_backup; + + DROP TABLE networks_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py new file mode 100644 index 00000000000..e46d9d44300 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py @@ -0,0 +1,66 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + zones = Table('zones', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('api_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + for table in (zones, ): + try: + table.create() + except Exception: + LOG.info(repr(table)) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + for table in (zones, ): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py new file mode 100644 index 00000000000..4eb66111a06 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + quotas = Table('quotas', meta, autoload=True) + + instance_metadata_table = Table('instance_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + for table in (instance_metadata_table, ): + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + raise + + quota_metadata_items = Column('metadata_items', Integer()) + quotas.create_column(quota_metadata_items) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + quotas = Table('quotas', meta, autoload=True) + + instance_metadata_table = Table('instance_metadata', meta, autoload=True) + + for table in (instance_metadata_table, ): + table.drop() + + quotas.drop_column('metadata_items') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/cinder/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py new file mode 100644 index 00000000000..df2be9df44c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py @@ -0,0 +1,54 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + # Add columns to existing tables + volumes_provider_location = Column('provider_location', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + volumes_provider_auth = Column('provider_auth', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + volumes.create_column(volumes_provider_location) + volumes.create_column(volumes_provider_auth) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + volumes.drop_column('provider_location') + volumes.drop_column('provider_auth') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql new file mode 100644 index 00000000000..f55c284379a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql @@ -0,0 +1,113 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth + FROM volumes; + + DROP TABLE volumes; + + CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description + FROM volumes_backup; + + DROP TABLE volumes_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py 
b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py new file mode 100644 index 00000000000..d84fa173400 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py @@ -0,0 +1,70 @@ +# Copyright 2011 OpenStack LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + + # + # New Columns + # + fixed_ips_addressV6 = Column( + "addressV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_netmaskV6 = Column( + "netmaskV6", + String( + length=3, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_gatewayV6 = Column( + "gatewayV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + # Add columns to existing tables + fixed_ips.create_column(fixed_ips_addressV6) + fixed_ips.create_column(fixed_ips_netmaskV6) + fixed_ips.create_column(fixed_ips_gatewayV6) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + + fixed_ips.drop_column('addressV6') + fixed_ips.drop_column('netmaskV6') + fixed_ips.drop_column('gatewayV6') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql new file mode 100644 index 00000000000..44d34769820 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql @@ -0,0 +1,79 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN DEFAULT FALSE, + leased BOOLEAN DEFAULT FALSE, + reserved BOOLEAN DEFAULT FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + addressV6 VARCHAR(255), + netmaskV6 VARCHAR(3), + gatewayV6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (leased IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (deleted IN (0, 1)), + CHECK (reserved IN (0, 1)) + ); + + INSERT INTO fixed_ips_backup + SELECT id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted, + addressV6, + netmaskV6, + gatewayV6 + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN DEFAULT FALSE, + leased BOOLEAN DEFAULT FALSE, + reserved BOOLEAN DEFAULT FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + 
deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + CHECK (leased IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (deleted IN (0, 1)), + CHECK (reserved IN (0, 1)) + ); + + INSERT INTO fixed_ips + SELECT id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py new file mode 100644 index 00000000000..98e53862827 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Ken Pepple +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here + # Don't create your own engine; bind migrate_engine + # to your metadata + meta = MetaData() + meta.bind = migrate_engine + # + # New Tables + # + instance_types = Table('instance_types', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('id', Integer(), primary_key=True, nullable=False), + Column('memory_mb', Integer(), nullable=False), + Column('vcpus', Integer(), nullable=False), + Column('local_gb', Integer(), nullable=False), + Column('flavorid', Integer(), nullable=False, unique=True), + Column('swap', Integer(), nullable=False, default=0), + Column('rxtx_quota', Integer(), nullable=False, default=0), + Column('rxtx_cap', Integer(), nullable=False, default=0)) + try: + instance_types.create() + except Exception: + LOG.info(repr(instance_types)) + LOG.exception('Exception while creating instance_types table') + raise + + # Here are the old static instance types + INSTANCE_TYPES = { + 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), + 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), + 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), + 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), + 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} + try: + i = instance_types.insert() + for name, values in INSTANCE_TYPES.iteritems(): + # FIXME(kpepple) should we be seeding created_at / updated_at ? 
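+            # (if so, a single timestamp could be fed to both columns in
+            #  the execute() call below, e.g. 'created_at': now,
+            #  'updated_at': now -- sketched here only, not applied)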
+            # now = datetime.datetime.utcnow()
+            i.execute({'name': name, 'memory_mb': values["memory_mb"],
+                       'vcpus': values["vcpus"], 'deleted': False,
+                       'local_gb': values["local_gb"],
+                       'flavorid': values["flavorid"]})
+    except Exception:
+        LOG.info(repr(instance_types))
+        LOG.exception('Exception while seeding instance_types table')
+        raise
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instance_types = Table('instance_types', meta, autoload=True)
+    for table in (instance_types, ):
+        table.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
new file mode 100644
index 00000000000..acedd3ad022
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
@@ -0,0 +1,70 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    instances = Table('instances', meta, autoload=True)
+
+    #
+    # New Tables
+    #
+    migrations = Table('migrations', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), primary_key=True, nullable=False),
+            Column('source_compute', String(255)),
+            Column('dest_compute', String(255)),
+            Column('dest_host', String(255)),
+            Column('instance_id', Integer, ForeignKey('instances.id'),
+                   nullable=True),
+            Column('status', String(255)),
+            )
+
+    for table in (migrations, ):
+        try:
+            table.create()
+        except Exception:
+            LOG.info(repr(table))
+            LOG.exception('Exception while creating table')
+            raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    instances = Table('instances', meta, autoload=True)
+
+    migrations = Table('migrations', meta, autoload=True)
+
+    for table in (migrations, ):
+        table.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
new file mode 100644
index 00000000000..da01940bd39
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances_os_type = Column('os_type', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + instances.create_column(instances_os_type) + migrate_engine.execute(instances.update()\ + .where(instances.c.os_type == None)\ + .values(os_type='linux')) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('os_type') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/cinder/db/sqlalchemy/migrate_repo/versions/011_live_migration.py new file mode 100644 index 00000000000..c2a3560a390 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/011_live_migration.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData +from sqlalchemy import Table, Text +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + compute_nodes = Table('compute_nodes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('service_id', Integer(), nullable=False), + + Column('vcpus', Integer(), nullable=False), + Column('memory_mb', Integer(), nullable=False), + Column('local_gb', Integer(), nullable=False), + Column('vcpus_used', Integer(), nullable=False), + Column('memory_mb_used', Integer(), nullable=False), + Column('local_gb_used', Integer(), nullable=False), + Column('hypervisor_type', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('hypervisor_version', Integer(), nullable=False), + Column('cpu_info', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + ) + + try: + compute_nodes.create() + except Exception: + LOG.info(repr(compute_nodes)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=[compute_nodes]) + raise + + instances_launched_on = Column( + 'launched_on', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + instances.create_column(instances_launched_on) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + compute_nodes = Table('compute_nodes', meta, autoload=True) + + compute_nodes.drop() + + instances.drop_column('launched_on') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/cinder/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py new file mode 100644 index 00000000000..a626d2c7dbd --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py @@ -0,0 +1,90 @@ +# Copyright (c) 2011 NTT. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + + # Alter column name + networks.c.ra_server.alter(name='gateway_v6') + # Add new column to existing table + networks_netmask_v6 = Column( + 'netmask_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + networks.create_column(networks_netmask_v6) + + # drop existing columns from table + fixed_ips.c.addressV6.drop() + fixed_ips.c.netmaskV6.drop() + fixed_ips.c.gatewayV6.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + + networks.c.gateway_v6.alter(name='ra_server') + networks.drop_column('netmask_v6') + + fixed_ips_addressV6 = Column( + "addressV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_netmaskV6 = Column( + "netmaskV6", + String( + length=3, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_gatewayV6 = Column( + "gatewayV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + for column in (fixed_ips_addressV6, + fixed_ips_netmaskV6, + fixed_ips_gatewayV6): + fixed_ips.create_column(column) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql new file mode 100644 index 00000000000..0779f50e8a9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql @@ -0,0 +1,195 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + label VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server, + label + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + 
host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server AS gateway_v6, + label, + NULL AS netmask_v6 + FROM networks_backup; + + DROP TABLE networks_backup; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + addressV6 VARCHAR(255), + netmaskV6 VARCHAR(3), + gatewayV6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + addressV6, + netmaskV6, + gatewayV6 + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/cinder/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py new file mode 100644 index 00000000000..d8735ec7df9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + + old_flavor_id = Column('old_flavor_id', Integer()) + new_flavor_id = Column('new_flavor_id', Integer()) + + migrations.create_column(old_flavor_id) + migrations.create_column(new_flavor_id) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + + migrations.drop_column('old_flavor_id') + migrations.drop_column('new_flavor_id') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql new file mode 100644 index 00000000000..fbba364beab --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql @@ -0,0 +1,69 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE migrations_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + source_compute VARCHAR(255), + dest_compute VARCHAR(255), + dest_host VARCHAR(255), + instance_id INTEGER, + status VARCHAR(255), + old_flavor_id INTEGER, + new_flavor_id INTEGER, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO migrations_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + source_compute, + dest_compute, + dest_host, + instance_id, + status, + old_flavor_id, + new_flavor_id + FROM migrations; + + DROP TABLE migrations; + + CREATE TABLE migrations ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + source_compute VARCHAR(255), + dest_compute VARCHAR(255), + dest_host VARCHAR(255), + instance_id INTEGER, + status VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO migrations + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + source_compute, + dest_compute, + dest_host, + instance_id, + status + FROM migrations_backup; + + DROP TABLE migrations_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py new file mode 100644 index 00000000000..b363caca5e4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + + c_instance_type_id = Column('instance_type_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + instances.create_column(c_instance_type_id) + + type_names = {} + recs = migrate_engine.execute(instance_types.select()) + for row in recs: + type_names[row[0]] = row[1] + + for type_id, type_name in type_names.iteritems(): + migrate_engine.execute(instances.update()\ + .where(instances.c.instance_type == type_name)\ + .values(instance_type_id=type_id)) + + instances.c.instance_type.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + + c_instance_type = Column('instance_type', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + instances.create_column(c_instance_type) + + type_names = {} + recs = migrate_engine.execute(instance_types.select()) + for row in recs: + type_names[row[0]] = row[1] + + for type_id, type_name in type_names.iteritems(): + migrate_engine.execute(instances.update()\ + .where(instances.c.instance_type_id == type_id)\ + .values(instance_type=type_name)) + + instances.c.instance_type_id.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/cinder/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py new file mode 100644 index 00000000000..51db850665e --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Grid Dynamics +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, MetaData, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + floating_ips = Table('floating_ips', meta, autoload=True) + c_auto_assigned = Column('auto_assigned', Boolean, default=False) + floating_ips.create_column(c_auto_assigned) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + floating_ips = Table('floating_ips', meta, autoload=True) + floating_ips.drop_column('auto_assigned') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql new file mode 100644 index 00000000000..c599ef2b355 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql @@ -0,0 +1,62 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (auto_assigned IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned + FROM floating_ips; + + DROP TABLE floating_ips; + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host + FROM floating_ips_backup; + + DROP TABLE floating_ips_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py new file mode 100644 index 00000000000..1c7081c4ad6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -0,0 +1,213 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
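+#
+# Note: this migration pivots the old column-per-resource quotas table
+# into a row-per-resource layout. For example, an old-style row
+#     (project_id='p1', instances=10, gigabytes=500)
+# becomes one new-style row per non-NULL resource column:
+#     (project_id='p1', resource='instances', hard_limit=10)
+#     (project_id='p1', resource='gigabytes', hard_limit=500)
+# (the project id above is illustrative only)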
+ +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table + +from cinder import utils + +resources = [ + 'instances', + 'cores', + 'volumes', + 'gigabytes', + 'floating_ips', + 'metadata_items', +] + + +def old_style_quotas_table(meta, name): + return Table(name, meta, + Column('id', Integer(), primary_key=True), + Column('created_at', DateTime(), + default=utils.utcnow), + Column('updated_at', DateTime(), + onupdate=utils.utcnow), + Column('deleted_at', DateTime()), + Column('deleted', Boolean(), default=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)), + Column('instances', Integer()), + Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + Column('metadata_items', Integer()), + ) + + +def new_style_quotas_table(meta, name): + return Table(name, meta, + Column('id', Integer(), primary_key=True), + Column('created_at', DateTime(), + default=utils.utcnow), + Column('updated_at', DateTime(), + onupdate=utils.utcnow), + Column('deleted_at', DateTime()), + Column('deleted', Boolean(), default=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)), + Column('resource', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=False), + Column('hard_limit', Integer(), nullable=True), + ) + + +def quotas_table(meta, name='quotas'): + return Table(name, meta, autoload=True) + + +def _assert_no_duplicate_project_ids(quotas): + project_ids = set() + message = ('There are multiple active quotas for project "%s" ' + '(among others, possibly). 
' + 'Please resolve all ambiguous quotas before ' + 'reattempting the migration.') + for quota in quotas: + assert quota.project_id not in project_ids, message % quota.project_id + project_ids.add(quota.project_id) + + +def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas): + """Ensure that there are no duplicate non-deleted quota entries.""" + select = quotas.select().where(quotas.c.deleted == False) + results = migrate_engine.execute(select) + _assert_no_duplicate_project_ids(list(results)) + + +def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas): + """Ensure that there are no duplicate non-deleted quota entries.""" + for resource in resources: + select = quotas.select().\ + where(quotas.c.deleted == False).\ + where(quotas.c.resource == resource) + results = migrate_engine.execute(select) + _assert_no_duplicate_project_ids(list(results)) + + +def convert_forward(migrate_engine, old_quotas, new_quotas): + quotas = list(migrate_engine.execute(old_quotas.select())) + for quota in quotas: + for resource in resources: + hard_limit = getattr(quota, resource) + if hard_limit is None: + continue + insert = new_quotas.insert().values( + created_at=quota.created_at, + updated_at=quota.updated_at, + deleted_at=quota.deleted_at, + deleted=quota.deleted, + project_id=quota.project_id, + resource=resource, + hard_limit=hard_limit) + migrate_engine.execute(insert) + + +def earliest(date1, date2): + if date1 is None and date2 is None: + return None + if date1 is None: + return date2 + if date2 is None: + return date1 + if date1 < date2: + return date1 + return date2 + + +def latest(date1, date2): + if date1 is None and date2 is None: + return None + if date1 is None: + return date2 + if date2 is None: + return date1 + if date1 > date2: + return date1 + return date2 + + +def convert_backward(migrate_engine, old_quotas, new_quotas): + quotas = {} + for quota in migrate_engine.execute(new_quotas.select()): + if (quota.resource not in resources + or quota.hard_limit is None or quota.deleted): + continue + if not quota.project_id in quotas: + quotas[quota.project_id] = { + 'project_id': quota.project_id, + 'created_at': quota.created_at, + 'updated_at': quota.updated_at, + quota.resource: quota.hard_limit, + } + else: + quotas[quota.project_id]['created_at'] = earliest( + quota.created_at, quotas[quota.project_id]['created_at']) + quotas[quota.project_id]['updated_at'] = latest( + quota.updated_at, quotas[quota.project_id]['updated_at']) + quotas[quota.project_id][quota.resource] = quota.hard_limit + + for quota in quotas.itervalues(): + insert = old_quotas.insert().values(**quota) + migrate_engine.execute(insert) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + old_quotas = quotas_table(meta) + assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas) + + new_quotas = new_style_quotas_table(meta, 'quotas_new') + new_quotas.create() + convert_forward(migrate_engine, old_quotas, new_quotas) + old_quotas.drop() + + # clear metadata to work around this: + # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 + meta.clear() + new_quotas = quotas_table(meta, 'quotas_new') + new_quotas.rename('quotas') + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
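+    # convert_backward() re-merges the per-resource rows into one row per
+    # project, keeping the earliest created_at and the latest updated_at
+    # seen across that project's quota rows (see earliest()/latest() above).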
+ meta = MetaData() + meta.bind = migrate_engine + + new_quotas = quotas_table(meta) + assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas) + + old_quotas = old_style_quotas_table(meta, 'quotas_old') + old_quotas.create() + convert_backward(migrate_engine, old_quotas, new_quotas) + new_quotas.drop() + + # clear metadata to work around this: + # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 + meta.clear() + old_quotas = quotas_table(meta, 'quotas_old') + old_quotas.rename('quotas') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/cinder/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py new file mode 100644 index 00000000000..0aed48a7d19 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + continue + try: + types[instance.id] = int(instance.instance_type_id) + except ValueError: + LOG.warn("Instance %s did not have instance_type_id " + "converted to an integer because its value is %s" % + (instance.id, instance.instance_type_id)) + types[instance.id] = None + + integer_column = Column('instance_type_id_int', Integer(), nullable=True) + string_column = instances.c.instance_type_id + + integer_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_int=instance_type_id) + migrate_engine.execute(update) + + string_column.alter(name='instance_type_id_str') + integer_column.alter(name='instance_type_id') + string_column.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + integer_column = instances.c.instance_type_id + string_column = Column('instance_type_id_str', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + else: + types[instance.id] = str(instance.instance_type_id) + + string_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_str=instance_type_id) + 
migrate_engine.execute(update) + + integer_column.alter(name='instance_type_id_int') + string_column.alter(name='instance_type_id') + integer_column.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/cinder/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py new file mode 100644 index 00000000000..59ead97ada4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + tokens = Table('auth_tokens', meta, autoload=True) + c_manageent = tokens.c.server_manageent_url + c_manageent.alter(name='server_management_url') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + tokens = Table('auth_tokens', meta, autoload=True) + c_management = tokens.c.server_management_url + c_management.alter(name='server_manageent_url') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/cinder/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py new file mode 100644 index 00000000000..e0670e3c722 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData +from sqlalchemy import Integer, DateTime, Boolean, String + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + try: + snapshots.create() + except Exception: + LOG.info(repr(snapshots)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + snapshots = Table('snapshots', meta, autoload=True) + snapshots.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py b/cinder/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py new file mode 100644 index 00000000000..c5a632ca08a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData, Integer + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + snapshot_id = Column('snapshot_id', Integer()) + # Add columns to existing tables + volumes.create_column(snapshot_id) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + volumes.drop_column('snapshot_id') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql new file mode 100644 index 00000000000..97b94660453 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql @@ -0,0 +1,119 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id + FROM volumes; + + DROP TABLE volumes; + + CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth + FROM volumes_backup; + + DROP TABLE volumes_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py b/cinder/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py new file mode 100644 index 00000000000..64b539ed65c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + + image_id_column = instances.c.image_id + image_id_column.alter(name='image_ref') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + image_ref_column = instances.c.image_ref + image_ref_column.alter(name='image_id') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py b/cinder/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py new file mode 100644 index 00000000000..2c10b790a68 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + if migrate_engine.name == "mysql": + migrate_engine.execute("ALTER TABLE auth_tokens Engine=InnoDB") + migrate_engine.execute("ALTER TABLE certificates Engine=InnoDB") + migrate_engine.execute("ALTER TABLE compute_nodes Engine=InnoDB") + migrate_engine.execute("ALTER TABLE console_pools Engine=InnoDB") + migrate_engine.execute("ALTER TABLE consoles Engine=InnoDB") + migrate_engine.execute("ALTER TABLE export_devices Engine=InnoDB") + migrate_engine.execute("ALTER TABLE fixed_ips Engine=InnoDB") + migrate_engine.execute("ALTER TABLE floating_ips Engine=InnoDB") + migrate_engine.execute("ALTER TABLE instance_actions Engine=InnoDB") + migrate_engine.execute("ALTER TABLE instance_metadata Engine=InnoDB") + migrate_engine.execute("ALTER TABLE instance_types Engine=InnoDB") + migrate_engine.execute("ALTER TABLE instances Engine=InnoDB") + migrate_engine.execute("ALTER TABLE iscsi_targets Engine=InnoDB") + migrate_engine.execute("ALTER TABLE key_pairs Engine=InnoDB") + migrate_engine.execute("ALTER TABLE migrate_version Engine=InnoDB") + migrate_engine.execute("ALTER TABLE migrations Engine=InnoDB") + migrate_engine.execute("ALTER TABLE networks Engine=InnoDB") + migrate_engine.execute("ALTER TABLE projects Engine=InnoDB") + migrate_engine.execute("ALTER TABLE quotas Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE security_group_instance_association Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE security_group_rules Engine=InnoDB") + migrate_engine.execute("ALTER TABLE security_groups Engine=InnoDB") + migrate_engine.execute("ALTER TABLE services Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE user_project_association Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE user_project_role_association Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE user_role_association Engine=InnoDB") + migrate_engine.execute("ALTER TABLE users Engine=InnoDB") + migrate_engine.execute("ALTER TABLE volumes Engine=InnoDB") + migrate_engine.execute("ALTER TABLE zones Engine=InnoDB") + migrate_engine.execute("ALTER TABLE snapshots Engine=InnoDB") + + +def downgrade(migrate_engine): + pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py new file mode 100644 index 00000000000..ee607dd9240 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances_vm_mode = Column('vm_mode', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + instances.create_column(instances_vm_mode) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('vm_mode') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py b/cinder/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py new file mode 100644 index 00000000000..5c6ddb97089 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py @@ -0,0 +1,92 @@ +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column +from sqlalchemy import DateTime, Boolean, Integer, String +from sqlalchemy import ForeignKey +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + + # + # New Tables + # + block_device_mapping = Table('block_device_mapping', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, autoincrement=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('device_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('delete_on_termination', + Boolean(create_constraint=True, name=None), + default=False), + Column('virtual_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True), + Column('snapshot_id', + Integer(), + ForeignKey('snapshots.id'), + nullable=True), + Column('volume_id', Integer(), ForeignKey('volumes.id'), + nullable=True), + Column('volume_size', Integer(), nullable=True), + Column('no_device', + Boolean(create_constraint=True, name=None), + nullable=True), + ) + try: + block_device_mapping.create() + except Exception: + LOG.info(repr(block_device_mapping)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=[block_device_mapping]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + block_device_mapping.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py new file mode 100644 index 00000000000..313cb16de2c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
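+
+# A sketch of what this migration does (descriptive note, not part of the
+# original commit): it adds a nullable 36-character `uuid` column to the
+# existing `instances` table and backfills every row with a freshly
+# generated UUID, e.g.
+#
+#     >>> str(utils.gen_uuid())
+#     '4bfcc5f9-405c-4225-a2b2-2ed4a4d6e2d1'   # illustrative value only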
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+from cinder import utils
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    uuid_column = Column("uuid", String(36))
+    instances.create_column(uuid_column)
+
+    # look rows up by column name: 'id' is not the first column of the
+    # reflected table, so positional indexing would pick the wrong value
+    rows = migrate_engine.execute(instances.select())
+    for row in rows:
+        instance_uuid = str(utils.gen_uuid())
+        migrate_engine.execute(instances.update()\
+                .where(instances.c.id == row['id'])\
+                .values(uuid=instance_uuid))
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    instances.drop_column('uuid')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
new file mode 100644
index 00000000000..d8f038b0d49
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
@@ -0,0 +1,93 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+from cinder import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+    #
+    # New Tables
+    #
+    builds = Table('agent_builds', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), primary_key=True, nullable=False),
+            Column('hypervisor',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('os',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('architecture',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('version',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('url',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('md5hash',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            )
+    for table in (builds, ):
+        try:
+            table.create()
+        except Exception:
+            # surface the failure instead of silently continuing
+            LOG.info(repr(table))
+            LOG.exception('Exception while creating table')
+            raise
+
+    instances = Table('instances', meta, autoload=True)
+
+    #
+    # New Columns
+    #
+    architecture = Column('architecture', String(length=255))
+
+    # Add columns to existing tables
+    instances.create_column(architecture)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    builds = Table('agent_builds', meta, autoload=True)
+    for table in (builds, ):
+        table.drop()
+
+    instances = Table('instances', meta, autoload=True)
+    instances.drop_column('architecture')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py b/cinder/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
new file mode 100644
index 00000000000..8b653444c1c
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
@@ -0,0 +1,65 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime
+from sqlalchemy import Integer, MetaData, String
+from sqlalchemy import Table
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here.
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + # + # New Tables + # + provider_fw_rules = Table('provider_fw_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('protocol', + String(length=5, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + for table in (provider_fw_rules,): + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + provider_fw_rules = Table('provider_fw_rules', meta, autoload=True) + for table in (provider_fw_rules,): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py b/cinder/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py new file mode 100644 index 00000000000..b8346b73541 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py @@ -0,0 +1,76 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 University of Southern California +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instance_types = Table('instance_types', meta, autoload=True) + + # + # New Tables + # + instance_type_extra_specs_table = Table('instance_type_extra_specs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_type_id', + Integer(), + ForeignKey('instance_types.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + for table in (instance_type_extra_specs_table, ): + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instance_types = Table('instance_types', meta, autoload=True) + + instance_type_extra_specs_table = Table('instance_type_extra_specs', + meta, + autoload=True) + for table in (instance_type_extra_specs_table, ): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py b/cinder/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py new file mode 100644 index 00000000000..80eb836c06b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py @@ -0,0 +1,41 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Float, Integer, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + # + # New Columns + # + weight_offset = Column('weight_offset', Float(), default=0.0) + weight_scale = Column('weight_scale', Float(), default=1.0) + + zones.create_column(weight_offset) + zones.create_column(weight_scale) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + zones.drop_column('weight_offset') + zones.drop_column('weight_scale') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py b/cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py new file mode 100644 index 00000000000..a34baa83d4c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import select, Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table + +from cinder import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + c = instances.columns['mac_address'] + + interface = Column('bridge_interface', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)) + + virtual_interface_id = Column('virtual_interface_id', + Integer()) + # add interface column to networks table + # values will have to be set manually before running cinder + try: + networks.create_column(interface) + except Exception: + LOG.error(_("interface column not added to networks table")) + raise + + # + # New Tables + # + virtual_interfaces = Table('virtual_interfaces', meta, + Column('created_at', DateTime(timezone=False), + default=utils.utcnow()), + Column('updated_at', DateTime(timezone=False), + onupdate=utils.utcnow()), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('network_id', + Integer(), + ForeignKey('networks.id')), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + mysql_engine='InnoDB') + + # create virtual_interfaces table + try: + virtual_interfaces.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(virtual_interfaces)) + raise + + # add virtual_interface_id column to fixed_ips table + try: + fixed_ips.create_column(virtual_interface_id) + except Exception: + LOG.error(_("VIF column not added to fixed_ips table")) + raise + + # populate the virtual_interfaces table + # extract data from existing instance and fixed_ip tables + s = select([instances.c.id, instances.c.mac_address, + fixed_ips.c.network_id], + fixed_ips.c.instance_id == instances.c.id) + keys = ('instance_id', 'address', 'network_id') + join_list = [dict(zip(keys, row)) for row in s.execute()] + LOG.debug(_("join list for moving mac_addresses |%s|"), join_list) + + # insert data into the table + if join_list: + i = virtual_interfaces.insert() + i.execute(join_list) + + # populate the fixed_ips virtual_interface_id column + s = select([fixed_ips.c.id, fixed_ips.c.instance_id], + fixed_ips.c.instance_id != None) + + for row in s.execute(): + m = select([virtual_interfaces.c.id]).\ + where(virtual_interfaces.c.instance_id == row['instance_id']).\ + as_scalar() + u = fixed_ips.update().values(virtual_interface_id=m).\ + where(fixed_ips.c.id == row['id']) + u.execute() + + # drop the mac_address column from instances + 
c.drop()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # grab tables
+    instances = Table('instances', meta, autoload=True)
+    networks = Table('networks', meta, autoload=True)
+    fixed_ips = Table('fixed_ips', meta, autoload=True)
+    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
+
+    mac_address = Column('mac_address',
+                         String(length=255, convert_unicode=False,
+                                assert_unicode=None,
+                                unicode_error=None,
+                                _warn_on_bytestring=False))
+
+    instances.create_column(mac_address)
+
+    s = select([instances.c.id, virtual_interfaces.c.address],
+               virtual_interfaces.c.instance_id == instances.c.id)
+
+    # write each mac_address back onto its instance row
+    for row in s.execute():
+        instances.update().values(mac_address=row['address']).\
+            where(instances.c.id == row['id']).execute()
+
+    networks.drop_column('bridge_interface')
+    virtual_interfaces.drop()
+    fixed_ips.drop_column('virtual_interface_id')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql
new file mode 100644
index 00000000000..2486e6d2db6
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql
@@ -0,0 +1,377 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE instances_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        internal_id INTEGER,
+        admin_pass VARCHAR(255),
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        image_ref VARCHAR(255),
+        kernel_id VARCHAR(255),
+        ramdisk_id VARCHAR(255),
+        server_name VARCHAR(255),
+        launch_index INTEGER,
+        key_name VARCHAR(255),
+        key_data TEXT,
+        state INTEGER,
+        state_description VARCHAR(255),
+        memory_mb INTEGER,
+        vcpus INTEGER,
+        local_gb INTEGER,
+        hostname VARCHAR(255),
+        host VARCHAR(255),
+        user_data TEXT,
+        reservation_id VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        availability_zone VARCHAR(255),
+        locked BOOLEAN,
+        os_type VARCHAR(255),
+        launched_on TEXT,
+        instance_type_id INTEGER,
+        vm_mode VARCHAR(255),
+        uuid VARCHAR(36),
+        architecture VARCHAR(255),
+        PRIMARY KEY (id),
+        CHECK (locked IN (0, 1)),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO instances_backup
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               internal_id,
+               admin_pass,
+               user_id,
+               project_id,
+               image_ref,
+               kernel_id,
+               ramdisk_id,
+               server_name,
+               launch_index,
+               key_name,
+               key_data,
+               state,
+               state_description,
+               memory_mb,
+               vcpus,
+               local_gb,
+               hostname,
+               host,
+               user_data,
+               reservation_id,
+               scheduled_at,
+               launched_at,
+               terminated_at,
+               display_name,
+               display_description,
+               availability_zone,
+               locked,
+               os_type,
+               launched_on,
+               instance_type_id,
+               vm_mode,
+               uuid,
+               architecture
+        FROM instances;
+
+    DROP TABLE instances;
+
+    CREATE TABLE instances (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        internal_id INTEGER,
+        admin_pass VARCHAR(255),
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        image_ref VARCHAR(255),
+        kernel_id VARCHAR(255),
+        ramdisk_id VARCHAR(255),
+        server_name VARCHAR(255),
+        launch_index INTEGER,
+        key_name VARCHAR(255),
+        key_data TEXT,
+        state INTEGER,
+        state_description VARCHAR(255),
+        memory_mb INTEGER,
+        vcpus INTEGER,
+        local_gb INTEGER,
+        hostname VARCHAR(255),
+        host VARCHAR(255),
+        user_data TEXT,
+        reservation_id VARCHAR(255),
scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + mac_address VARCHAR(255), + PRIMARY KEY (id), + CHECK (locked IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + NULL AS mac_address + FROM instances_backup; + + DROP TABLE instances_backup; + + UPDATE instances SET mac_address=(SELECT address + FROM virtual_interfaces + WHERE virtual_interfaces.instance_id = instances.id); + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6 + FROM networks_backup; + + DROP TABLE networks_backup; + + DROP TABLE virtual_interfaces; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, 
+ address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + virtual_interface_id INTEGER, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + virtual_interface_id + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py b/cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py new file mode 100644 index 00000000000..4c1413b22ee --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py @@ -0,0 +1,59 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
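+
+# SQLite cannot add a foreign key to an existing table via ALTER TABLE, so
+# the upgrade()/downgrade() below only touch non-SQLite engines; for SQLite
+# the companion 031_sqlite_upgrade.sql / 031_sqlite_downgrade.sql scripts
+# rebuild the fixed_ips table instead.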
+
+from sqlalchemy import MetaData, Table
+from migrate import ForeignKeyConstraint
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    dialect = migrate_engine.url.get_dialect().name
+
+    # grab tables
+    fixed_ips = Table('fixed_ips', meta, autoload=True)
+    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
+
+    # add foreignkey if not sqlite
+    try:
+        if not dialect.startswith('sqlite'):
+            ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
+                                 refcolumns=[virtual_interfaces.c.id]).create()
+    except Exception:
+        LOG.error(_("foreign key constraint couldn't be added"))
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    dialect = migrate_engine.url.get_dialect().name
+
+    # grab tables
+    fixed_ips = Table('fixed_ips', meta, autoload=True)
+    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
+
+    # drop foreignkey if not sqlite
+    try:
+        if not dialect.startswith('sqlite'):
+            ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
+                                 refcolumns=[virtual_interfaces.c.id]).drop()
+    except Exception:
+        LOG.error(_("foreign key constraint couldn't be dropped"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql
new file mode 100644
index 00000000000..c1d26b18031
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql
@@ -0,0 +1,48 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE fixed_ips_backup (
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        virtual_interface_id INTEGER,
+        network_id INTEGER,
+        instance_id INTEGER,
+        allocated BOOLEAN default FALSE,
+        leased BOOLEAN default FALSE,
+        reserved BOOLEAN default FALSE,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN NOT NULL,
+        PRIMARY KEY (id),
+        FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
+    );
+
+    INSERT INTO fixed_ips_backup
+        SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
+        FROM fixed_ips;
+
+    DROP TABLE fixed_ips;
+
+    CREATE TABLE fixed_ips (
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        virtual_interface_id INTEGER,
+        network_id INTEGER,
+        instance_id INTEGER,
+        allocated BOOLEAN default FALSE,
+        leased BOOLEAN default FALSE,
+        reserved BOOLEAN default FALSE,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN NOT NULL,
+        PRIMARY KEY (id)
+    );
+
+    INSERT INTO fixed_ips
+        SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
+        FROM fixed_ips_backup;
+
+    DROP TABLE fixed_ips_backup;
+
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql
new file mode 100644
index 00000000000..2a9362545f1
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql
@@ -0,0 +1,48 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE fixed_ips_backup (
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        virtual_interface_id INTEGER,
+        network_id INTEGER,
+        instance_id INTEGER,
+        allocated BOOLEAN default FALSE,
+        leased BOOLEAN default FALSE,
+        reserved BOOLEAN default FALSE,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN NOT NULL,
+        PRIMARY KEY (id)
+    );
+
+    INSERT INTO fixed_ips_backup
+        SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
+        FROM fixed_ips;
+
+    DROP TABLE fixed_ips;
+
+    CREATE TABLE fixed_ips (
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        virtual_interface_id INTEGER,
+        network_id INTEGER,
+        instance_id INTEGER,
+        allocated BOOLEAN default FALSE,
+        leased BOOLEAN default FALSE,
+        reserved BOOLEAN default FALSE,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN NOT NULL,
+        PRIMARY KEY (id),
+        FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
+    );
+
+    INSERT INTO fixed_ips
+        SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
+        FROM fixed_ips_backup;
+
+    DROP TABLE fixed_ips_backup;
+
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py b/cinder/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py
new file mode 100644
index 00000000000..f12070c5709
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py
@@ -0,0 +1,42 @@
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Isaku Yamahata
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, Table, String
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    root_device_name = Column(
+        'root_device_name',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False),
+        nullable=True)
+    instances.create_column(root_device_name)
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    instances.drop_column('root_device_name')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/033_ha_network.py b/cinder/db/sqlalchemy/migrate_repo/versions/033_ha_network.py
new file mode 100644
index 00000000000..becc353f68f
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/033_ha_network.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData, Boolean, String + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips_host = Column('host', String(255)) + fixed_ips = Table('fixed_ips', meta, autoload=True) + fixed_ips.create_column(fixed_ips_host) + + networks_multi_host = Column('multi_host', Boolean, default=False) + networks = Table('networks', meta, autoload=True) + networks.create_column(networks_multi_host) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + fixed_ips.drop_column('host') + + networks = Table('networks', meta, autoload=True) + networks.drop_column('multi_host') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql new file mode 100644 index 00000000000..34188d86629 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql @@ -0,0 +1,193 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + host VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, + address, + virtual_interface_id, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted, + host + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips + SELECT id, + address, + virtual_interface_id, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + multi_host BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)), + CHECK (multi_host IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + 
vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface, + multi_host + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface + FROM networks_backup; + + DROP TABLE networks_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py b/cinder/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py new file mode 100644 index 00000000000..9cf004301b7 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, String, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + instance_uuid = Column('instance_uuid', String(255)) + migrations.create_column(instance_uuid) + + if migrate_engine.name == "mysql": + try: + migrate_engine.execute("ALTER TABLE migrations DROP FOREIGN KEY " + "`migrations_ibfk_1`;") + except Exception: # Don't care, just fail silently. + pass + + migrations.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + migrations.c.instance_uuid.drop() + instance_id = Column('instance_id', Integer()) + migrations.create_column(instance_id) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py b/cinder/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py new file mode 100644 index 00000000000..c03e5be6625 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData, String + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.c.dns.alter(name='dns1') + dns2 = Column('dns2', String(255)) + networks.create_column(dns2) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.c.dns1.alter(name='dns') + networks.drop_column('dns2') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py b/cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py new file mode 100644 index 00000000000..38f83fc0192 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py @@ -0,0 +1,79 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
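+
+# Rough shape of the conversion below (illustrative values only): given an
+# instance_types row (id=5, flavorid=2), itypes becomes {5: 2}, and every
+# migrations row whose old_flavor_id equals 2 gets old_instance_type_id = 5
+# (likewise for the new_* columns); the flavor-id columns are then dropped.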
+ +from sqlalchemy import Column, Integer, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + migrations = Table('migrations', meta, autoload=True) + + old_instance_type_id = Column('old_instance_type_id', Integer()) + new_instance_type_id = Column('new_instance_type_id', Integer()) + migrations.create_column(old_instance_type_id) + migrations.create_column(new_instance_type_id) + + # Convert flavor_id to instance_type_id + itypes = {} + for instance_type in migrate_engine.execute(instance_types.select()): + itypes[instance_type.id] = instance_type.flavorid + + for instance_type_id in itypes.keys(): + migrate_engine.execute(migrations.update()\ + .where(migrations.c.old_flavor_id == itypes[instance_type_id])\ + .values(old_instance_type_id=instance_type_id)) + migrate_engine.execute(migrations.update()\ + .where(migrations.c.new_flavor_id == itypes[instance_type_id])\ + .values(new_instance_type_id=instance_type_id)) + + migrations.c.old_flavor_id.drop() + migrations.c.new_flavor_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + migrations = Table('migrations', meta, autoload=True) + + old_flavor_id = Column('old_flavor_id', Integer()) + new_flavor_id = Column('new_flavor_id', Integer()) + + migrations.create_column(old_flavor_id) + migrations.create_column(new_flavor_id) + + # Convert instance_type_id to flavor_id + itypes = {} + for instance_type in migrate_engine.execute(instance_types.select()): + itypes[instance_type.flavorid] = instance_type.id + + for instance_type_flavorid in itypes.keys(): + migrate_engine.execute(migrations.update()\ + .where(migrations.c.old_instance_type_id == + itypes[instance_type_flavorid])\ + .values(old_flavor_id=instance_type_flavorid)) + migrate_engine.execute(migrations.update()\ + .where(migrations.c.new_instance_type_id == + itypes[instance_type_flavorid])\ + .values(new_flavor_id=instance_type_flavorid)) + + migrations.c.old_instance_type_id.drop() + migrations.c.new_instance_type_id.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py b/cinder/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py new file mode 100644 index 00000000000..c8a1a19274e --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py @@ -0,0 +1,42 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
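+
+# This migration drops instances.admin_pass outright; downgrade() recreates
+# the column, but the values removed by upgrade() are not recoverable.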
+
+from sqlalchemy import Column, MetaData, Table, String
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    instances.drop_column('admin_pass')
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    #
+    # New Columns
+    #
+    admin_pass = Column(
+        'admin_pass',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False),
+        nullable=True)
+
+    instances.create_column(admin_pass)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py b/cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py
new file mode 100644
index 00000000000..fbd1c45702c
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2011 Midokura KK
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+from cinder import utils
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
+
+    uuid_column = Column('uuid', String(36))
+    virtual_interfaces.create_column(uuid_column)
+
+    # look rows up by column name; 'id' is not the first reflected column
+    rows = migrate_engine.execute(virtual_interfaces.select())
+    for row in rows:
+        vif_uuid = str(utils.gen_uuid())
+        migrate_engine.execute(virtual_interfaces.update()\
+                .where(virtual_interfaces.c.id == row['id'])\
+                .values(uuid=vif_uuid))
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
+
+    virtual_interfaces.drop_column('uuid')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql
new file mode 100644
index 00000000000..0ac66e7e01b
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql
@@ -0,0 +1,63 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE virtual_interfaces_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        network_id INTEGER,
+        instance_id INTEGER NOT NULL,
+        uuid VARCHAR(36),
+        PRIMARY KEY (id),
+        FOREIGN KEY(network_id) REFERENCES networks (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id),
+        UNIQUE (address),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO virtual_interfaces_backup
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               address,
+               network_id,
+               instance_id,
+               uuid
+        FROM virtual_interfaces;
+
+    DROP TABLE virtual_interfaces;
+
+    CREATE TABLE virtual_interfaces (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
network_id INTEGER, + instance_id INTEGER NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py b/cinder/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py new file mode 100644 index 00000000000..8c8961cd33a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py @@ -0,0 +1,49 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table, String + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + accessIPv4 = Column( + 'access_ip_v4', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + accessIPv6 = Column( + 'access_ip_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + instances.create_column(accessIPv4) + instances.create_column(accessIPv6) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('access_ip_v4') + instances.drop_column('access_ip_v6') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py b/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py new file mode 100644 index 00000000000..7125911d34d --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
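+
+# Same pattern as migrations 025 and 038: add a nullable `uuid` column, then
+# backfill each existing networks row with a generated UUID.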
+ +from sqlalchemy import Column, Integer, MetaData, String, Table + +from cinder import utils + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + uuid_column = Column("uuid", String(36)) + networks.create_column(uuid_column) + + rows = migrate_engine.execute(networks.select()) + for row in rows: + networks_uuid = str(utils.gen_uuid()) + migrate_engine.execute(networks.update()\ + .where(networks.c.id == row[0])\ + .values(uuid=networks_uuid)) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.drop_column('uuid') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py new file mode 100644 index 00000000000..f85c4a0d743 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Piston Cloud Computing, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table("instances", meta, autoload=True) + + config_drive_column = Column("config_drive", String(255), nullable=True) + instances.create_column(config_drive_column) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table("instances", meta, autoload=True) + + instances.drop_column('config_drive') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py b/cinder/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py new file mode 100644 index 00000000000..2434bb0abf9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py @@ -0,0 +1,122 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
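+
+# This migration introduces the volume-type machinery: three new tables,
+# volume_types (type names, unique per name), volume_type_extra_specs
+# (key/value pairs attached to a type) and volume_metadata (key/value
+# pairs attached to a volume), plus a nullable volume_type_id column on
+# volumes. Creation failures are logged and re-raised so a partially
+# applied upgrade is visible in the logs.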
+ +from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table +from sqlalchemy import Boolean, ForeignKey + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + # + # New Tables + # + volume_types = Table('volume_types', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True)) + + volume_type_extra_specs_table = Table('volume_type_extra_specs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_type_id', + Integer(), + ForeignKey('volume_types.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + volume_metadata_table = Table('volume_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + new_tables = (volume_types, + volume_type_extra_specs_table, + volume_metadata_table) + + for table in new_tables: + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + raise + + # + # New Columns + # + volume_type_id = Column('volume_type_id', Integer(), nullable=True) + volumes.create_column(volume_type_id) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + volumes.drop_column('volume_type_id') + + volume_types = Table('volume_types', meta, autoload=True) + volume_type_extra_specs_table = Table('volume_type_extra_specs', + meta, + autoload=True) + volume_metadata_table = Table('volume_metadata', meta, autoload=True) + + # table order matters, don't change + for table in (volume_type_extra_specs_table, + volume_types, + volume_metadata_table): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql new file mode 100644 index 00000000000..8fa39663a23 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql @@ -0,0 +1,129 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + 
updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id INTEGER, + volume_type_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id, + volume_type_id + FROM volumes; + + DROP TABLE volumes; + + CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id + FROM volumes_backup; + + DROP TABLE volumes_backup; + + DROP TABLE volume_type_extra_specs; + + DROP TABLE volume_types; + + DROP TABLE volume_metadata; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/cinder/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py new file mode 100644 index 00000000000..1e22608fc0b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
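+
+# This migration adds the virtual_storage_arrays table backing the VSA
+# feature: each row records a display name and description, project,
+# availability zone, instance type, image ref, virtual-controller and
+# volume counts, and a status string. Downgrade drops the table.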
+ +from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table +from sqlalchemy import Boolean + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + virtual_storage_arrays = Table('virtual_storage_arrays', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type_id', Integer(), nullable=False), + Column('image_ref', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vc_count', Integer(), nullable=False), + Column('vol_count', Integer(), nullable=False), + Column('status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + try: + virtual_storage_arrays.create() + except Exception: + LOG.info(repr(virtual_storage_arrays)) + LOG.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + virtual_storage_arrays = Table('virtual_storage_arrays', + meta, + autoload=True) + virtual_storage_arrays.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/cinder/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py new file mode 100644 index 00000000000..95d3b7529ce --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
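+
+# This migration splits the old instance state columns into the newer
+# three-part model: 'state' is renamed to 'power_state',
+# 'state_description' is renamed to 'vm_state', and a new nullable
+# 'task_state' string column is added. Downgrade reverses the renames
+# and drops task_state.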
+ +from sqlalchemy import MetaData, Table, Column, String + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True) + + c_state = instance_table.c.state + c_state.alter(name='power_state') + + c_vm_state = instance_table.c.state_description + c_vm_state.alter(name='vm_state') + + c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + instance_table.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True) + + c_state = instance_table.c.power_state + c_state.alter(name='state') + + c_vm_state = instance_table.c.vm_state + c_vm_state.alter(name='state_description') + + instance_table.drop_column('task_state') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py b/cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py new file mode 100644 index 00000000000..3d75803dc53 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py @@ -0,0 +1,44 @@ +# Copyright 2011 Nicira, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table + +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + priority = Column('priority', Integer()) + try: + networks.create_column(priority) + except Exception: + LOG.error(_("priority column not added to networks table")) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.drop_column('priority') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py b/cinder/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py new file mode 100644 index 00000000000..3ee1c4e7ee5 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py @@ -0,0 +1,49 @@ +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table, String + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + default_local_device = Column( + 'default_local_device', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + default_swap_device = Column( + 'default_swap_device', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + instances.create_column(default_local_device) + instances.create_column(default_swap_device) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('default_swap_device') + instances.drop_column('default_local_device') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py b/cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py new file mode 100644 index 00000000000..0b365df3122 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py @@ -0,0 +1,61 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + instances = Table('instances', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + + try: + fkey_name = list(vifs.c.instance_id.foreign_keys)[0].constraint.name + ForeignKeyConstraint(columns=[vifs.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
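+    # Note: the constraint is recreated below without an explicit name,
+    # so on engines that generate constraint names automatically the
+    # restored foreign key may not end up with the name the original
+    # schema used.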
+ meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + instances = Table('instances', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + + try: + ForeignKeyConstraint(columns=[vifs.c.instance_id], + refcolumns=[instances.c.id]).create() + except Exception: + LOG.error(_("foreign key constraint couldn't be added")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql new file mode 100644 index 00000000000..9bc3ee8d4c0 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql @@ -0,0 +1,46 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql new file mode 100644 index 00000000000..2c0919f1dd9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql @@ -0,0 +1,45 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py b/cinder/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py new file mode 100644 index 00000000000..e313fc7dee8 --- /dev/null +++ 
b/cinder/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py @@ -0,0 +1,33 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + name = Column('name', String(255)) + zones.create_column(name) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + zones.drop_column('name') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py b/cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py new file mode 100644 index 00000000000..c19d89e64cf --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + progress = Column('progress', Integer()) + try: + instances.create_column(progress) + except Exception: + LOG.error(_("progress column not added to instances table")) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('progress') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py new file mode 100644 index 00000000000..d4a2fcc13d7 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py @@ -0,0 +1,37 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, Integer, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + managed_disk = Column("managed_disk", Boolean(create_constraint=False, + name=None)) + instances.create_column(managed_disk) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('managed_disk') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql new file mode 100644 index 00000000000..8db7087bc08 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql @@ -0,0 +1,207 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + managed_disk BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (managed_disk IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + managed_disk + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + 
image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)) + ); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress + FROM instances_backup; + + DROP TABLE instances_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py b/cinder/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py new file mode 100644 index 00000000000..a338319933b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py @@ -0,0 +1,34 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
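+
+# This migration adds a nullable integer 'vcpu_weight' column to
+# instance_types; downgrade drops it again. No data backfill is needed
+# because the column starts out empty.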
+ +from sqlalchemy import Column, Integer, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + + vcpu_weight = Column("vcpu_weight", Integer()) + instance_types.create_column(vcpu_weight) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + + instance_types.drop_column('vcpu_weight') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py b/cinder/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py new file mode 100644 index 00000000000..c71b4eeefee --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py @@ -0,0 +1,65 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 University of Southern California +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + # + # New Tables + # + export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + try: + export_devices.create() + except Exception: + LOG.info(repr(export_devices)) + LOG.exception('Exception while creating table') + raise + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + export_devices = Table('export_devices', meta, autoload=True) + + export_devices.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py b/cinder/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py new file mode 100644 index 00000000000..8fe13991849 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, MetaData, Table, Text + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + table = Table('block_device_mapping', meta, autoload=True) + + new_column = Column('connection_info', Text()) + + table.create_column(new_column) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + table = Table('block_device_mapping', meta, autoload=True) + + table.c.connection_info.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql new file mode 100644 index 00000000000..84439976367 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql @@ -0,0 +1,87 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping; + + DROP TABLE block_device_mapping; + + CREATE TABLE block_device_mapping ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device + FROM block_device_mapping_backup; + + DROP TABLE block_device_mapping_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py b/cinder/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py new file mode 100644 index 00000000000..765f9cfc179 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 
shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData +from sqlalchemy import Integer, BigInteger, DateTime, Boolean, String + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + bw_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('network_label', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('start_period', DateTime(timezone=False), nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger())) + try: + bw_cache.create() + except Exception: + LOG.info(repr(bw_cache)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=[bw_cache]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + bw_cache = Table('bw_usage_cache', meta, autoload=True) + bw_cache.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py b/cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py new file mode 100644 index 00000000000..b110b6f208a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py @@ -0,0 +1,112 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
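+
+# This migration converts instance_types.flavorid from an integer to a
+# string in place: a temporary 'flavorid_str' column is created and
+# populated row by row with str(flavorid), unique constraints are
+# re-applied on a best-effort basis, and the columns are then swapped by
+# renaming, after which the old integer column is dropped. Downgrade
+# performs the same steps in reverse and aborts if any flavorid no
+# longer parses as an integer.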
+ +import migrate +import migrate.changeset +from sqlalchemy import Column, Integer, String, MetaData, Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) + + string_column = Column('flavorid_str', String(255)) + + string_column.create(instance_types) + + try: + # NOTE(bcwaldon): This catches a bug with python-migrate + # failing to add the unique constraint + try: + migrate.UniqueConstraint(string_column).create() + except migrate.changeset.NotSupportedError: + LOG.error("Failed to add unique constraint on flavorid") + pass + + # NOTE(bcwaldon): this is a hack to preserve uniqueness constraint + # on existing 'name' column + try: + migrate.UniqueConstraint(instance_types.c.name).create() + except Exception: + pass + + integer_column = instance_types.c.flavorid + + instance_type_rows = list(instance_types.select().execute()) + for instance_type in instance_type_rows: + flavorid_int = instance_type.flavorid + instance_types.update()\ + .where(integer_column == flavorid_int)\ + .values(flavorid_str=str(flavorid_int))\ + .execute() + except Exception: + string_column.drop() + raise + + integer_column.alter(name='flavorid_int') + string_column.alter(name='flavorid') + integer_column.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + + integer_column = Column('flavorid_int', Integer()) + + integer_column.create(instance_types) + + try: + # NOTE(bcwaldon): This catches a bug with python-migrate + # failing to add the unique constraint + try: + migrate.UniqueConstraint(integer_column).create() + except migrate.changeset.NotSupportedError: + LOG.info("Failed to add unique constraint on flavorid") + pass + + string_column = instance_types.c.flavorid + + instance_types_rows = list(instance_types.select().execute()) + for instance_type in instance_types_rows: + flavorid_str = instance_type.flavorid + try: + flavorid_int = int(instance_type.flavorid) + except ValueError: + msg = _('Could not cast flavorid to integer: %s. ' + 'Set flavorid to an integer-like string to downgrade.') + LOG.error(msg % instance_type.flavorid) + raise + + instance_types.update()\ + .where(string_column == flavorid_str)\ + .values(flavorid_int=flavorid_int)\ + .execute() + except Exception: + integer_column.drop() + raise + + string_column.alter(name='flavorid_str') + integer_column.alter(name='flavorid') + string_column.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py b/cinder/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py new file mode 100644 index 00000000000..aed52488a2e --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + s3_images = Table('s3_images', meta, + Column('created_at', + DateTime(timezone=False)), + Column('updated_at', + DateTime(timezone=False)), + Column('deleted_at', + DateTime(timezone=False)), + Column('deleted', + Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, + nullable=False, + autoincrement=True), + Column('uuid', String(36), + nullable=False)) + try: + s3_images.create() + except Exception: + LOG.exception("Exception while creating table 's3_images'") + meta.drop_all(tables=[s3_images]) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + s3_images = Table('s3_images', meta, autoload=True) + s3_images.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py new file mode 100644 index 00000000000..ea01fc80c6e --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py @@ -0,0 +1,113 @@ +# Copyright (c) 2011 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    volumes = Table('volumes', meta, autoload=True)
+
+    #
+    # New Tables
+    #
+    flavors = Table('sm_flavors', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), primary_key=True, nullable=False),
+            Column('label',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('description',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            )
+
+    backend = Table('sm_backend_config', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), primary_key=True, nullable=False),
+            Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'),
+                   nullable=False),
+            Column('sr_uuid',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('sr_type',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('config_params',
+                   String(length=2047,
+                          convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None,
+                          _warn_on_bytestring=False)),
+            )
+
+    sm_vol = Table('sm_volume', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), ForeignKey('volumes.id'),
+                   primary_key=True, nullable=False),
+            Column('backend_id', Integer(),
+                   ForeignKey('sm_backend_config.id'),
+                   nullable=False),
+            Column('vdi_uuid',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            )
+    for table in (flavors, backend, sm_vol):
+        try:
+            table.create()
+        except Exception:
+            # Log and re-raise so a failed creation aborts the migration
+            # instead of being silently swallowed (matching the pattern
+            # used by migrations 042, 043 and 054).
+            LOG.info(repr(table))
+            LOG.exception('Exception while creating table')
+            raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    volumes = Table('volumes', meta, autoload=True)
+
+    flavors = Table('sm_flavors', meta, autoload=True)
+    backend = Table('sm_backend_config', meta, autoload=True)
+    sm_vol = Table('sm_volume', meta, autoload=True)
+
+    # Drop in dependency order: sm_volume references sm_backend_config,
+    # which references sm_flavors. Drop failures are logged with a
+    # traceback but do not abort the downgrade.
+    for table in (sm_vol, backend, flavors):
+        try:
+            table.drop()
+        except Exception:
+            LOG.info(repr(table))
+            LOG.exception('Exception while dropping table')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py b/cinder/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py
new file mode 100644
index 00000000000..e12cabddd01
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+
+    managed_disk = instances.c.managed_disk
+    managed_disk.alter(name='auto_disk_config')
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+
+    auto_disk_config = instances.c.auto_disk_config
+    auto_disk_config.alter(name='managed_disk')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py b/cinder/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py
new file mode 100644
index 00000000000..58f6d69e08d
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py
@@ -0,0 +1,61 @@
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
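+
+# This migration replaces the per-flavor rxtx_quota/rxtx_cap pair with a
+# split model: networks gain an absolute 'rxtx_base' and instance_types
+# gain a relative 'rxtx_factor' (default 1). The base is computed as the
+# smallest positive rxtx_cap currently in use (falling back to 1), each
+# existing cap is rewritten as cap / base, and the old columns are then
+# dropped. Downgrade multiplies the factors back out against the stored
+# base to reconstruct rxtx_cap.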
+ +from sqlalchemy import Column, Integer, Float, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + + rxtx_base = Column('rxtx_base', Integer) + rxtx_factor = Column('rxtx_factor', Float, default=1) + instance_types.create_column(rxtx_factor) + networks.create_column(rxtx_base) + + base = migrate_engine.execute("select min(rxtx_cap) as min_rxtx from " + "instance_types where rxtx_cap > 0").scalar() + base = base if base > 1 else 1 + update_i_type_sql = ("update instance_types set rxtx_factor = rxtx_cap" + "/%s where rxtx_cap > 0" % base) + migrate_engine.execute(update_i_type_sql) + migrate_engine.execute("update networks set rxtx_base = %s" % base) + + instance_types.c.rxtx_quota.drop() + instance_types.c.rxtx_cap.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + + rxtx_quota = Column('rxtx_quota', Integer) + rxtx_cap = Column('rxtx_cap', Integer) + instance_types.create_column(rxtx_quota) + instance_types.create_column(rxtx_cap) + + base = migrate_engine.execute("select min(rxtx_base) from networks " + "where rxtx_base > 0").scalar() + base = base if base > 1 else 1 + + update_i_type_sql = ("update instance_types set rxtx_cap = " + "rxtx_factor * %s" % base) + migrate_engine.execute(update_i_type_sql) + + instance_types.c.rxtx_factor.drop() + networks.c.rxtx_base.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql new file mode 100644 index 00000000000..ecf45c599ba --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql @@ -0,0 +1,137 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instance_types_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + local_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_quota INTEGER NOT NULL, + rxtx_cap INTEGER NOT NULL, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + UNIQUE (flavorid), + UNIQUE (name) + ); + + INSERT INTO instance_types_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + local_gb, + swap, + 0 as rxtx_quota, + COALESCE(rxtx_factor, 1) * COALESCE ((SELECT MIN(rxtx_base) + FROM networks + WHERE rxtx_base > 0), 1) + as rxtx_cap, + vcpu_weight, + flavorid FROM instance_types; + + DROP TABLE instance_types; + + CREATE TABLE instance_types ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + local_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_quota INTEGER NOT NULL, + rxtx_cap INTEGER NOT NULL, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (flavorid), + CHECK (deleted IN (0, 1)), + UNIQUE (name) + ); + + INSERT INTO instance_types SELECT * FROM instance_types_backup; + DROP TABLE instance_types_backup; + + CREATE TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected 
BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns1 VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + multi_host BOOLEAN, + dns2 VARCHAR(255), + uuid VARCHAR(36), + priority INTEGER, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)), + CHECK (multi_host IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns1, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface, + multi_host, + dns2, + uuid, + priority + FROM networks; + + DROP TABLE networks; + ALTER TABLE networks_backup RENAME TO networks; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_upgrade.sql new file mode 100644 index 00000000000..ba7729aedef --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_upgrade.sql @@ -0,0 +1,87 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instance_types_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + local_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_factor FLOAT, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (flavorid), + CHECK (deleted IN (0, 1)), + UNIQUE (name) + ); + + INSERT INTO instance_types_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + local_gb, + swap, + COALESCE(rxtx_cap, 1) / COALESCE ((SELECT MIN(rxtx_cap) + FROM instance_types + WHERE rxtx_cap > 0), 1) as rxtx_cap, + vcpu_weight, + flavorid + FROM instance_types; + + ALTER TABLE networks ADD COLUMN rxtx_base INTEGER DEFAULT 1; + + UPDATE networks SET rxtx_base = COALESCE((SELECT MIN(rxtx_cap) + FROM instance_types + WHERE rxtx_cap>0), 1); + + DROP TABLE instance_types; + + CREATE TABLE instance_types ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + local_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_factor FLOAT, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (flavorid), + CHECK (deleted IN (0, 1)), + UNIQUE (name) + ); + + INSERT INTO instance_types + SELECT created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + local_gb, + swap, + rxtx_factor, + vcpu_weight, + flavorid + FROM instance_types_backup; + + DROP TABLE instance_types_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py b/cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py new file mode 100644 index 00000000000..83ed1cf6b1f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py @@ -0,0 +1,62 @@ +# Copyright 2011 OpenStack LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + networks = Table('networks', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + + try: + fkey_name = list(vifs.c.network_id.foreign_keys)[0].constraint.name + ForeignKeyConstraint(columns=[vifs.c.network_id], + refcolumns=[networks.c.id], + name=fkey_name).drop() + + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + networks = Table('networks', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + + try: + ForeignKeyConstraint(columns=[vifs.c.network_id], + refcolumns=[networks.c.id]).create() + except Exception: + LOG.error(_("foreign key constraint couldn't be added")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql new file mode 100644 index 00000000000..2c0919f1dd9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql @@ -0,0 +1,45 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_upgrade.sql new file mode 100644 index 00000000000..fd49ea4f524 --- /dev/null +++ 
b/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_upgrade.sql @@ -0,0 +1,44 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py b/cinder/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py new file mode 100644 index 00000000000..1a369bffec3 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py @@ -0,0 +1,29 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Index, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + Index('uuid', instances.c.uuid, unique=True).create(migrate_engine) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + Index('uuid', instances.c.uuid, unique=True).drop(migrate_engine) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py new file mode 100644 index 00000000000..f2b0e8a742d --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py @@ -0,0 +1,70 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
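+
+# NOTE(editor): illustrative note, not part of the original change. The
+# table created below keys the cache on instances.uuid rather than the
+# integer id, which relies on the unique index added to instances.uuid
+# in migration 061. network_info is a plain Text column, so callers are
+# expected to serialize the cached data themselves, e.g. as a JSON
+# string such as '[{"network": {...}, "ips": [...]}]' (shape shown here
+# is hypothetical).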
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey
+from sqlalchemy import Integer, MetaData, String
+from sqlalchemy import Table, Text
+
+from cinder import log as logging
+from cinder import utils
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    instances = Table('instances', meta, autoload=True)
+
+    #
+    # New Tables
+    #
+    instance_info_caches = Table('instance_info_caches', meta,
+            Column('created_at', DateTime(timezone=False),
+                   default=utils.utcnow),
+            Column('updated_at', DateTime(timezone=False),
+                   onupdate=utils.utcnow),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), primary_key=True),
+            Column('network_info', Text()),
+            Column('instance_id', String(36),
+                   ForeignKey('instances.uuid'),
+                   nullable=False,
+                   unique=True),
+            mysql_engine='InnoDB')
+    # create instance_info_caches table
+    try:
+        instance_info_caches.create()
+    except Exception:
+        LOG.error(_("Table |%s| not created!"), repr(instance_info_caches))
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    instances = Table('instances', meta, autoload=True)
+
+    instance_info_caches = Table('instance_info_caches', meta, autoload=True)
+    try:
+        instance_info_caches.drop()
+    except Exception:
+        LOG.error(_("instance_info_caches table not dropped"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py
new file mode 100644
index 00000000000..be4561791cc
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py
@@ -0,0 +1,60 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer, ForeignKey
+from sqlalchemy import MetaData, String, Table, Text
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+    #
+    # New Tables
+    #
+    instance_faults = Table('instance_faults', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None),
+                   default=False),
+            Column('id', Integer(), primary_key=True, nullable=False),
+            Column('instance_uuid', String(36),
+                   ForeignKey('instances.uuid')),
+            Column('code', Integer(), nullable=False),
+            Column('message',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('details',
+                   Text(length=None, convert_unicode=False,
+                        assert_unicode=None,
+                        unicode_error=None, _warn_on_bytestring=False)),
+            )
+    try:
+        instance_faults.create()
+    except Exception:
+        LOG.error(_("Table |%s| not created!"), repr(instance_faults))
+        raise
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instance_faults = Table('instance_faults', meta, autoload=True)
+    instance_faults.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py b/cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py
new file mode 100644
index 00000000000..bed9151666d
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
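+
+# NOTE(editor): illustrative note, not part of the original change. The
+# update-from-select pattern used below emits roughly the following SQL,
+# backfilling the new column before the old one is dropped:
+#
+#     UPDATE instance_actions
+#     SET instance_uuid = (SELECT instances.uuid
+#                          FROM instances
+#                          WHERE instances.id =
+#                                instance_actions.instance_id);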
+ +from sqlalchemy import select, Column, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + instance_actions = Table('instance_actions', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + uuid_column = Column('instance_uuid', String(36)) + uuid_column.create(instance_actions) + + try: + instance_actions.update().values( + instance_uuid=select( + [instances.c.uuid], + instances.c.id == instance_actions.c.instance_id) + ).execute() + except Exception: + uuid_column.drop() + raise + + if not dialect.startswith('sqlite'): + fkeys = list(instance_actions.c.instance_id.foreign_keys) + if fkeys: + try: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[instance_actions.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + instance_actions.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instance_actions = Table('instance_actions', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + id_column = Column('instance_id', Integer, ForeignKey('instances.id')) + id_column.create(instance_actions) + + try: + instance_actions.update().values( + instance_id=select( + [instances.c.id], + instances.c.uuid == instance_actions.c.instance_uuid) + ).execute() + except Exception: + id_column.drop() + raise + + instance_actions.c.instance_uuid.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py b/cinder/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py new file mode 100644 index 00000000000..9b27f39dcc6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py @@ -0,0 +1,31 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
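+
+# NOTE(editor): illustrative note, not part of the original change.
+# Instance listings are typically filtered per tenant, so without the
+# index added below a query such as this sketch would scan the whole
+# table:
+#
+#     SELECT * FROM instances
+#     WHERE project_id = 'some-project' AND deleted = 0;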
+ +from sqlalchemy import Index, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + index = Index('project_id', instances.c.project_id) + index.create(migrate_engine) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + index = Index('project_id', instances.c.project_id) + index.drop(migrate_engine) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py new file mode 100644 index 00000000000..b2df1bbe25b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py @@ -0,0 +1,31 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from sqlalchemy import select, MetaData, Table + +from cinder import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + pass + + +def downgrade(migrate_engine): + pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py b/cinder/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py new file mode 100644 index 00000000000..61adb8fa423 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
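+
+# NOTE(editor): illustrative note, not part of the original change. The
+# two columns added below let each floating IP record which allocation
+# pool it was drawn from and which host interface should answer for it;
+# values such as pool='nova' or interface='eth0' are hypothetical
+# examples, not defaults established by this migration.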
+ +from sqlalchemy import Column, MetaData, String, Table + +from cinder import flags + +FLAGS = flags.FLAGS + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + table = Table('floating_ips', meta, autoload=True) + + pool_column = Column('pool', String(255)) + interface_column = Column('interface', String(255)) + table.create_column(pool_column) + table.create_column(interface_column) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + table = Table('floating_ips', meta, autoload=True) + table.c.pool.drop() + table.c.interface.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql new file mode 100644 index 00000000000..3cd12cbdc2f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql @@ -0,0 +1,69 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned, + pool, + interface + FROM floating_ips; + + DROP TABLE floating_ips; + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned + FROM floating_ips_backup; + + DROP TABLE floating_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py b/cinder/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py new file mode 100644 index 00000000000..a65aff8b426 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py @@ -0,0 +1,36 @@ +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
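+
+# NOTE(editor): illustrative note, not part of the original change. The
+# two booleans added below appear to mirror EC2-style termination
+# controls: shutdown_terminate (default True) governs whether a
+# guest-initiated shutdown terminates the instance, while
+# disable_terminate (default False) acts as termination protection.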
+ +from sqlalchemy import Boolean +from sqlalchemy import Column, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + shutdown_terminate = Column( + 'shutdown_terminate', Boolean(), default=True) + disable_terminate = Column( + 'disable_terminate', Boolean(), default=False) + instances.create_column(shutdown_terminate) + instances.create_column(disable_terminate) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + instances.drop_column('shutdown_terminate') + instances.drop_column('disable_terminate') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql new file mode 100644 index 00000000000..a7700f6fab7 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql @@ -0,0 +1,219 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + shutdown_terminate BOOLEAN, + disable_terminate BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)), + CHECK (shutdown_terminate IN (0, 1)), + CHECK (disable_terminate IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config, + shutdown_terminate, + disable_terminate + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + 
ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)) + ); + + CREATE INDEX project_id ON instances (project_id); + CREATE UNIQUE INDEX uuid ON instances (uuid); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config + FROM instances_backup; + + DROP TABLE instances_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py b/cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py new file mode 100644 index 00000000000..e9984be28b8 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py @@ -0,0 +1,50 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + disk_available_least = Column('disk_available_least', Integer(), default=0) + compute_nodes = Table('compute_nodes', meta, autoload=True) + # Add disk_available_least column to compute_nodes table. + # Thinking about qcow2 image support, both compressed and virtual disk size + # has to be considered. 
+ # disk_available stores "total disk size - used disk(compressed disk size)" + # while disk_available_least stores + # "total disk size - used disk(virtual disk size)". + # virtual disk size is used for kvm block migration. + try: + compute_nodes.create_column(disk_available_least) + except Exception: + LOG.error(_("progress column not added to compute_nodes table")) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + compute_nodes = Table('compute_nodes', meta, autoload=True) + compute_nodes.drop_column('disk_available_least') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql new file mode 100644 index 00000000000..5837603c866 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql @@ -0,0 +1,103 @@ +BEGIN TRANSACTION; + CREATE TABLE fixed_ips_backup ( + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + host VARCHAR(255), + PRIMARY KEY (id) + ); + + CREATE TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + virtual_interface_id, network_id, instance_id, allocated, + leased, reserved, host + FROM fixed_ips; + + INSERT INTO floating_ips_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + fixed_ip_id, project_id, host, auto_assigned, pool, + interface + FROM floating_ips; + + DROP TABLE fixed_ips; + DROP TABLE floating_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + host VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO fixed_ips + SELECT created_at, updated_at, deleted_at, deleted, id, address, + virtual_interface_id, network_id, instance_id, allocated, + leased, reserved, host + FROM fixed_ips_backup; + + INSERT INTO floating_ips + SELECT created_at, updated_at, deleted_at, deleted, id, address, + fixed_ip_id, project_id, host, auto_assigned, pool, + interface + FROM floating_ips_backup; + + DROP TABLE fixed_ips_backup; + DROP TABLE 
floating_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_upgrade.sql new file mode 100644 index 00000000000..2b6f7c39a40 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_upgrade.sql @@ -0,0 +1,99 @@ +BEGIN TRANSACTION; + CREATE TABLE fixed_ips_backup ( + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + host VARCHAR(255), + PRIMARY KEY (id) + ); + + CREATE TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + virtual_interface_id, network_id, instance_id, allocated, + leased, reserved, host + FROM fixed_ips; + + INSERT INTO floating_ips_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + fixed_ip_id, project_id, host, auto_assigned, pool, + interface + FROM floating_ips; + + DROP TABLE fixed_ips; + DROP TABLE floating_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + host VARCHAR(255), + PRIMARY KEY (id) + ); + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO fixed_ips + SELECT created_at, updated_at, deleted_at, deleted, id, address, + virtual_interface_id, network_id, instance_id, allocated, + leased, reserved, host + FROM fixed_ips_backup; + + INSERT INTO floating_ips + SELECT created_at, updated_at, deleted_at, deleted, id, address, + fixed_ip_id, project_id, host, auto_assigned, pool, + interface + FROM floating_ips_backup; + + DROP TABLE fixed_ips_backup; + DROP TABLE floating_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py b/cinder/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py new file mode 100644 index 00000000000..0316194b326 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py @@ -0,0 +1,100 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + floating_ips = Table('floating_ips', meta, autoload=True) + + try: + fkeys = list(fixed_ips.c.network_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[fixed_ips.c.network_id], + refcolumns=[networks.c.id], + name=fkey_name).drop() + + fkeys = list(fixed_ips.c.virtual_interface_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[vifs.c.id], + name=fkey_name).drop() + + fkeys = list(fixed_ips.c.instance_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[fixed_ips.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + + fkeys = list(floating_ips.c.fixed_ip_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id], + refcolumns=[fixed_ips.c.id], + name=fkey_name).drop() + + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + floating_ips = Table('floating_ips', meta, autoload=True) + + try: + ForeignKeyConstraint(columns=[fixed_ips.c.network_id], + refcolumns=[networks.c.id]).create() + + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[vifs.c.id]).create() + + ForeignKeyConstraint(columns=[fixed_ips.c.instance_id], + refcolumns=[instances.c.id]).create() + + ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id], + refcolumns=[fixed_ips.c.id]).create() + except Exception: + LOG.error(_("foreign key constraint couldn't be added")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py new file mode 100644 index 00000000000..d85c3bad0a4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py @@ -0,0 +1,108 @@ +# Copyright (c) 2011 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, String, DateTime, Integer +from sqlalchemy import MetaData, Column, ForeignKey, Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + # + # New Tables + # + aggregates = Table('aggregates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, nullable=False, autoincrement=True), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('operational_state', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + ) + + hosts = Table('aggregate_hosts', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('aggregate_id', Integer(), ForeignKey('aggregates.id'), + nullable=False), + ) + + metadata = Table('aggregate_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('aggregate_id', + Integer(), + ForeignKey('aggregates.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False)) + tables = (aggregates, hosts, metadata) + for table in tables: + try: + table.create() + except Exception: + LOG.exception(repr(table)) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + aggregates = Table('aggregates', meta, autoload=True) + hosts = Table('aggregate_hosts', meta, autoload=True) + metadata = Table('aggregate_metadata', meta, autoload=True) + # table order matters, don't change + for table in (hosts, metadata, aggregates): + try: + table.drop() + except Exception: + LOG.exception(repr(table)) diff --git 
a/cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py new file mode 100644 index 00000000000..5c1644d579b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py @@ -0,0 +1,77 @@ +# Copyright 2012 Andrew Bogott for The Wikimedia Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + projects = Table('projects', meta, autoload=True) + + # + # New Tables + # + dns_domains = Table('dns_domains', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('domain', + String(length=512, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, nullable=False), + Column('scope', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id')) + ) + # create dns_domains table + try: + dns_domains.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(dns_domains)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + projects = Table('projects', meta, autoload=True) + + dns_domains = Table('dns_domains', meta, autoload=True) + try: + dns_domains.drop() + except Exception: + LOG.error(_("dns_domains table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql new file mode 100644 index 00000000000..d11e8214788 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql @@ -0,0 +1,13 @@ +CREATE TABLE dns_domains ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + domain VARCHAR(512) CHARACTER SET latin1 NOT NULL, + scope VARCHAR(255), + availability_zone VARCHAR(255), + project_id VARCHAR(255), + PRIMARY KEY (domain), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(project_id) REFERENCES projects (id) +); diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py b/cinder/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py new file mode 100644 index 00000000000..31ed41581ed --- /dev/null +++ 
b/cinder/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py @@ -0,0 +1,49 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table + +from cinder import log as logging + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + compute_nodes = Table('compute_nodes', meta, autoload=True) + + # + # New Columns + # + new_columns = [ + Column('free_ram_mb', Integer()), + Column('free_disk_gb', Integer()), + Column('current_workload', Integer()), + Column('running_vms', Integer()), + ] + for column in new_columns: + compute_nodes.create_column(column) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + compute_nodes = Table('compute_nodes', meta, autoload=True) + + for column in ('free_ram_mb', + 'free_disk_gb', + 'current_workload', + 'running_vms'): + compute_nodes.drop_column(column) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py b/cinder/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py new file mode 100644 index 00000000000..a371aa6963f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from sqlalchemy import select, Column, Integer, MetaData, Table
+
+from cinder import exception
+from cinder import flags
+
+FLAGS = flags.FLAGS
+
+
+def upgrade_libvirt(instances, instance_types):
+    # Update instance_types first
+    tiny = None
+    for inst_type in instance_types.select().execute():
+        if inst_type['name'] == 'm1.tiny':
+            tiny = inst_type['id']
+            root_gb = 0
+        else:
+            root_gb = 10
+
+        instance_types.update()\
+            .values(root_gb=root_gb,
+                    ephemeral_gb=inst_type['local_gb'])\
+            .where(instance_types.c.id == inst_type['id'])\
+            .execute()
+
+    # then update instances following the same pattern
+    instances.update()\
+        .values(root_gb=10,
+                ephemeral_gb=instances.c.local_gb)\
+        .execute()
+
+    if tiny is not None:
+        instances.update()\
+            .values(root_gb=0,
+                    ephemeral_gb=instances.c.local_gb)\
+            .where(instances.c.instance_type_id == tiny)\
+            .execute()
+
+
+def upgrade_other(instances, instance_types):
+    for table in (instances, instance_types):
+        table.update().values(root_gb=table.c.local_gb,
+                              ephemeral_gb=0).execute()
+
+
+def check_instance_presence(migrate_engine, instances_table):
+    result = migrate_engine.execute(instances_table.select().limit(1))
+    return result.fetchone() is not None
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+
+    data_present = check_instance_presence(migrate_engine, instances)
+
+    if data_present and not FLAGS.connection_type:
+        msg = ("Found instance records in database. You must specify "
+               "connection_type to run this migration")
+        raise exception.Error(msg)
+
+    instance_types = Table('instance_types', meta, autoload=True)
+
+    for table in (instances, instance_types):
+        root_gb = Column('root_gb', Integer)
+        root_gb.create(table)
+        ephemeral_gb = Column('ephemeral_gb', Integer)
+        ephemeral_gb.create(table)
+
+    # Since this migration is part of the work to get all drivers
+    # working the same way, we need to treat the new root_gb and
+    # ephemeral_gb columns differently depending on how the
+    # driver implementation used to behave.
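+    #
+    # NOTE(editor): illustrative summary, not part of the original
+    # change. Under the mapping below, a hypothetical libvirt flavor
+    # m1.small with local_gb=20 ends up with root_gb=10 and
+    # ephemeral_gb=20 (m1.tiny gets root_gb=0), while every other
+    # driver carries local_gb over unchanged as root_gb with
+    # ephemeral_gb=0.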
+ if FLAGS.connection_type == 'libvirt': + upgrade_libvirt(instances, instance_types) + else: + upgrade_other(instances, instance_types) + + default_local_device = instances.c.default_local_device + default_local_device.alter(name='default_ephemeral_device') + + for table in (instances, instance_types): + table.drop_column('local_gb') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + instance_types = Table('instance_types', meta, autoload=True) + + for table in (instances, instance_types): + local_gb = Column('local_gb', Integer) + local_gb.create(table) + + try: + for table in (instances, instance_types): + if FLAGS.connection_type == 'libvirt': + column = table.c.ephemeral_gb + else: + column = table.c.root_gb + table.update().values(local_gb=column).execute() + except Exception: + for table in (instances, instance_types): + table.drop_column('local_gb') + raise + + default_ephemeral_device = instances.c.default_ephemeral_device + default_ephemeral_device.alter(name='default_local_device') + + for table in (instances, instance_types): + table.drop_column('root_gb') + table.drop_column('ephemeral_gb') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/074_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/074_sqlite_upgrade.sql new file mode 100644 index 00000000000..e2708111b4c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/074_sqlite_upgrade.sql @@ -0,0 +1,313 @@ +-- sqlalchemy-migrate is surprisingly broken when it comes to migrations +-- for sqlite. As a result, we have to do much of the work manually here + +BEGIN TRANSACTION; + -- make backup of instance_types + CREATE TEMPORARY TABLE instance_types_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + local_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_factor FLOAT, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (flavorid), + CHECK (deleted IN (0, 1)), + UNIQUE (name) + ); + + INSERT INTO instance_types_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + local_gb, + swap, + rxtx_factor, + vcpu_weight, + flavorid + FROM instance_types; + + DROP TABLE instance_types; + + CREATE TABLE instance_types ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + root_gb INTEGER NOT NULL, + ephemeral_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_factor FLOAT, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (flavorid), + CHECK (deleted IN (0, 1)), + UNIQUE (name) + ); + + -- copy from backup to new table with root_gb set to local_gb and + -- ephmeral_gb set to 0 + INSERT INTO instance_types + SELECT created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + local_gb, + 0, + swap, + rxtx_factor, + vcpu_weight, + flavorid + FROM instance_types_backup; + + DROP TABLE instance_types_backup; + + -- make backup of instances + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), 
+ ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + shutdown_terminate BOOLEAN, + disable_terminate BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)), + CHECK (shutdown_terminate IN (0, 1)), + CHECK (disable_terminate IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config, + shutdown_terminate, + disable_terminate + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + root_gb INTEGER, + ephemeral_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_ephemeral_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + shutdown_terminate BOOLEAN, + disable_terminate BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)), + CHECK (shutdown_terminate IN (0, 1)), + CHECK (disable_terminate IN (0, 1)) + ); + + CREATE INDEX project_id ON instances (project_id); + CREATE UNIQUE INDEX uuid ON instances 
(uuid); + + -- copy from backup to new table with root_gb set to local_gb and + -- ephemeral_gb set to 0 + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + 0, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config, + shutdown_terminate, + disable_terminate + FROM instances_backup; + + DROP TABLE instances_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py b/cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py new file mode 100644 index 00000000000..3d26204f069 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py @@ -0,0 +1,97 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
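+ +# NOTE: the back-fill in upgrade() below is a correlated subquery; in rough +# SQL terms (illustrative only, not part of the migration itself): +# UPDATE bw_usage_cache SET mac = +# (SELECT virtual_interfaces.address FROM virtual_interfaces, networks +# WHERE networks.label = bw_usage_cache.network_label +# AND networks.id = virtual_interfaces.network_id +# AND virtual_interfaces.instance_id = bw_usage_cache.instance_id);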
+ +from sqlalchemy import and_, select +from sqlalchemy import BigInteger, Boolean, Column, DateTime +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table + +from cinder import utils + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + vifs = Table('virtual_interfaces', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('network_label', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('start_period', DateTime(timezone=False), + nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + mac_column = Column('mac', String(255)) + bw_usage_cache.create_column(mac_column) + + bw_usage_cache.update()\ + .values(mac=select([vifs.c.address])\ + .where(and_( + networks.c.label == bw_usage_cache.c.network_label, + networks.c.id == vifs.c.network_id, + bw_usage_cache.c.instance_id == vifs.c.instance_id))\ + .as_scalar()).execute() + + bw_usage_cache.c.network_label.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + vifs = Table('virtual_interfaces', meta, autoload=True) + network = Table('networks', meta, autoload=True) + + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('mac', String(255)), + Column('start_period', DateTime(timezone=False), + nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + + network_label_column = Column('network_label', String(255)) + bw_usage_cache.create_column(network_label_column) + + bw_usage_cache.update()\ + .values(network_label=select([network.c.label])\ + .where(and_( + network.c.id == vifs.c.network_id, + vifs.c.address == bw_usage_cache.c.mac, + bw_usage_cache.c.instance_id == vifs.c.instance_id))\ + .as_scalar()).execute() + + bw_usage_cache.c.mac.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py b/cinder/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py new file mode 100644 index 00000000000..971bfbecc2e --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table +from migrate.changeset.constraint import UniqueConstraint + + +def _get_constraint_names(engine_name): + + # NOTE(vish): These constraint names may be dependent on the backend, but + # there doesn't seem to be a way to determine the proper + # name for existing constraints. These names are correct for + # mysql and postgres. + if engine_name == "mysql": + return { + "instance_types_name": ("name", "instance_types_name_key"), + "instance_types_flavorid": "instance_types_flavorid_str_key", + "volume_types_name": "name", + } + else: + return { + "instance_types_name": ("instance_types_name_key",), + "instance_types_flavorid": "instance_types_flavorid_str_key", + "volume_types_name": "volume_types_name_key", + } + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + c_names = _get_constraint_names(migrate_engine.name) + + table = Table('instance_types', meta, autoload=True) + for constraint_name in c_names['instance_types_name']: + cons = UniqueConstraint('name', + name=constraint_name, + table=table) + cons.drop() + cons = UniqueConstraint('flavorid', + name=c_names['instance_types_flavorid'], + table=table) + cons.drop() + table = Table('volume_types', meta, autoload=True) + cons = UniqueConstraint('name', + name=c_names['volume_types_name'], + table=table) + cons.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + c_names = _get_constraint_names(migrate_engine.name) + + table = Table('instance_types', meta, autoload=True) + for constraint_name in c_names['instance_types_name']: + cons = UniqueConstraint('name', + name=constraint_name, + table=table) + cons.create() + table = Table('instance_types', meta, autoload=True) + cons = UniqueConstraint('flavorid', + name=c_names['instance_types_flavorid'], + table=table) + cons.create() + table = Table('volume_types', meta, autoload=True) + cons = UniqueConstraint('name', + name=c_names['volume_types_name'], + table=table) + cons.create() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/076_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/076_sqlite_upgrade.sql new file mode 100644 index 00000000000..6053c1ed74d --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/076_sqlite_upgrade.sql @@ -0,0 +1,61 @@ +-- sqlalchemy-migrate is surprisingly broken when it comes to migrations +-- for sqlite.
As a result, we have to do much of the work manually here + +BEGIN TRANSACTION; + CREATE TABLE instance_types_temp ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + root_gb INTEGER NOT NULL, + ephemeral_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_factor FLOAT, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO instance_types_temp SELECT + created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + root_gb, + ephemeral_gb, + swap, + rxtx_factor, + vcpu_weight, + flavorid + FROM instance_types; + DROP TABLE instance_types; + ALTER TABLE instance_types_temp RENAME TO instance_types; + CREATE TABLE volume_types_temp ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO volume_types_temp SELECT + created_at, + updated_at, + deleted_at, + deleted, + name, + id + FROM volume_types; + DROP TABLE volume_types; + ALTER TABLE volume_types_temp RENAME TO volume_types; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py b/cinder/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py new file mode 100644 index 00000000000..4c08e2f0dc6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # NOTE (ironcamel): The only table we are not converting to utf8 here is + # dns_domains. This table has a primary key that is 512 characters wide. + # When the mysql engine attempts to convert it to utf8, it complains about + # not supporting key columns larger than 1000. 
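+# (For scale: MySQL's utf8 uses up to 3 bytes per character, so a 512 +# character key would need 512 * 3 = 1536 bytes of index space, well over +# the 1000 byte limit, while the original 1 byte per character encoding +# fits comfortably.)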
+ + if migrate_engine.name == "mysql": + tables = [ + # tables that are FK parents, must be converted early + "aggregates", "console_pools", "instance_types", "instances", + "projects", "security_groups", "sm_backend_config", "sm_flavors", + "snapshots", "user_project_association", "users", "volume_types", + "volumes", + # those that are children and others later + "agent_builds", "aggregate_hosts", "aggregate_metadata", + "auth_tokens", "block_device_mapping", "bw_usage_cache", + "certificates", "compute_nodes", "consoles", "fixed_ips", + "floating_ips", "instance_actions", "instance_faults", + "instance_info_caches", "instance_metadata", + "instance_type_extra_specs", "iscsi_targets", "key_pairs", + "migrate_version", "migrations", "networks", "provider_fw_rules", + "quotas", "s3_images", "security_group_instance_association", + "security_group_rules", "services", "sm_volume", + "user_project_role_association", "user_role_association", + "virtual_interfaces", "virtual_storage_arrays", "volume_metadata", + "volume_type_extra_specs", "zones"] + sql = "SET foreign_key_checks = 0;" + for table in tables: + sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table + sql += "SET foreign_key_checks = 1;" + sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \ % migrate_engine.url.database + migrate_engine.execute(sql) + + +def downgrade(migrate_engine): + # utf8 tables should be backwards compatible, so let's leave it alone + pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py b/cinder/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py new file mode 100644 index 00000000000..e4043f84dec --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py @@ -0,0 +1,46 @@ +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +from sqlalchemy import Boolean, Column +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + is_parent = Column('is_parent', Boolean(), default=False) + rpc_host = Column('rpc_host', String(255)) + rpc_port = Column('rpc_port', Integer()) + rpc_virtual_host = Column('rpc_virtual_host', String(255)) + + zones.create_column(is_parent) + zones.create_column(rpc_host) + zones.create_column(rpc_port) + zones.create_column(rpc_virtual_host) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + zones.drop_column('rpc_virtual_host') + zones.drop_column('rpc_port') + zones.drop_column('rpc_host') + zones.drop_column('is_parent') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql new file mode 100644 index 00000000000..80061af78b9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql @@ -0,0 +1,35 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE zones_temp ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + name VARCHAR(255), + api_url VARCHAR(255), + username VARCHAR(255), + password VARCHAR(255), + weight_offset FLOAT, + weight_scale FLOAT, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO zones_temp + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + name, + api_url, + username, + password, + weight_offset, + weight_scale FROM zones; + + DROP TABLE zones; + + ALTER TABLE zones_temp RENAME TO zones; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py new file mode 100644 index 00000000000..2b22b94a020 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py @@ -0,0 +1,30 @@ +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + zone_name = Column('zone_name', String(255)) + instances.create_column(zone_name) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + instances.drop_column('zone_name') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/080_add_hypervisor_hostname_to_compute_nodes.py b/cinder/db/sqlalchemy/migrate_repo/versions/080_add_hypervisor_hostname_to_compute_nodes.py new file mode 100644 index 00000000000..28a3ce48f1c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/080_add_hypervisor_hostname_to_compute_nodes.py @@ -0,0 +1,30 @@ +# Copyright 2012 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import Column, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + compute_nodes = Table("compute_nodes", meta, autoload=True) + hypervisor_hostname = Column("hypervisor_hostname", String(255)) + compute_nodes.create_column(hypervisor_hostname) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + compute_nodes = Table("compute_nodes", meta, autoload=True) + compute_nodes.drop_column('hypervisor_hostname') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py b/cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py new file mode 100644 index 00000000000..c6687ac8074 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
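+ +# NOTE: the downgrade below has to reconstruct the instance_id column that +# this migration drops. It walks instance_info_caches, parses each row's +# cached network_info JSON (a list of vifs, each carrying an 'address' +# field) into a mac -> instance_id map, and then rewrites every +# bw_usage_cache row from that map.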
+import json + +from sqlalchemy import Column, Table, MetaData, Integer, Boolean, String +from sqlalchemy import DateTime, BigInteger + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('mac', String(255)), + Column('start_period', DateTime(timezone=False), + nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + + bw_usage_cache.drop_column('instance_id') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_info_caches = Table('instance_info_caches', meta, autoload=True) + bw_usage_cache = Table('bw_usage_cache', meta, autoload=True) + + instance_id = Column('instance_id', Integer) + bw_usage_cache.create_column(instance_id) + + cache = {} + for row in migrate_engine.execute(instance_info_caches.select()): + instance_id = row['instance_id'] + if not row['network_info']: + continue + + nw_info = json.loads(row['network_info']) + for vif in nw_info: + cache[vif['address']] = instance_id + + for row in migrate_engine.execute(bw_usage_cache.select()): + instance_id = cache[row['mac']] + migrate_engine.execute(bw_usage_cache.update()\ + .where(bw_usage_cache.c.id == row['id'])\ + .values(instance_id=instance_id)) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/082_zone_to_cell.py b/cinder/db/sqlalchemy/migrate_repo/versions/082_zone_to_cell.py new file mode 100644 index 00000000000..79e99503af0 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/082_zone_to_cell.py @@ -0,0 +1,35 @@ +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + zone_name = instances.c.zone_name + zone_name.alter(name='cell_name') + zones = Table('zones', meta, autoload=True) + zones.rename('cells') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + cell_name = instances.c.cell_name + cell_name.alter(name='zone_name') + cells = Table('cells', meta, autoload=True) + cells.rename('zones') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py b/cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py new file mode 100644 index 00000000000..3869c6ab57b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py @@ -0,0 +1,61 @@ +# Copyright 2012 OpenStack LLC.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import MetaData, Integer, String, Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New table + quota_classes = Table('quota_classes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True), + Column('class_name', + String(length=255, convert_unicode=True, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), index=True), + Column('resource', + String(length=255, convert_unicode=True, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)), + Column('hard_limit', Integer(), nullable=True), + ) + + try: + quota_classes.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(quota_classes)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + quota_classes = Table('quota_classes', meta, autoload=True) + try: + quota_classes.drop() + except Exception: + LOG.error(_("quota_classes table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py b/cinder/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py new file mode 100644 index 00000000000..bcbc2db9013 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
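+ +# NOTE: comparing a column against None makes sqlalchemy emit IS NULL, so +# the upgrade below amounts to (illustrative SQL): +# UPDATE quotas SET hard_limit = -1 WHERE hard_limit IS NULL; +# UPDATE quota_classes SET hard_limit = -1 WHERE hard_limit IS NULL; +# and the downgrade applies the reverse mapping (-1 back to NULL).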
+ +import migrate +import sqlalchemy + + +def upgrade(migrate_engine): + """Map quotas hard_limit from NULL to -1""" + _migrate_unlimited(migrate_engine, None, -1) + + +def downgrade(migrate_engine): + """Map quotas hard_limit from -1 to NULL""" + _migrate_unlimited(migrate_engine, -1, None) + + +def _migrate_unlimited(migrate_engine, old_limit, new_limit): + meta = sqlalchemy.MetaData() + meta.bind = migrate_engine + + def _migrate(table_name): + table = sqlalchemy.Table(table_name, meta, autoload=True) + table.update().\ + where(table.c.hard_limit == old_limit).\ + values(hard_limit=new_limit).execute() + + _migrate('quotas') + _migrate('quota_classes') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py b/cinder/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py new file mode 100644 index 00000000000..8c4f0d5c307 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py @@ -0,0 +1,31 @@ +# Copyright 2012 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Index, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('fixed_ips', meta, autoload=True) + index = Index('address', instances.c.address) + index.create(migrate_engine) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('fixed_ips', meta, autoload=True) + index = Index('address', instances.c.address) + index.drop(migrate_engine) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py b/cinder/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py new file mode 100644 index 00000000000..da985b95613 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + + tables = ["agent_builds", "aggregate_hosts", "aggregate_metadata", + "aggregates", "block_device_mapping", "bw_usage_cache", + "dns_domains", "instance_faults", "instance_type_extra_specs", + "provider_fw_rules", "quota_classes", "s3_images", + "sm_backend_config", "sm_flavors", "sm_volume", + "virtual_storage_arrays", "volume_metadata", + "volume_type_extra_specs", "volume_types"] + + meta = MetaData() + meta.bind = migrate_engine + if migrate_engine.name == "mysql": + d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';") + for row in d.fetchall(): + table_name = row[0] + if table_name in tables: + migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % + table_name) + + +def downgrade(migrate_engine): + pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py b/cinder/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py new file mode 100644 index 00000000000..e66004b12b6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py @@ -0,0 +1,56 @@ +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, BigInteger +from sqlalchemy import MetaData, Integer, String, Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # add column: + bw_usage_cache = Table('bw_usage_cache', meta, autoload=True) + uuid = Column('uuid', String(36)) + + # clear the cache to get rid of entries with no uuid + migrate_engine.execute(bw_usage_cache.delete()) + + bw_usage_cache.create_column(uuid) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # drop column: + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('mac', String(255)), + Column('uuid', String(36)), + Column('start_period', DateTime(timezone=False), nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + + bw_usage_cache.drop_column('uuid') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py b/cinder/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py new file mode 100644 index 00000000000..4962b2b054f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. 
+# Copyright 2012 Michael Still and Canonical Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import select, Column, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + uuid_column = Column('instance_uuid', String(36)) + uuid_column.create(block_device_mapping) + + try: + block_device_mapping.update().values( + instance_uuid=select( + [instances.c.uuid], + instances.c.id == block_device_mapping.c.instance_id) + ).execute() + except Exception: + uuid_column.drop() + raise + + fkeys = list(block_device_mapping.c.instance_id.foreign_keys) + if fkeys: + try: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint( + columns=[block_device_mapping.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + block_device_mapping.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + id_column = Column('instance_id', Integer, ForeignKey('instances.id')) + id_column.create(block_device_mapping) + + try: + block_device_mapping.update().values( + instance_id=select( + [instances.c.id], + instances.c.uuid == block_device_mapping.c.instance_uuid) + ).execute() + except Exception: + id_column.drop() + raise + + block_device_mapping.c.instance_uuid.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql new file mode 100644 index 00000000000..3699ce9abec --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql @@ -0,0 +1,97 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping_backup + SELECT created_at, + updated_at, + deleted_at, + 
deleted, + id, + NULL, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + instance_uuid + FROM block_device_mapping; + + UPDATE block_device_mapping_backup + SET instance_id= + (SELECT id + FROM instances + WHERE block_device_mapping_backup.instance_uuid = instances.uuid + ); + + DROP TABLE block_device_mapping; + + CREATE TABLE block_device_mapping ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping_backup; + + DROP TABLE block_device_mapping_backup; + +COMMIT; \ No newline at end of file diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql new file mode 100644 index 00000000000..d75d2ffa216 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql @@ -0,0 +1,97 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + NULL + FROM block_device_mapping; + + UPDATE block_device_mapping_backup + SET instance_uuid= + (SELECT uuid + FROM instances + WHERE block_device_mapping_backup.instance_id = instances.id + ); + + DROP TABLE block_device_mapping; + + CREATE TABLE block_device_mapping ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN 
KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid) + ); + + INSERT INTO block_device_mapping + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + instance_uuid + FROM block_device_mapping_backup; + + DROP TABLE block_device_mapping_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py b/cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py new file mode 100644 index 00000000000..11bc25b0158 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py @@ -0,0 +1,116 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Build mapping tables for our volume uuid migration. + + These mapping tables serve two purposes: + 1. Provide a method for downgrade after UUID conversion + 2. 
Provide a uuid to associate with existing volumes and snapshots + when we do the actual datatype migration from int to uuid + + """ + meta = MetaData() + meta.bind = migrate_engine + + volume_id_mappings = Table('volume_id_mappings', meta, + Column('created_at', + DateTime(timezone=False)), + Column('updated_at', + DateTime(timezone=False)), + Column('deleted_at', + DateTime(timezone=False)), + Column('deleted', + Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, + nullable=False, + autoincrement=True), + Column('uuid', String(36), + nullable=False)) + try: + volume_id_mappings.create() + except Exception: + LOG.exception("Exception while creating table 'volume_id_mappings'") + meta.drop_all(tables=[volume_id_mappings]) + raise + + snapshot_id_mappings = Table('snapshot_id_mappings', meta, + Column('created_at', + DateTime(timezone=False)), + Column('updated_at', + DateTime(timezone=False)), + Column('deleted_at', + DateTime(timezone=False)), + Column('deleted', + Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, + nullable=False, + autoincrement=True), + Column('uuid', String(36), + nullable=False)) + try: + snapshot_id_mappings.create() + except Exception: + LOG.exception("Exception while creating table 'snapshot_id_mappings'") + meta.drop_all(tables=[snapshot_id_mappings]) + raise + + if migrate_engine.name == "mysql": + migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB") + migrate_engine.execute("ALTER TABLE snapshot_id_mappings "\ + "Engine=InnoDB") + + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + volume_id_mappings = Table('volume_id_mappings', meta, autoload=True) + snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True) + + volume_list = list(volumes.select().execute()) + for v in volume_list: + old_id = v['id'] + new_id = utils.gen_uuid() + row = volume_id_mappings.insert() + row.execute({'id': old_id, + 'uuid': str(new_id)}) + + snapshot_list = list(snapshots.select().execute()) + for s in snapshot_list: + old_id = s['id'] + new_id = utils.gen_uuid() + row = snapshot_id_mappings.insert() + row.execute({'id': old_id, + 'uuid': str(new_id)}) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + volume_id_mappings = Table('volume_id_mappings', meta, autoload=True) + volume_id_mappings.drop() + + snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True) + snapshot_id_mappings.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py b/cinder/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py new file mode 100644 index 00000000000..7887cd88e8b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py @@ -0,0 +1,239 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
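+ +# NOTE: the id columns retyped below are referenced by foreign keys, and +# backends (MySQL in particular) will not alter a column while a FOREIGN KEY +# still points at it, so each constraint is dropped up front and re-created +# after the type change; sqlite skips the in-place ALTER entirely and is +# handled by the companion 090_sqlite_*.sql scripts instead.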
+ +from sqlalchemy import Integer +from sqlalchemy import MetaData, String, Table +from migrate import ForeignKeyConstraint +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Convert volume and snapshot id columns from int to varchar.""" + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + if dialect.startswith('sqlite'): + return + + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + volume_metadata = Table('volume_metadata', meta, autoload=True) + sm_volume = Table('sm_volume', meta, autoload=True) + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + + try: + fkeys = list(snapshots.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(iscsi_targets.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(volume_metadata.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[volume_metadata.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(sm_volume.c.id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[sm_volume.c.id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(block_device_mapping.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id], + refcolumns=[snapshots.c.id], + name=fkey_name).drop() + + except Exception: + LOG.error(_("Foreign Key constraint couldn't be removed")) + raise + + volumes.c.id.alter(String(36), primary_key=True) + volumes.c.snapshot_id.alter(String(36)) + volume_metadata.c.volume_id.alter(String(36), nullable=False) + snapshots.c.id.alter(String(36), primary_key=True) + snapshots.c.volume_id.alter(String(36)) + sm_volume.c.id.alter(String(36)) + block_device_mapping.c.volume_id.alter(String(36)) + block_device_mapping.c.snapshot_id.alter(String(36)) + iscsi_targets.c.volume_id.alter(String(36), nullable=True) + + try: + fkeys = list(snapshots.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(iscsi_targets.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(volume_metadata.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[volume_metadata.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(sm_volume.c.id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[sm_volume.c.id], + refcolumns=[volumes.c.id], + 
name=fkey_name).create() + # NOTE(jdg) We're intentionally leaving off FK's on BDM + + except Exception: + LOG.error(_("Foreign Key constraint couldn't be created")) + raise + + +def downgrade(migrate_engine): + """Convert volume and snapshot id columns back to int.""" + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + if dialect.startswith('sqlite'): + return + + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + volume_metadata = Table('volume_metadata', meta, autoload=True) + sm_volume = Table('sm_volume', meta, autoload=True) + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + + try: + fkeys = list(snapshots.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(iscsi_targets.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(volume_metadata.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[volume_metadata.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(sm_volume.c.id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[sm_volume.c.id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + except Exception: + LOG.error(_("Foreign Key constraint couldn't be removed")) + raise + + volumes.c.id.alter(Integer, primary_key=True, autoincrement=True) + volumes.c.snapshot_id.alter(Integer) + volume_metadata.c.volume_id.alter(Integer, nullable=False) + snapshots.c.id.alter(Integer, primary_key=True, autoincrement=True) + snapshots.c.volume_id.alter(Integer) + sm_volume.c.id.alter(Integer) + block_device_mapping.c.volume_id.alter(Integer) + block_device_mapping.c.snapshot_id.alter(Integer) + iscsi_targets.c.volume_id.alter(Integer, nullable=True) + + try: + fkeys = list(snapshots.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(iscsi_targets.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(volume_metadata.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[volume_metadata.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(sm_volume.c.id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[sm_volume.c.id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + # NOTE(jdg) Put the BDM foreign keys back in place + fkeys = list(block_device_mapping.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name +
ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id], + refcolumns=[snapshots.c.id], + name=fkey_name).create() + + except Exception: + LOG.error(_("Foreign Key constraint couldn't be created")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql new file mode 100644 index 00000000000..7d89da247b3 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql @@ -0,0 +1,226 @@ +BEGIN TRANSACTION; + + -- change id and snapshot_id datatypes in volumes table + CREATE TABLE volumes_backup( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + snapshot_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + provider_auth VARCHAR(255), + volume_type_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + snapshot_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + volume_type_id + FROM volumes; + DROP TABLE volumes; + ALTER TABLE volumes_backup RENAME TO volumes; + + -- change id and volume_id datatypes in snapshots table + CREATE TABLE snapshots_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + user_id VARCHAR(255), + project_id VARCHAR(255), + volume_id INTEGER, + status VARCHAR(255), + progress VARCHAR(255), + volume_size INTEGER, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO snapshots_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + user_id, + project_id, + volume_id, + status, + progress, + volume_size, + display_name, + display_description + FROM snapshots; + DROP TABLE snapshots; + ALTER TABLE snapshots_backup RENAME TO snapshots; + + -- change id and volume_id datatypes in iscsi_targets table + CREATE TABLE iscsi_targets_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + target_num INTEGER, + host VARCHAR(255), + volume_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO iscsi_targets_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + target_num, + host, + volume_id + FROM iscsi_targets; + DROP TABLE iscsi_targets; + ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets; + + CREATE TABLE volume_metadata_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + key VARCHAR(255), + value VARCHAR(255), + volume_id INTEGER, + PRIMARY KEY (id), + FOREIGN
KEY(volume_id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO volume_metadata_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + key, + value, + volume_id + FROM volume_metadata; + DROP TABLE volume_metadata; + ALTER TABLE volume_metadata_backup RENAME TO volume_metadata; + + -- change volume_id and snapshot_id datatypes in bdm table + CREATE TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_uuid VARCHAR(36) NOT NULL, + device_name VARCHAR(255), + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info VARCHAR(255), + FOREIGN KEY(instance_uuid) REFERENCES instances(id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots(id), + PRIMARY KEY (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO block_device_mapping_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + instance_uuid, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping; + DROP TABLE block_device_mapping; + ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping; + + -- change volume_id and sm_volume_table + CREATE TABLE sm_volume_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + backend_id INTEGER NOT NULL, + vdi_uuid VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0,1)) + ); + INSERT INTO sm_volume_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + backend_id, + vdi_uuid + FROM sm_volume; + DROP TABLE sm_volume; + ALTER TABLE sm_volume_backup RENAME TO sm_volume; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql new file mode 100644 index 00000000000..53fbc69f6e6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql @@ -0,0 +1,226 @@ +BEGIN TRANSACTION; + + -- change id and snapshot_id datatypes in volumes table + CREATE TABLE volumes_backup( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + snapshot_id VARCHAR(36), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + provider_auth VARCHAR(255), + volume_type_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + snapshot_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + 
provider_auth, + volume_type_id + FROM volumes; + DROP TABLE volumes; + ALTER TABLE volumes_backup RENAME TO volumes; + + -- change id and volume_id datatypes in snapshots table + CREATE TABLE snapshots_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + user_id VARCHAR(255), + project_id VARCHAR(255), + volume_id VARCHAR(36), + status VARCHAR(255), + progress VARCHAR(255), + volume_size INTEGER, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO snapshots_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + user_id, + project_id, + volume_id, + status, + progress, + volume_size, + display_name, + display_description + FROM snapshots; + DROP TABLE snapshots; + ALTER TABLE snapshots_backup RENAME TO snapshots; + + -- change id and volume_id datatypes in iscsi_targets table + CREATE TABLE iscsi_targets_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + target_num INTEGER, + host VARCHAR(255), + volume_id VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO iscsi_targets_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + target_num, + host, + volume_id + FROM iscsi_targets; + DROP TABLE iscsi_targets; + ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets; + + CREATE TABLE volume_metadata_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + key VARCHAR(255), + value VARCHAR(255), + volume_id VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO volume_metadata_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + key, + value, + volume_id + FROM volume_metadata; + DROP TABLE volume_metadata; + ALTER TABLE volume_metadata_backup RENAME TO volume_metadata; + + -- change volume_id and snapshot_id datatypes in bdm table + CREATE TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_uuid VARCHAR(36) NOT NULL, + device_name VARCHAR(255), + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id VARCHAR(36), + volume_id VARCHAR(36), + volume_size INTEGER, + no_device BOOLEAN, + connection_info VARCHAR(255), + FOREIGN KEY(instance_uuid) REFERENCES instances(id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots(id), + PRIMARY KEY (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO block_device_mapping_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + instance_uuid, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping; + DROP TABLE block_device_mapping; + ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping; + + -- change volume_id and sm_volume_table + CREATE TABLE sm_volume_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + backend_id INTEGER NOT NULL, + vdi_uuid VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(id) REFERENCES volumes(id), + UNIQUE 
(id), + CHECK (deleted IN (0,1)) + ); + INSERT INTO sm_volume_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + backend_id, + vdi_uuid + FROM sm_volume; + DROP TABLE sm_volume; + ALTER TABLE sm_volume_backup RENAME TO sm_volume; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py b/cinder/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py new file mode 100644 index 00000000000..b9ec5c83e05 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py @@ -0,0 +1,145 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, select, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Convert volume and snapshot id columns from int to varchar.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + volume_metadata = Table('volume_metadata', meta, autoload=True) + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + sm_volumes = Table('sm_volume', meta, autoload=True) + + volume_mappings = Table('volume_id_mappings', meta, autoload=True) + snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True) + + volume_list = list(volumes.select().execute()) + for v in volume_list: + new_id = select([volume_mappings.c.uuid], + volume_mappings.c.id == v['id']) + + volumes.update().\ + where(volumes.c.id == v['id']).\ + values(id=new_id).execute() + + sm_volumes.update().\ + where(sm_volumes.c.id == v['id']).\ + values(id=new_id).execute() + + snapshots.update().\ + where(snapshots.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + iscsi_targets.update().\ + where(iscsi_targets.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + volume_metadata.update().\ + where(volume_metadata.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + block_device_mapping.update().\ + where(block_device_mapping.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + snapshot_list = list(snapshots.select().execute()) + for s in snapshot_list: + new_id = select([snapshot_mappings.c.uuid], + snapshot_mappings.c.id == s['id']) + + volumes.update().\ + where(volumes.c.snapshot_id == s['id']).\ + values(snapshot_id=new_id).execute() + + snapshots.update().\ + where(snapshots.c.id == s['id']).\ + values(id=new_id).execute() + + block_device_mapping.update().\ + where(block_device_mapping.c.snapshot_id == s['id']).\ + values(snapshot_id=new_id).execute()
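A subtlety in the loops above that is easy to misread: new_id is a SQLAlchemy select() construct, not a fetched value. Handing it to values() embeds it in the UPDATE as a correlated scalar subquery, so the uuid lookup runs inside the database once per row. A standalone sketch of the statement shape, using the same legacy select(columns, whereclause) API the migration itself relies on (toy table definitions, invented for the demo):

    from sqlalchemy import Column, Integer, MetaData, String, Table, select

    meta = MetaData()
    volume_id_mappings = Table('volume_id_mappings', meta,
                               Column('id', Integer, primary_key=True),
                               Column('uuid', String(36)))
    volumes = Table('volumes', meta,
                    Column('id', String(36), primary_key=True))

    # Nothing executes here; values() embeds the select() as a subquery.
    new_id = select([volume_id_mappings.c.uuid],
                    volume_id_mappings.c.id == 42)
    print(volumes.update().where(volumes.c.id == 42).values(id=new_id))
    # UPDATE volumes SET id=(SELECT volume_id_mappings.uuid
    #     FROM volume_id_mappings WHERE volume_id_mappings.id = :id_1)
    # WHERE volumes.id = :id_2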
+ + +def downgrade(migrate_engine): + """Convert volume and snapshot id columns back to int.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + volume_metadata = Table('volume_metadata', meta, autoload=True) + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + sm_volumes = Table('sm_volume', meta, autoload=True) + + volume_mappings = Table('volume_id_mappings', meta, autoload=True) + snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True) + + volume_list = list(volumes.select().execute()) + for v in volume_list: + new_id = select([volume_mappings.c.id], + volume_mappings.c.uuid == v['id']) + + volumes.update().\ + where(volumes.c.id == v['id']).\ + values(id=new_id).execute() + + sm_volumes.update().\ + where(sm_volumes.c.id == v['id']).\ + values(id=new_id).execute() + + snapshots.update().\ + where(snapshots.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + iscsi_targets.update().\ + where(iscsi_targets.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + volume_metadata.update().\ + where(volume_metadata.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + block_device_mapping.update().\ + where(block_device_mapping.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + snapshot_list = list(snapshots.select().execute()) + for s in snapshot_list: + new_id = select([snapshot_mappings.c.id], + snapshot_mappings.c.uuid == s['id']) + + volumes.update().\ + where(volumes.c.snapshot_id == s['id']).\ + values(snapshot_id=new_id).execute() + + snapshots.update().\ + where(snapshots.c.id == s['id']).\ + values(id=new_id).execute() + + block_device_mapping.update().\ + where(block_device_mapping.c.snapshot_id == s['id']).\ + values(snapshot_id=new_id).execute() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py b/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py new file mode 100644 index 00000000000..e69de29bb2d
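Stepping back to the pair of 090 scripts: SQLite has no ALTER COLUMN, which is why each table is rebuilt wholesale — create a *_backup table that declares the new column types, copy every row across with INSERT ... SELECT, drop the original, and rename the backup into place. A compressed, self-contained illustration of that dance on a toy table (standard-library sqlite3; table and column names invented for the demo):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute("CREATE TABLE volumes (id INTEGER PRIMARY KEY, "
                "display_name VARCHAR(255))")
    cur.execute("INSERT INTO volumes VALUES (1, 'vol-one')")

    # The 090_sqlite_upgrade.sql pattern: rebuild the table so that id
    # becomes VARCHAR(36), the only way SQLite lets a declared type change.
    cur.executescript("""
        CREATE TABLE volumes_backup (
            id VARCHAR(36) NOT NULL,
            display_name VARCHAR(255),
            PRIMARY KEY (id)
        );
        INSERT INTO volumes_backup SELECT id, display_name FROM volumes;
        DROP TABLE volumes;
        ALTER TABLE volumes_backup RENAME TO volumes;
    """)
    print(cur.execute('SELECT * FROM volumes').fetchall())
    # [('1', 'vol-one')] -- the id column now has TEXT affinity

diff --git a/cinder/db/sqlalchemy/migration.py b/cinder/db/sqlalchemy/migration.py new file mode 100644 index 00000000000..153be1a1f49 --- /dev/null +++ b/cinder/db/sqlalchemy/migration.py @@ -0,0 +1,129 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.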
+ +import distutils.version as dist_version +import os +import sys + +from cinder.db.sqlalchemy.session import get_engine +from cinder import exception +from cinder import flags +from cinder import log as logging + + +import sqlalchemy +import migrate +from migrate.versioning import util as migrate_util + + +LOG = logging.getLogger(__name__) + + +@migrate_util.decorator +def patched_with_engine(f, *a, **kw): + url = a[0] + engine = migrate_util.construct_engine(url, **kw) + + try: + kw['engine'] = engine + return f(*a, **kw) + finally: + if isinstance(engine, migrate_util.Engine) and engine is not url: + migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) + engine.dispose() + + +# TODO(jkoelker) When migrate 0.7.3 is released and cinder depends +# on that version or higher, this can be removed +MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') +if (not hasattr(migrate, '__version__') or + dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): + migrate_util.with_engine = patched_with_engine + + +# NOTE(jkoelker) Delay importing migrate until we are patched +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository + +FLAGS = flags.FLAGS + +_REPOSITORY = None + + +def db_sync(version=None): + if version is not None: + try: + version = int(version) + except ValueError: + raise exception.Error(_("version should be an integer")) + + current_version = db_version() + repository = _find_migrate_repo() + if version is None or version > current_version: + return versioning_api.upgrade(get_engine(), repository, version) + else: + return versioning_api.downgrade(get_engine(), repository, + version) + + +def db_version(): + repository = _find_migrate_repo() + try: + return versioning_api.db_version(get_engine(), repository) + except versioning_exceptions.DatabaseNotControlledError: + # If we aren't version controlled we may already have the database + # in the state from before we started version control, check for that + # and set up version_control appropriately + meta = sqlalchemy.MetaData() + engine = get_engine() + meta.reflect(bind=engine) + try: + for table in ('auth_tokens', 'zones', 'export_devices', + 'fixed_ips', 'floating_ips', 'instances', + 'key_pairs', 'networks', 'projects', 'quotas', + 'security_group_instance_association', + 'security_group_rules', 'security_groups', + 'services', 'migrations', + 'users', 'user_project_association', + 'user_project_role_association', + 'user_role_association', + 'virtual_storage_arrays', + 'volumes', 'volume_metadata', + 'volume_types', 'volume_type_extra_specs'): + assert table in meta.tables + return db_version_control(1) + except AssertionError: + return db_version_control(0) + + +def db_version_control(version=None): + repository = _find_migrate_repo() + versioning_api.version_control(get_engine(), repository, version) + return version + + +def _find_migrate_repo(): + """Get the path for the migrate repository.""" + global _REPOSITORY + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'migrate_repo') + assert os.path.exists(path) + if _REPOSITORY is None: + _REPOSITORY = Repository(path) + return _REPOSITORY diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py new file mode 100644 index 00000000000..732e6832f0e --- /dev/null +++ b/cinder/db/sqlalchemy/models.py @@ -0,0 +1,1063 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business 
unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models for cinder data. +""" + +from sqlalchemy.orm import relationship, backref, object_mapper +from sqlalchemy import Column, Integer, BigInteger, String, schema +from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float +from sqlalchemy.exc import IntegrityError +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.schema import ForeignKeyConstraint + +from cinder.db.sqlalchemy.session import get_session + +from cinder import exception +from cinder import flags +from cinder import utils + + +FLAGS = flags.FLAGS +BASE = declarative_base() + + +class CinderBase(object): + """Base class for Cinder Models.""" + __table_args__ = {'mysql_engine': 'InnoDB'} + __table_initialized__ = False + created_at = Column(DateTime, default=utils.utcnow) + updated_at = Column(DateTime, onupdate=utils.utcnow) + deleted_at = Column(DateTime) + deleted = Column(Boolean, default=False) + metadata = None + + def save(self, session=None): + """Save this object.""" + if not session: + session = get_session() + session.add(self) + try: + session.flush() + except IntegrityError, e: + if str(e).endswith('is not unique'): + raise exception.Duplicate(str(e)) + else: + raise + + def delete(self, session=None): + """Delete this object.""" + self.deleted = True + self.deleted_at = utils.utcnow() + self.save(session=session) + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + def __iter__(self): + self._i = iter(object_mapper(self).columns) + return self + + def next(self): + n = self._i.next().name + return n, getattr(self, n) + + def update(self, values): + """Make the model object behave like a dict""" + for k, v in values.iteritems(): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. 
+ + Includes attributes from joins.""" + local = dict(self) + joined = dict([(k, v) for k, v in self.__dict__.iteritems() + if not k[0] == '_']) + local.update(joined) + return local.iteritems() + + +class Service(BASE, CinderBase): + """Represents a running service on a host.""" + + __tablename__ = 'services' + id = Column(Integer, primary_key=True) + host = Column(String(255)) # , ForeignKey('hosts.id')) + binary = Column(String(255)) + topic = Column(String(255)) + report_count = Column(Integer, nullable=False, default=0) + disabled = Column(Boolean, default=False) + availability_zone = Column(String(255), default='cinder') + + +class ComputeNode(BASE, CinderBase): + """Represents a running compute service on a host.""" + + __tablename__ = 'compute_nodes' + id = Column(Integer, primary_key=True) + service_id = Column(Integer, ForeignKey('services.id'), nullable=True) + service = relationship(Service, + backref=backref('compute_node'), + foreign_keys=service_id, + primaryjoin='and_(' + 'ComputeNode.service_id == Service.id,' + 'ComputeNode.deleted == False)') + + vcpus = Column(Integer) + memory_mb = Column(Integer) + local_gb = Column(Integer) + vcpus_used = Column(Integer) + memory_mb_used = Column(Integer) + local_gb_used = Column(Integer) + hypervisor_type = Column(Text) + hypervisor_version = Column(Integer) + hypervisor_hostname = Column(String(255)) + + # Free Ram, amount of activity (resize, migration, boot, etc) and + # the number of running VM's are a good starting point for what's + # important when making scheduling decisions. + # + # NOTE(sandy): We'll need to make this extensible for other schedulers. + free_ram_mb = Column(Integer) + free_disk_gb = Column(Integer) + current_workload = Column(Integer) + running_vms = Column(Integer) + + # Note(masumotok): Expected Strings example: + # + # '{"arch":"x86_64", + # "model":"Nehalem", + # "topology":{"sockets":1, "threads":2, "cores":3}, + # "features":["tdtscp", "xtpr"]}' + # + # Points are "json translatable" and it must have all dictionary keys + # above, since it is copied from tag of getCapabilities() + # (See libvirt.virtConnection). + cpu_info = Column(Text, nullable=True) + disk_available_least = Column(Integer) + + +class Certificate(BASE, CinderBase): + """Represents a an x509 certificate""" + __tablename__ = 'certificates' + id = Column(Integer, primary_key=True) + + user_id = Column(String(255)) + project_id = Column(String(255)) + file_name = Column(String(255)) + + +class Instance(BASE, CinderBase): + """Represents a guest vm.""" + __tablename__ = 'instances' + injected_files = [] + + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + try: + base_name = FLAGS.instance_name_template % self.id + except TypeError: + # Support templates like "uuid-%(uuid)s", etc. + info = {} + for key, value in self.iteritems(): + # prevent recursion if someone specifies %(name)s + # %(name)s will not be valid. 
+ if key == 'name': + continue + info[key] = value + try: + base_name = FLAGS.instance_name_template % info + except KeyError: + base_name = self.uuid + if getattr(self, '_rescue', False): + base_name += "-rescue" + return base_name + + user_id = Column(String(255)) + project_id = Column(String(255)) + + image_ref = Column(String(255)) + kernel_id = Column(String(255)) + ramdisk_id = Column(String(255)) + server_name = Column(String(255)) + +# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True) +# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) +# ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) +# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) +# kernel = relationship(Kernel, backref=backref('instances', order_by=id)) +# project = relationship(Project, backref=backref('instances', order_by=id)) + + launch_index = Column(Integer) + key_name = Column(String(255)) + key_data = Column(Text) + + power_state = Column(Integer) + vm_state = Column(String(255)) + task_state = Column(String(255)) + + memory_mb = Column(Integer) + vcpus = Column(Integer) + root_gb = Column(Integer) + ephemeral_gb = Column(Integer) + + hostname = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) + + # *not* flavor_id + instance_type_id = Column(Integer) + + user_data = Column(Text) + + reservation_id = Column(String(255)) + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + availability_zone = Column(String(255)) + + # User editable field for display in user-facing UIs + display_name = Column(String(255)) + display_description = Column(String(255))
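The name property above is a two-stage template: a positional template such as 'instance-%08x' is tried against the integer id first, and the TypeError raised by a dict-style template such as 'uuid-%(uuid)s' triggers the fallback that formats against the instance's own attributes (with 'name' dropped, to avoid recursing into the property). A pure-Python restatement of that logic; the template strings are examples, not values this patch sets anywhere:

    def render_name(template, instance_id, attrs):
        """Mirror Instance.name: positional template first, then dict-style."""
        try:
            return template % instance_id
        except TypeError:
            info = dict(attrs, id=instance_id)
            info.pop('name', None)  # %(name)s would recurse, so drop it
            try:
                return template % info
            except KeyError:
                return attrs.get('uuid')  # last resort, as in the property

    print(render_name('instance-%08x', 42, {'uuid': 'abc'}))  # instance-0000002a
    print(render_name('uuid-%(uuid)s', 42, {'uuid': 'abc'}))  # uuid-abc

+ + # To remember on which host an instance booted. + # An instance may have moved to another host by live migration.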
+ launched_on = Column(Text) + locked = Column(Boolean) + + os_type = Column(String(255)) + architecture = Column(String(255)) + vm_mode = Column(String(255)) + uuid = Column(String(36)) + + root_device_name = Column(String(255)) + default_ephemeral_device = Column(String(255), nullable=True) + default_swap_device = Column(String(255), nullable=True) + config_drive = Column(String(255)) + + # User editable field meant to represent what ip should be used + # to connect to the instance + access_ip_v4 = Column(String(255)) + access_ip_v6 = Column(String(255)) + + auto_disk_config = Column(Boolean()) + progress = Column(Integer) + + # EC2 instance_initiated_shutdown_teminate + # True: -> 'terminate' + # False: -> 'stop' + shutdown_terminate = Column(Boolean(), default=True, nullable=False) + + # EC2 disable_api_termination + disable_terminate = Column(Boolean(), default=False, nullable=False) + + # OpenStack compute cell name + cell_name = Column(String(255)) + + +class InstanceInfoCache(BASE, CinderBase): + """ + Represents a cache of information about an instance + """ + __tablename__ = 'instance_info_caches' + id = Column(Integer, primary_key=True, autoincrement=True) + + # text column used for storing a json object of network data for api + network_info = Column(Text) + + instance_id = Column(String(36), ForeignKey('instances.uuid'), + nullable=False, unique=True) + instance = relationship(Instance, + backref=backref('info_cache', uselist=False), + foreign_keys=instance_id, + primaryjoin=instance_id == Instance.uuid) + + +class InstanceActions(BASE, CinderBase): + """Represents a guest VM's actions and results""" + __tablename__ = "instance_actions" + id = Column(Integer, primary_key=True) + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + action = Column(String(255)) + error = Column(Text) + + +class InstanceTypes(BASE, CinderBase): + """Represent possible instance_types or flavor of VM offered""" + __tablename__ = "instance_types" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + memory_mb = Column(Integer) + vcpus = Column(Integer) + root_gb = Column(Integer) + ephemeral_gb = Column(Integer) + flavorid = Column(String(255)) + swap = Column(Integer, nullable=False, default=0) + rxtx_factor = Column(Float, nullable=False, default=1) + vcpu_weight = Column(Integer, nullable=True) + + instances = relationship(Instance, + backref=backref('instance_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(' + 'Instance.instance_type_id == ' + 'InstanceTypes.id, ' + 'InstanceTypes.deleted == False)') + + +class Volume(BASE, CinderBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'volumes' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return FLAGS.volume_name_template % self.id + + ec2_id = Column(Integer) + user_id = Column(String(255)) + project_id = Column(String(255)) + + snapshot_id = Column(String(36)) + + host = Column(String(255)) # , ForeignKey('hosts.id')) + size = Column(Integer) + availability_zone = Column(String(255)) # TODO(vish): foreign key? + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance = relationship(Instance, + backref=backref('volumes'), + foreign_keys=instance_id, + primaryjoin='and_(Volume.instance_id==Instance.id,' + 'Volume.deleted==False)') + mountpoint = Column(String(255)) + attach_time = Column(String(255)) # TODO(vish): datetime + status = Column(String(255)) # TODO(vish): enum? 
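A pattern worth calling out here, since it repeats in almost every relationship in this file: rows are soft-deleted (CinderBase.delete only sets the deleted/deleted_at columns), so each primaryjoin bakes 'deleted == False' into the join condition to keep dead rows out of collections. A minimal self-contained sketch of the idiom, with invented Parent/Child models rather than anything from the patch:

    from sqlalchemy import Boolean, Column, ForeignKey, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import backref, relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parents'
        id = Column(Integer, primary_key=True)
        deleted = Column(Boolean, default=False)

    class Child(Base):
        __tablename__ = 'children'
        id = Column(Integer, primary_key=True)
        deleted = Column(Boolean, default=False)
        parent_id = Column(Integer, ForeignKey('parents.id'))
        # Soft-deleted children stay in the table but never appear in
        # parent.children, because the join itself filters them out.
        parent = relationship(Parent,
                              backref=backref('children'),
                              primaryjoin='and_(Child.parent_id == Parent.id,'
                                          'Child.deleted == False)')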
+ attach_status = Column(String(255)) # TODO(vish): enum + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + provider_location = Column(String(255)) + provider_auth = Column(String(255)) + + volume_type_id = Column(Integer) + + +class VolumeMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for a volume""" + __tablename__ = 'volume_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) + volume = relationship(Volume, backref="volume_metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeMetadata.volume_id == Volume.id,' + 'VolumeMetadata.deleted == False)') + + +class VolumeTypes(BASE, CinderBase): + """Represent possible volume_types of volumes offered""" + __tablename__ = "volume_types" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + + volumes = relationship(Volume, + backref=backref('volume_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(' + 'Volume.volume_type_id == VolumeTypes.id, ' + 'VolumeTypes.deleted == False)') + + +class VolumeTypeExtraSpecs(BASE, CinderBase): + """Represents additional specs as key/value pairs for a volume_type""" + __tablename__ = 'volume_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_type_id = Column(Integer, ForeignKey('volume_types.id'), + nullable=False) + volume_type = relationship(VolumeTypes, backref="extra_specs", + foreign_keys=volume_type_id, + primaryjoin='and_(' + 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,' + 'VolumeTypeExtraSpecs.deleted == False)') + + +class Quota(BASE, CinderBase): + """Represents a single quota override for a project. + + If there is no row for a given project id and resource, then the + default for the quota class is used. If there is no row for a + given quota class and resource, then the default for the + deployment is used. If the row is present but the hard limit is + Null, then the resource is unlimited. + """ + + __tablename__ = 'quotas' + id = Column(Integer, primary_key=True) + + project_id = Column(String(255), index=True) + + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) + + +class QuotaClass(BASE, CinderBase): + """Represents a single quota override for a quota class. + + If there is no row for a given quota class and resource, then the + default for the deployment is used. If the row is present but the + hard limit is Null, then the resource is unlimited. 
+ """ + + __tablename__ = 'quota_classes' + id = Column(Integer, primary_key=True) + + class_name = Column(String(255), index=True) + + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) + + +class Snapshot(BASE, CinderBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'snapshots' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(String(36)) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + +class BlockDeviceMapping(BASE, CinderBase): + """Represents block device mapping that is defined by EC2""" + __tablename__ = "block_device_mapping" + id = Column(Integer, primary_key=True, autoincrement=True) + + instance_uuid = Column(Integer, ForeignKey('instances.uuid'), + nullable=False) + instance = relationship(Instance, + backref=backref('balock_device_mapping'), + foreign_keys=instance_uuid, + primaryjoin='and_(BlockDeviceMapping.' + 'instance_uuid==' + 'Instance.uuid,' + 'BlockDeviceMapping.deleted==' + 'False)') + device_name = Column(String(255), nullable=False) + + # default=False for compatibility of the existing code. + # With EC2 API, + # default True for ami specified device. + # default False for created with other timing. + delete_on_termination = Column(Boolean, default=False) + + # for ephemeral device + virtual_name = Column(String(255), nullable=True) + + # for snapshot or volume + snapshot_id = Column(String(36), ForeignKey('snapshots.id')) + # outer join + snapshot = relationship(Snapshot, + foreign_keys=snapshot_id) + + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, + foreign_keys=volume_id) + volume_size = Column(Integer, nullable=True) + + # for no device to suppress devices. 
+ no_device = Column(Boolean, nullable=True) + + connection_info = Column(Text, nullable=True) + + +class IscsiTarget(BASE, CinderBase): + """Represents an iscsi target for a given host""" + __tablename__ = 'iscsi_targets' + __table_args__ = (schema.UniqueConstraint("target_num", "host"), + {'mysql_engine': 'InnoDB'}) + id = Column(Integer, primary_key=True) + target_num = Column(Integer) + host = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, + backref=backref('iscsi_target', uselist=False), + foreign_keys=volume_id, + primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' + 'IscsiTarget.deleted==False)') + + +class SecurityGroupInstanceAssociation(BASE, CinderBase): + __tablename__ = 'security_group_instance_association' + id = Column(Integer, primary_key=True) + security_group_id = Column(Integer, ForeignKey('security_groups.id')) + instance_id = Column(Integer, ForeignKey('instances.id')) + + +class SecurityGroup(BASE, CinderBase): + """Represents a security group.""" + __tablename__ = 'security_groups' + id = Column(Integer, primary_key=True) + + name = Column(String(255)) + description = Column(String(255)) + user_id = Column(String(255)) + project_id = Column(String(255)) + + instances = relationship(Instance, + secondary="security_group_instance_association", + primaryjoin='and_(' + 'SecurityGroup.id == ' + 'SecurityGroupInstanceAssociation.security_group_id,' + 'SecurityGroupInstanceAssociation.deleted == False,' + 'SecurityGroup.deleted == False)', + secondaryjoin='and_(' + 'SecurityGroupInstanceAssociation.instance_id == Instance.id,' + # (anthony) the condition below shouldn't be necessary now that the + # association is being marked as deleted. However, removing this + # may cause existing deployments to choke, so I'm leaving it + 'Instance.deleted == False)', + backref='security_groups') + + +class SecurityGroupIngressRule(BASE, CinderBase): + """Represents a rule in a security group.""" + __tablename__ = 'security_group_rules' + id = Column(Integer, primary_key=True) + + parent_group_id = Column(Integer, ForeignKey('security_groups.id')) + parent_group = relationship("SecurityGroup", backref="rules", + foreign_keys=parent_group_id, + primaryjoin='and_(' + 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' + 'SecurityGroupIngressRule.deleted == False)') + + protocol = Column(String(5)) # "tcp", "udp", or "icmp" + from_port = Column(Integer) + to_port = Column(Integer) + cidr = Column(String(255))
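It helps to spell out the matching semantics these columns imply: the protocol and the [from_port, to_port] range must both match, and the source of the traffic is screened either by cidr or by membership in the grantee group. A rough, hypothetical matcher for the cidr case — written with the modern ipaddress module for brevity; nothing like it ships in this patch:

    import ipaddress

    def rule_matches(rule, protocol, port, source_ip):
        """rule: dict with the protocol/from_port/to_port/cidr columns above."""
        if rule['protocol'] != protocol:
            return False
        if not rule['from_port'] <= port <= rule['to_port']:
            return False
        return ipaddress.ip_address(source_ip) in ipaddress.ip_network(rule['cidr'])

    ssh = {'protocol': 'tcp', 'from_port': 22, 'to_port': 22, 'cidr': '10.0.0.0/8'}
    print(rule_matches(ssh, 'tcp', 22, '10.1.2.3'))  # True

+ + # Note: This is not the parent SecurityGroup. It's the SecurityGroup we're + # granting access for.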
+ group_id = Column(Integer, ForeignKey('security_groups.id')) + grantee_group = relationship("SecurityGroup", + foreign_keys=group_id, + primaryjoin='and_(' + 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' + 'SecurityGroupIngressRule.deleted == False)') + + +class ProviderFirewallRule(BASE, CinderBase): + """Represents a rule in a security group.""" + __tablename__ = 'provider_fw_rules' + id = Column(Integer, primary_key=True) + + protocol = Column(String(5)) # "tcp", "udp", or "icmp" + from_port = Column(Integer) + to_port = Column(Integer) + cidr = Column(String(255)) + + +class KeyPair(BASE, CinderBase): + """Represents a public key pair for ssh.""" + __tablename__ = 'key_pairs' + id = Column(Integer, primary_key=True) + + name = Column(String(255)) + + user_id = Column(String(255)) + + fingerprint = Column(String(255)) + public_key = Column(Text) + + +class Migration(BASE, CinderBase): + """Represents a running host-to-host migration.""" + __tablename__ = 'migrations' + id = Column(Integer, primary_key=True, nullable=False) + # NOTE(tr3buchet): the ____compute variables are instance['host'] + source_compute = Column(String(255)) + dest_compute = Column(String(255)) + # NOTE(tr3buchet): dest_host, btw, is an ip address + dest_host = Column(String(255)) + old_instance_type_id = Column(Integer()) + new_instance_type_id = Column(Integer()) + instance_uuid = Column(String(255), ForeignKey('instances.uuid'), + nullable=True) + #TODO(_cerberus_): enum + status = Column(String(255)) + + +class Network(BASE, CinderBase): + """Represents a network.""" + __tablename__ = 'networks' + __table_args__ = (schema.UniqueConstraint("vpn_public_address", + "vpn_public_port"), + {'mysql_engine': 'InnoDB'}) + id = Column(Integer, primary_key=True) + label = Column(String(255)) + + injected = Column(Boolean, default=False) + cidr = Column(String(255), unique=True) + cidr_v6 = Column(String(255), unique=True) + multi_host = Column(Boolean, default=False) + + gateway_v6 = Column(String(255)) + netmask_v6 = Column(String(255)) + netmask = Column(String(255)) + bridge = Column(String(255)) + bridge_interface = Column(String(255)) + gateway = Column(String(255)) + broadcast = Column(String(255)) + dns1 = Column(String(255)) + dns2 = Column(String(255)) + + vlan = Column(Integer) + vpn_public_address = Column(String(255)) + vpn_public_port = Column(Integer) + vpn_private_address = Column(String(255)) + dhcp_start = Column(String(255)) + + rxtx_base = Column(Integer) + + project_id = Column(String(255)) + priority = Column(Integer) + host = Column(String(255)) # , ForeignKey('hosts.id')) + uuid = Column(String(36)) + + +class VirtualInterface(BASE, CinderBase): + """Represents a virtual interface on an instance.""" + __tablename__ = 'virtual_interfaces' + id = Column(Integer, primary_key=True) + address = Column(String(255), unique=True) + network_id = Column(Integer, nullable=False) + instance_id = Column(Integer, nullable=False) + uuid = Column(String(36)) + + +# TODO(vish): can these both come from the same baseclass? 
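One plausible answer to the TODO above: the two IP models share their id/address/host columns, and a declarative mixin can hoist those into a common base while each class keeps its own table. A sketch with invented demo names; the patch itself leaves the two classes independent:

    from sqlalchemy import Boolean, Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class IpMixin(object):
        """Columns FixedIp and FloatingIp have in common."""
        id = Column(Integer, primary_key=True)
        address = Column(String(255))
        host = Column(String(255))

    class FixedIpDemo(IpMixin, Base):
        __tablename__ = 'fixed_ips_demo'
        leased = Column(Boolean, default=False)

    class FloatingIpDemo(IpMixin, Base):
        __tablename__ = 'floating_ips_demo'
        auto_assigned = Column(Boolean, default=False)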
+class FixedIp(BASE, CinderBase): + """Represents a fixed ip for an instance.""" + __tablename__ = 'fixed_ips' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + network_id = Column(Integer, nullable=True) + virtual_interface_id = Column(Integer, nullable=True) + instance_id = Column(Integer, nullable=True) + # associated means that a fixed_ip has its instance_id column set + # allocated means that a fixed_ip has a its virtual_interface_id column set + allocated = Column(Boolean, default=False) + # leased means dhcp bridge has leased the ip + leased = Column(Boolean, default=False) + reserved = Column(Boolean, default=False) + host = Column(String(255)) + + +class FloatingIp(BASE, CinderBase): + """Represents a floating ip that dynamically forwards to a fixed ip.""" + __tablename__ = 'floating_ips' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + fixed_ip_id = Column(Integer, nullable=True) + project_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) + auto_assigned = Column(Boolean, default=False, nullable=False) + pool = Column(String(255)) + interface = Column(String(255)) + + +class AuthToken(BASE, CinderBase): + """Represents an authorization token for all API transactions. + + Fields are a string representing the actual token and a user id for + mapping to the actual user + + """ + __tablename__ = 'auth_tokens' + token_hash = Column(String(255), primary_key=True) + user_id = Column(String(255)) + server_management_url = Column(String(255)) + storage_url = Column(String(255)) + cdn_management_url = Column(String(255)) + + +class User(BASE, CinderBase): + """Represents a user.""" + __tablename__ = 'users' + id = Column(String(255), primary_key=True) + + name = Column(String(255)) + access_key = Column(String(255)) + secret_key = Column(String(255)) + + is_admin = Column(Boolean) + + +class Project(BASE, CinderBase): + """Represents a project.""" + __tablename__ = 'projects' + id = Column(String(255), primary_key=True) + name = Column(String(255)) + description = Column(String(255)) + + project_manager = Column(String(255), ForeignKey(User.id)) + + members = relationship(User, + secondary='user_project_association', + backref='projects') + + +class DNSDomain(BASE, CinderBase): + """Represents a DNS domain with availability zone or project info.""" + __tablename__ = 'dns_domains' + domain = Column(String(512), primary_key=True) + scope = Column(String(255)) + availability_zone = Column(String(255)) + project_id = Column(String(255)) + project = relationship(Project, + primaryjoin=project_id == Project.id, + foreign_keys=[Project.id], + uselist=False) + + +class UserProjectRoleAssociation(BASE, CinderBase): + __tablename__ = 'user_project_role_association' + user_id = Column(String(255), primary_key=True) + user = relationship(User, + primaryjoin=user_id == User.id, + foreign_keys=[User.id], + uselist=False) + + project_id = Column(String(255), primary_key=True) + project = relationship(Project, + primaryjoin=project_id == Project.id, + foreign_keys=[Project.id], + uselist=False) + + role = Column(String(255), primary_key=True) + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']) + + +class UserRoleAssociation(BASE, CinderBase): + __tablename__ = 'user_role_association' + user_id = Column(String(255), ForeignKey('users.id'), primary_key=True) + user = relationship(User, backref='roles') + role = Column(String(255), 
primary_key=True) + + +class UserProjectAssociation(BASE, CinderBase): + __tablename__ = 'user_project_association' + user_id = Column(String(255), ForeignKey(User.id), primary_key=True) + project_id = Column(String(255), ForeignKey(Project.id), primary_key=True) + + +class ConsolePool(BASE, CinderBase): + """Represents pool of consoles on the same physical node.""" + __tablename__ = 'console_pools' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + username = Column(String(255)) + password = Column(String(255)) + console_type = Column(String(255)) + public_hostname = Column(String(255)) + host = Column(String(255)) + compute_host = Column(String(255)) + + +class Console(BASE, CinderBase): + """Represents a console session for an instance.""" + __tablename__ = 'consoles' + id = Column(Integer, primary_key=True) + instance_name = Column(String(255)) + instance_id = Column(Integer) + password = Column(String(255)) + port = Column(Integer, nullable=True) + pool_id = Column(Integer, ForeignKey('console_pools.id')) + pool = relationship(ConsolePool, backref=backref('consoles')) + + +class InstanceMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for an instance""" + __tablename__ = 'instance_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + instance = relationship(Instance, backref="metadata", + foreign_keys=instance_id, + primaryjoin='and_(' + 'InstanceMetadata.instance_id == Instance.id,' + 'InstanceMetadata.deleted == False)') + + +class InstanceTypeExtraSpecs(BASE, CinderBase): + """Represents additional specs as key/value pairs for an instance_type""" + __tablename__ = 'instance_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_type_id = Column(Integer, ForeignKey('instance_types.id'), + nullable=False) + instance_type = relationship(InstanceTypes, backref="extra_specs", + foreign_keys=instance_type_id, + primaryjoin='and_(' + 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,' + 'InstanceTypeExtraSpecs.deleted == False)') + + +class Cell(BASE, CinderBase): + """Represents parent and child cells of this cell.""" + __tablename__ = 'cells' + id = Column(Integer, primary_key=True) + name = Column(String(255)) + api_url = Column(String(255)) + username = Column(String(255)) + password = Column(String(255)) + weight_offset = Column(Float(), default=0.0) + weight_scale = Column(Float(), default=1.0) + is_parent = Column(Boolean()) + rpc_host = Column(String(255)) + rpc_port = Column(Integer()) + rpc_virtual_host = Column(String(255)) + + +class AggregateHost(BASE, CinderBase): + """Represents a host that is member of an aggregate.""" + __tablename__ = 'aggregate_hosts' + id = Column(Integer, primary_key=True, autoincrement=True) + host = Column(String(255), unique=True) + aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) + + +class AggregateMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for an aggregate.""" + __tablename__ = 'aggregate_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255), nullable=False) + value = Column(String(255), nullable=False) + aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) + + +class Aggregate(BASE, CinderBase): + """Represents a cluster of hosts that exists in this zone.""" + __tablename__ = 'aggregates' 
+ id = Column(Integer, primary_key=True, autoincrement=True) + name = Column(String(255), unique=True) + operational_state = Column(String(255), nullable=False) + availability_zone = Column(String(255), nullable=False) + _hosts = relationship(AggregateHost, + secondary="aggregate_hosts", + primaryjoin='and_(' + 'Aggregate.id == AggregateHost.aggregate_id,' + 'AggregateHost.deleted == False,' + 'Aggregate.deleted == False)', + secondaryjoin='and_(' + 'AggregateHost.aggregate_id == Aggregate.id, ' + 'AggregateHost.deleted == False,' + 'Aggregate.deleted == False)', + backref='aggregates') + + _metadata = relationship(AggregateMetadata, + secondary="aggregate_metadata", + primaryjoin='and_(' + 'Aggregate.id == AggregateMetadata.aggregate_id,' + 'AggregateMetadata.deleted == False,' + 'Aggregate.deleted == False)', + secondaryjoin='and_(' + 'AggregateMetadata.aggregate_id == Aggregate.id, ' + 'AggregateMetadata.deleted == False,' + 'Aggregate.deleted == False)', + backref='aggregates') + + @property + def hosts(self): + return [h.host for h in self._hosts] + + @property + def metadetails(self): + return dict([(m.key, m.value) for m in self._metadata]) + + +class AgentBuild(BASE, CinderBase): + """Represents an agent build.""" + __tablename__ = 'agent_builds' + id = Column(Integer, primary_key=True) + hypervisor = Column(String(255)) + os = Column(String(255)) + architecture = Column(String(255)) + version = Column(String(255)) + url = Column(String(255)) + md5hash = Column(String(255)) + + +class BandwidthUsage(BASE, CinderBase): + """Cache for instance bandwidth usage data pulled from the hypervisor""" + __tablename__ = 'bw_usage_cache' + id = Column(Integer, primary_key=True, nullable=False) + uuid = Column(String(36), nullable=False) + mac = Column(String(255), nullable=False) + start_period = Column(DateTime, nullable=False) + last_refreshed = Column(DateTime) + bw_in = Column(BigInteger) + bw_out = Column(BigInteger) + + +class S3Image(BASE, CinderBase): + """Compatibility layer for the S3 image service talking to Glance""" + __tablename__ = 's3_images' + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class VolumeIdMapping(BASE, CinderBase): + """Compatability layer for the EC2 volume service""" + __tablename__ = 'volume_id_mappings' + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class SnapshotIdMapping(BASE, CinderBase): + """Compatability layer for the EC2 snapshot service""" + __tablename__ = 'snapshot_id_mappings' + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class SMFlavors(BASE, CinderBase): + """Represents a flavor for SM volumes.""" + __tablename__ = 'sm_flavors' + id = Column(Integer(), primary_key=True) + label = Column(String(255)) + description = Column(String(255)) + + +class SMBackendConf(BASE, CinderBase): + """Represents the connection to the backend for SM.""" + __tablename__ = 'sm_backend_config' + id = Column(Integer(), primary_key=True) + flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False) + sr_uuid = Column(String(255)) + sr_type = Column(String(255)) + config_params = Column(String(2047)) + + +class SMVolume(BASE, CinderBase): + __tablename__ = 'sm_volume' + id = Column(String(36), ForeignKey(Volume.id), primary_key=True) + backend_id = Column(Integer, ForeignKey('sm_backend_config.id'), + nullable=False) + 
vdi_uuid = Column(String(255)) + + +class InstanceFault(BASE, CinderBase): + __tablename__ = 'instance_faults' + id = Column(Integer(), primary_key=True, autoincrement=True) + instance_uuid = Column(String(36), + ForeignKey('instances.uuid'), + nullable=False) + code = Column(Integer(), nullable=False) + message = Column(String(255)) + details = Column(Text) + + +def register_models(): + """Register Models and create metadata. + + Called from cinder.db.sqlalchemy.__init__ as part of loading the driver, + it will never need to be called explicitly elsewhere unless the + connection is lost and needs to be reestablished. + """ + from sqlalchemy import create_engine + models = (AgentBuild, + Aggregate, + AggregateHost, + AggregateMetadata, + AuthToken, + Certificate, + Cell, + Console, + ConsolePool, + FixedIp, + FloatingIp, + Instance, + InstanceActions, + InstanceFault, + InstanceMetadata, + InstanceTypeExtraSpecs, + InstanceTypes, + IscsiTarget, + Migration, + Network, + Project, + SecurityGroup, + SecurityGroupIngressRule, + SecurityGroupInstanceAssociation, + Service, + SMBackendConf, + SMFlavors, + SMVolume, + User, + Volume, + VolumeMetadata, + VolumeTypeExtraSpecs, + VolumeTypes, + VolumeIdMapping, + SnapshotIdMapping, + ) + engine = create_engine(FLAGS.sql_connection, echo=False) + for model in models: + model.metadata.create_all(engine) diff --git a/cinder/db/sqlalchemy/session.py b/cinder/db/sqlalchemy/session.py new file mode 100644 index 00000000000..fd6eef4ba45 --- /dev/null +++ b/cinder/db/sqlalchemy/session.py @@ -0,0 +1,156 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Session Handling for SQLAlchemy backend.""" + +import time + +import sqlalchemy.interfaces +import sqlalchemy.orm +from sqlalchemy.exc import DisconnectionError, OperationalError +from sqlalchemy.pool import NullPool, StaticPool + +import cinder.exception +import cinder.flags as flags +import cinder.log as logging + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + +_ENGINE = None +_MAKER = None + + +def get_session(autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy session.""" + global _MAKER + + if _MAKER is None: + engine = get_engine() + _MAKER = get_maker(engine, autocommit, expire_on_commit) + + session = _MAKER() + session.query = cinder.exception.wrap_db_error(session.query) + session.flush = cinder.exception.wrap_db_error(session.flush) + return session + + +class SynchronousSwitchListener(sqlalchemy.interfaces.PoolListener): + + """Switch sqlite connections to non-synchronous mode""" + + def connect(self, dbapi_con, con_record): + dbapi_con.execute("PRAGMA synchronous = OFF") + + +class MySQLPingListener(object): + + """ + Ensures that MySQL connections checked out of the + pool are alive. 
+ + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + + def checkout(self, dbapi_con, con_record, con_proxy): + try: + dbapi_con.cursor().execute('select 1') + except dbapi_con.OperationalError, ex: + if ex.args[0] in (2006, 2013, 2014, 2045, 2055): + LOG.warn('Got mysql server has gone away: %s', ex) + raise DisconnectionError("Database server went away") + else: + raise + + +def is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. + conn_err_codes = ('2002', '2003', '2006') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def get_engine(): + """Return a SQLAlchemy engine.""" + global _ENGINE + if _ENGINE is None: + connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection) + + engine_args = { + "pool_recycle": FLAGS.sql_idle_timeout, + "echo": False, + 'convert_unicode': True, + } + + # Map our SQL debug level to SQLAlchemy's options + if FLAGS.sql_connection_debug >= 100: + engine_args['echo'] = 'debug' + elif FLAGS.sql_connection_debug >= 50: + engine_args['echo'] = True + + if "sqlite" in connection_dict.drivername: + engine_args["poolclass"] = NullPool + + if FLAGS.sql_connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + + if not FLAGS.sqlite_synchronous: + engine_args["listeners"] = [SynchronousSwitchListener()] + + if 'mysql' in connection_dict.drivername: + engine_args['listeners'] = [MySQLPingListener()] + + _ENGINE = sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) + + try: + _ENGINE.connect() + except OperationalError, e: + if not is_db_connection_error(e.args[0]): + raise + + remaining = FLAGS.sql_max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _('SQL connection failed. %s attempts left.') + LOG.warn(msg % remaining) + if remaining != 'infinite': + remaining -= 1 + time.sleep(FLAGS.sql_retry_interval) + try: + _ENGINE.connect() + break + except OperationalError, e: + if (remaining != 'infinite' and remaining == 0) or \ + not is_db_connection_error(e.args[0]): + raise + return _ENGINE + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) diff --git a/cinder/exception.py b/cinder/exception.py new file mode 100644 index 00000000000..b57c1925ea5 --- /dev/null +++ b/cinder/exception.py @@ -0,0 +1,938 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cinder base exception handling. 
+ +Includes decorator for re-raising Cinder-type exceptions. + +SHOULD include dedicated exception logging. + +""" + +import functools +import sys + +import webob.exc + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +class ConvertedException(webob.exc.WSGIHTTPException): + def __init__(self, code=0, title="", explanation=""): + self.code = code + self.title = title + self.explanation = explanation + super(ConvertedException, self).__init__() + + +class ProcessExecutionError(IOError): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + + if description is None: + description = _('Unexpected error while running command.') + if exit_code is None: + exit_code = '-' + message = _('%(description)s\nCommand: %(cmd)s\n' + 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % locals() + IOError.__init__(self, message) + + +class Error(Exception): + pass + + +class DBError(Error): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(str(inner_exception)) + + +def wrap_db_error(f): + def _wrap(*args, **kwargs): + try: + return f(*args, **kwargs) + except UnicodeEncodeError: + raise InvalidUnicodeParameter() + except Exception, e: + LOG.exception(_('DB exception wrapped.')) + raise DBError(e) + _wrap.func_name = f.func_name + return _wrap + + +def wrap_exception(notifier=None, publisher_id=None, event_type=None, + level=None): + """This decorator wraps a method to catch any exceptions that may + get thrown. It logs the exception as well as optionally sending + it to the notification system. + """ + # TODO(sandy): Find a way to import cinder.notifier.api so we don't have + # to pass it in as a parameter. Otherwise we get a cyclic import of + # cinder.notifier.api -> cinder.utils -> cinder.exception :( + # TODO(johannes): Also, it would be nice to use + # utils.save_and_reraise_exception() without an import loop + def inner(f): + def wrapped(*args, **kw): + try: + return f(*args, **kw) + except Exception, e: + # Save exception since it can be clobbered during processing + # below before we can re-raise + exc_info = sys.exc_info() + + if notifier: + payload = dict(args=args, exception=e) + payload.update(kw) + + # Use a temp vars so we don't shadow + # our outer definitions. + temp_level = level + if not temp_level: + temp_level = notifier.ERROR + + temp_type = event_type + if not temp_type: + # If f has multiple decorators, they must use + # functools.wraps to ensure the name is + # propagated. + temp_type = f.__name__ + + notifier.notify(publisher_id, temp_type, temp_level, + payload) + + # re-raise original exception since it may have been clobbered + raise exc_info[0], exc_info[1], exc_info[2] + + return functools.wraps(f)(wrapped) + return inner + + +class CinderException(Exception): + """Base Cinder Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
+ + """ + message = _("An unknown exception occurred.") + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.message % kwargs + + except Exception as e: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + # at least get the core message out if something happened + message = self.message + + super(CinderException, self).__init__(message) + + +class DecryptionFailure(CinderException): + message = _("Failed to decrypt text") + + +class ImagePaginationFailed(CinderException): + message = _("Failed to paginate through images from image service") + + +class VirtualInterfaceCreateException(CinderException): + message = _("Virtual Interface creation failed") + + +class VirtualInterfaceMacAddressException(CinderException): + message = _("5 attempts to create virtual interface" + "with unique mac address failed") + + +class GlanceConnectionFailed(CinderException): + message = _("Connection to glance failed") + ": %(reason)s" + + +class MelangeConnectionFailed(CinderException): + message = _("Connection to melange failed") + ": %(reason)s" + + +class NotAuthorized(CinderException): + message = _("Not authorized.") + code = 403 + + +class AdminRequired(NotAuthorized): + message = _("User does not have admin privileges") + + +class PolicyNotAuthorized(NotAuthorized): + message = _("Policy doesn't allow %(action)s to be performed.") + + +class ImageNotAuthorized(CinderException): + message = _("Not authorized for image %(image_id)s.") + + +class Invalid(CinderException): + message = _("Unacceptable parameters.") + code = 400 + + +class InvalidSnapshot(Invalid): + message = _("Invalid snapshot") + ": %(reason)s" + + +class VolumeUnattached(Invalid): + message = _("Volume %(volume_id)s is not attached to anything") + + +class InvalidKeypair(Invalid): + message = _("Keypair data is invalid") + + +class SfJsonEncodeFailure(CinderException): + message = _("Failed to load data into json format") + + +class InvalidRequest(Invalid): + message = _("The request is invalid.") + + +class InvalidSignature(Invalid): + message = _("Invalid signature %(signature)s for user %(user)s.") + + +class InvalidInput(Invalid): + message = _("Invalid input received") + ": %(reason)s" + + +class InvalidInstanceType(Invalid): + message = _("Invalid instance type %(instance_type)s.") + + +class InvalidVolumeType(Invalid): + message = _("Invalid volume type") + ": %(reason)s" + + +class InvalidVolume(Invalid): + message = _("Invalid volume") + ": %(reason)s" + + +class InvalidPortRange(Invalid): + message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") + + +class InvalidIpProtocol(Invalid): + message = _("Invalid IP protocol %(protocol)s.") + + +class InvalidContentType(Invalid): + message = _("Invalid content type %(content_type)s.") + + +class InvalidCidr(Invalid): + message = _("Invalid cidr %(cidr)s.") + + +class InvalidRPCConnectionReuse(Invalid): + message = _("Invalid reuse of an RPC connection.") + + +class InvalidUnicodeParameter(Invalid): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. 
+
+
+# Cannot be templated as the error syntax varies.
+# msg needs to be constructed when raised.
+class InvalidParameterValue(Invalid):
+    message = _("%(err)s")
+
+
+class InvalidAggregateAction(Invalid):
+    message = _("Cannot perform action '%(action)s' on aggregate "
+                "%(aggregate_id)s. Reason: %(reason)s.")
+
+
+class InvalidGroup(Invalid):
+    message = _("Group not valid. Reason: %(reason)s")
+
+
+class InstanceInvalidState(Invalid):
+    message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
+                "%(method)s while the instance is in this state.")
+
+
+class InstanceNotRunning(Invalid):
+    message = _("Instance %(instance_id)s is not running.")
+
+
+class InstanceNotSuspended(Invalid):
+    message = _("Instance %(instance_id)s is not suspended.")
+
+
+class InstanceNotInRescueMode(Invalid):
+    message = _("Instance %(instance_id)s is not in rescue mode")
+
+
+class InstanceSuspendFailure(Invalid):
+    message = _("Failed to suspend instance") + ": %(reason)s"
+
+
+class InstanceResumeFailure(Invalid):
+    message = _("Failed to resume server") + ": %(reason)s."
+
+
+class InstanceRebootFailure(Invalid):
+    message = _("Failed to reboot instance") + ": %(reason)s"
+
+
+class InstanceTerminationFailure(Invalid):
+    message = _("Failed to terminate instance") + ": %(reason)s"
+
+
+class ServiceUnavailable(Invalid):
+    message = _("Service is unavailable at this time.")
+
+
+class VolumeServiceUnavailable(ServiceUnavailable):
+    message = _("Volume service is unavailable at this time.")
+
+
+class UnableToMigrateToSelf(Invalid):
+    message = _("Unable to migrate instance (%(instance_id)s) "
+                "to current host (%(host)s).")
+
+
+class DestinationHostUnavailable(Invalid):
+    message = _("Destination compute host is unavailable at this time.")
+
+
+class SourceHostUnavailable(Invalid):
+    message = _("Original compute host is unavailable at this time.")
+
+
+class InvalidHypervisorType(Invalid):
+    message = _("The supplied hypervisor type is invalid.")
+
+
+class DestinationHypervisorTooOld(Invalid):
+    message = _("The instance requires a newer hypervisor version than "
+                "has been provided.")
+
+
+class DestinationDiskExists(Invalid):
+    message = _("The supplied disk path (%(path)s) already exists; "
+                "it is expected not to exist.")
+
+
+class InvalidDevicePath(Invalid):
+    message = _("The supplied device path (%(path)s) is invalid.")
+
+
+class DeviceIsBusy(Invalid):
+    message = _("The supplied device (%(device)s) is busy.")
+
+
+class InvalidCPUInfo(Invalid):
+    message = _("Unacceptable CPU info") + ": %(reason)s"
+
+
+class InvalidIpAddressError(Invalid):
+    message = _("%(address)s is not a valid IP v4/6 address.")
+
+
+class InvalidVLANTag(Invalid):
+    message = _("VLAN tag is not appropriate for the port group "
+                "%(bridge)s. Expected VLAN tag is %(tag)s, "
+                "but the one associated with the port group is %(pgroup)s.")
+
+
+class InvalidVLANPortGroup(Invalid):
+    message = _("vSwitch which contains the port group %(bridge)s is "
+                "not associated with the desired physical adapter. "
+                "Expected vSwitch is %(expected)s, but the one associated "
+                "is %(actual)s.")
+
+
+class InvalidDiskFormat(Invalid):
+    message = _("Disk format %(disk_format)s is not acceptable")
+
+
+class ImageUnacceptable(Invalid):
+    message = _("Image %(image_id)s is unacceptable: %(reason)s")
+
+
+class InstanceUnacceptable(Invalid):
+    message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
+
+
+class NotFound(CinderException):
+    message = _("Resource could not be found.")
+    code = 404
+
+
+class FlagNotSet(NotFound):
+    message = _("Required flag %(flag)s not set.")
+
+
+class VolumeNotFound(NotFound):
+    message = _("Volume %(volume_id)s could not be found.")
+
+
+class SfAccountNotFound(NotFound):
+    message = _("Unable to locate account %(account_name)s on "
+                "Solidfire device")
+
+
+class VolumeNotFoundForInstance(VolumeNotFound):
+    message = _("Volume not found for instance %(instance_id)s.")
+
+
+class VolumeMetadataNotFound(NotFound):
+    message = _("Volume %(volume_id)s has no metadata with "
+                "key %(metadata_key)s.")
+
+
+class NoVolumeTypesFound(NotFound):
+    message = _("Zero volume types found.")
+
+
+class VolumeTypeNotFound(NotFound):
+    message = _("Volume type %(volume_type_id)s could not be found.")
+
+
+class VolumeTypeNotFoundByName(VolumeTypeNotFound):
+    message = _("Volume type with name %(volume_type_name)s "
+                "could not be found.")
+
+
+class VolumeTypeExtraSpecsNotFound(NotFound):
+    message = _("Volume Type %(volume_type_id)s has no extra specs with "
+                "key %(extra_specs_key)s.")
+
+
+class SnapshotNotFound(NotFound):
+    message = _("Snapshot %(snapshot_id)s could not be found.")
+
+
+class VolumeIsBusy(CinderException):
+    message = _("deleting volume %(volume_name)s that has snapshot")
+
+
+class SnapshotIsBusy(CinderException):
+    message = _("deleting snapshot %(snapshot_name)s that has "
+                "dependent volumes")
+
+
+class ISCSITargetNotFoundForVolume(NotFound):
+    message = _("No target id found for volume %(volume_id)s.")
+
+
+class DiskNotFound(NotFound):
+    message = _("No disk at %(location)s")
+
+
+class VolumeDriverNotFound(NotFound):
+    message = _("Could not find a handler for %(driver_type)s volume.")
+
+
+class InvalidImageRef(Invalid):
+    message = _("Invalid image href %(image_href)s.")
+
+
+class ListingImageRefsNotSupported(Invalid):
+    message = _("Some images have been stored via hrefs."
+ " This version of the api does not support displaying image hrefs.") + + +class ImageNotFound(NotFound): + message = _("Image %(image_id)s could not be found.") + + +class KernelNotFoundForImage(ImageNotFound): + message = _("Kernel not found for image %(image_id)s.") + + +class UserNotFound(NotFound): + message = _("User %(user_id)s could not be found.") + + +class ProjectNotFound(NotFound): + message = _("Project %(project_id)s could not be found.") + + +class ProjectMembershipNotFound(NotFound): + message = _("User %(user_id)s is not a member of project %(project_id)s.") + + +class UserRoleNotFound(NotFound): + message = _("Role %(role_id)s could not be found.") + + +class StorageRepositoryNotFound(NotFound): + message = _("Cannot find SR to read/write VDI.") + + +class DatastoreNotFound(NotFound): + message = _("Could not find the datastore reference(s) which the VM uses.") + + +class FixedIpNotFound(NotFound): + message = _("No fixed IP associated with id %(id)s.") + + +class FixedIpNotFoundForAddress(FixedIpNotFound): + message = _("Fixed ip not found for address %(address)s.") + + +class FixedIpNotFoundForInstance(FixedIpNotFound): + message = _("Instance %(instance_id)s has zero fixed ips.") + + +class FixedIpNotFoundForNetworkHost(FixedIpNotFound): + message = _("Network host %(host)s has zero fixed ips " + "in network %(network_id)s.") + + +class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): + message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.") + + +class FixedIpNotFoundForHost(FixedIpNotFound): + message = _("Host %(host)s has zero fixed ips.") + + +class FixedIpNotFoundForNetwork(FixedIpNotFound): + message = _("Fixed IP address (%(address)s) does not exist in " + "network (%(network_uuid)s).") + + +class FixedIpAlreadyInUse(CinderException): + message = _("Fixed IP address %(address)s is already in use.") + + +class FixedIpInvalid(Invalid): + message = _("Fixed IP address %(address)s is invalid.") + + +class NoMoreFixedIps(CinderException): + message = _("Zero fixed ips available.") + + +class NoFixedIpsDefined(NotFound): + message = _("Zero fixed ips could be found.") + + +class FloatingIpNotFound(NotFound): + message = _("Floating ip not found for id %(id)s.") + + +class FloatingIpDNSExists(Invalid): + message = _("The DNS entry %(name)s already exists in domain %(domain)s.") + + +class FloatingIpNotFoundForAddress(FloatingIpNotFound): + message = _("Floating ip not found for address %(address)s.") + + +class FloatingIpNotFoundForHost(FloatingIpNotFound): + message = _("Floating ip not found for host %(host)s.") + + +class NoMoreFloatingIps(FloatingIpNotFound): + message = _("Zero floating ips available.") + + +class FloatingIpAssociated(CinderException): + message = _("Floating ip %(address)s is associated.") + + +class FloatingIpNotAssociated(CinderException): + message = _("Floating ip %(address)s is not associated.") + + +class NoFloatingIpsDefined(NotFound): + message = _("Zero floating ips exist.") + + +class NoFloatingIpInterface(NotFound): + message = _("Interface %(interface)s not found.") + + +class KeypairNotFound(NotFound): + message = _("Keypair %(name)s not found for user %(user_id)s") + + +class CertificateNotFound(NotFound): + message = _("Certificate %(certificate_id)s not found.") + + +class ServiceNotFound(NotFound): + message = _("Service %(service_id)s could not be found.") + + +class HostNotFound(NotFound): + message = _("Host %(host)s could not be found.") + + +class HostBinaryNotFound(NotFound): + message = _("Could not 
find binary %(binary)s on host %(host)s.") + + +class AuthTokenNotFound(NotFound): + message = _("Auth token %(token)s could not be found.") + + +class AccessKeyNotFound(NotFound): + message = _("Access Key %(access_key)s could not be found.") + + +class QuotaNotFound(NotFound): + message = _("Quota could not be found") + + +class ProjectQuotaNotFound(QuotaNotFound): + message = _("Quota for project %(project_id)s could not be found.") + + +class QuotaClassNotFound(QuotaNotFound): + message = _("Quota class %(class_name)s could not be found.") + + +class SecurityGroupNotFound(NotFound): + message = _("Security group %(security_group_id)s not found.") + + +class SecurityGroupNotFoundForProject(SecurityGroupNotFound): + message = _("Security group %(security_group_id)s not found " + "for project %(project_id)s.") + + +class SecurityGroupNotFoundForRule(SecurityGroupNotFound): + message = _("Security group with rule %(rule_id)s not found.") + + +class SecurityGroupExistsForInstance(Invalid): + message = _("Security group %(security_group_id)s is already associated" + " with the instance %(instance_id)s") + + +class SecurityGroupNotExistsForInstance(Invalid): + message = _("Security group %(security_group_id)s is not associated with" + " the instance %(instance_id)s") + + +class MigrationNotFound(NotFound): + message = _("Migration %(migration_id)s could not be found.") + + +class MigrationNotFoundByStatus(MigrationNotFound): + message = _("Migration not found for instance %(instance_id)s " + "with status %(status)s.") + + +class NoInstanceTypesFound(NotFound): + message = _("Zero instance types found.") + + +class InstanceTypeNotFound(NotFound): + message = _("Instance type %(instance_type_id)s could not be found.") + + +class InstanceTypeNotFoundByName(InstanceTypeNotFound): + message = _("Instance type with name %(instance_type_name)s " + "could not be found.") + + +class FlavorNotFound(NotFound): + message = _("Flavor %(flavor_id)s could not be found.") + + +class CellNotFound(NotFound): + message = _("Cell %(cell_id)s could not be found.") + + +class SchedulerHostFilterNotFound(NotFound): + message = _("Scheduler Host Filter %(filter_name)s could not be found.") + + +class SchedulerCostFunctionNotFound(NotFound): + message = _("Scheduler cost function %(cost_fn_str)s could" + " not be found.") + + +class SchedulerWeightFlagNotFound(NotFound): + message = _("Scheduler weight flag not found: %(flag_name)s") + + +class InstanceMetadataNotFound(NotFound): + message = _("Instance %(instance_id)s has no metadata with " + "key %(metadata_key)s.") + + +class InstanceTypeExtraSpecsNotFound(NotFound): + message = _("Instance Type %(instance_type_id)s has no extra specs with " + "key %(extra_specs_key)s.") + + +class LDAPObjectNotFound(NotFound): + message = _("LDAP object could not be found") + + +class LDAPUserNotFound(LDAPObjectNotFound): + message = _("LDAP user %(user_id)s could not be found.") + + +class LDAPGroupNotFound(LDAPObjectNotFound): + message = _("LDAP group %(group_id)s could not be found.") + + +class LDAPGroupMembershipNotFound(NotFound): + message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.") + + +class FileNotFound(NotFound): + message = _("File %(file_path)s could not be found.") + + +class NoFilesFound(NotFound): + message = _("Zero files could be found.") + + +class SwitchNotFoundForNetworkAdapter(NotFound): + message = _("Virtual switch associated with the " + "network adapter %(adapter)s not found.") + + +class NetworkAdapterNotFound(NotFound): + 
message = _("Network adapter %(adapter)s could not be found.") + + +class ClassNotFound(NotFound): + message = _("Class %(class_name)s could not be found: %(exception)s") + + +class NotAllowed(CinderException): + message = _("Action not allowed.") + + +class GlobalRoleNotAllowed(NotAllowed): + message = _("Unable to use global role %(role_id)s") + + +class ImageRotationNotAllowed(CinderException): + message = _("Rotation is not allowed for snapshots") + + +class RotationRequiredForBackup(CinderException): + message = _("Rotation param is required for backup image_type") + + +#TODO(bcwaldon): EOL this exception! +class Duplicate(CinderException): + pass + + +class KeyPairExists(Duplicate): + message = _("Key pair %(key_name)s already exists.") + + +class UserExists(Duplicate): + message = _("User %(user)s already exists.") + + +class LDAPUserExists(UserExists): + message = _("LDAP user %(user)s already exists.") + + +class LDAPGroupExists(Duplicate): + message = _("LDAP group %(group)s already exists.") + + +class LDAPMembershipExists(Duplicate): + message = _("User %(uid)s is already a member of " + "the group %(group_dn)s") + + +class ProjectExists(Duplicate): + message = _("Project %(project)s already exists.") + + +class InstanceExists(Duplicate): + message = _("Instance %(name)s already exists.") + + +class InstanceTypeExists(Duplicate): + message = _("Instance Type %(name)s already exists.") + + +class VolumeTypeExists(Duplicate): + message = _("Volume Type %(name)s already exists.") + + +class InvalidSharedStorage(CinderException): + message = _("%(path)s is on shared storage: %(reason)s") + + +class MigrationError(CinderException): + message = _("Migration error") + ": %(reason)s" + + +class MalformedRequestBody(CinderException): + message = _("Malformed message body: %(reason)s") + + +class ConfigNotFound(NotFound): + message = _("Could not find config at %(path)s") + + +class PasteAppNotFound(NotFound): + message = _("Could not load paste app '%(name)s' from %(path)s") + + +class CannotResizeToSameSize(CinderException): + message = _("When resizing, instances must change size!") + + +class ImageTooLarge(CinderException): + message = _("Image is larger than instance type allows") + + +class ZoneRequestError(CinderException): + message = _("1 or more Zones could not complete the request") + + +class InstanceTypeMemoryTooSmall(CinderException): + message = _("Instance type's memory is too small for requested image.") + + +class InstanceTypeDiskTooSmall(CinderException): + message = _("Instance type's disk is too small for requested image.") + + +class InsufficientFreeMemory(CinderException): + message = _("Insufficient free memory on compute node to start %(uuid)s.") + + +class CouldNotFetchMetrics(CinderException): + message = _("Could not fetch bandwidth/cpu/disk metrics for this host.") + + +class NoValidHost(CinderException): + message = _("No valid host was found. 
%(reason)s") + + +class WillNotSchedule(CinderException): + message = _("Host %(host)s is not up or doesn't exist.") + + +class QuotaError(CinderException): + message = _("Quota exceeded") + ": code=%(code)s" + + +class AggregateError(CinderException): + message = _("Aggregate %(aggregate_id)s: action '%(action)s' " + "caused an error: %(reason)s.") + + +class AggregateNotFound(NotFound): + message = _("Aggregate %(aggregate_id)s could not be found.") + + +class AggregateNameExists(Duplicate): + message = _("Aggregate %(aggregate_name)s already exists.") + + +class AggregateHostNotFound(NotFound): + message = _("Aggregate %(aggregate_id)s has no host %(host)s.") + + +class AggregateMetadataNotFound(NotFound): + message = _("Aggregate %(aggregate_id)s has no metadata with " + "key %(metadata_key)s.") + + +class AggregateHostConflict(Duplicate): + message = _("Host %(host)s already member of another aggregate.") + + +class AggregateHostExists(Duplicate): + message = _("Aggregate %(aggregate_id)s already has host %(host)s.") + + +class DuplicateSfVolumeNames(Duplicate): + message = _("Detected more than one volume with name %(vol_name)s") + + +class VolumeTypeCreateFailed(CinderException): + message = _("Cannot create volume_type with " + "name %(name)s and specs %(extra_specs)s") + + +class InstanceTypeCreateFailed(CinderException): + message = _("Unable to create instance type") + + +class SolidFireAPIException(CinderException): + message = _("Bad response from SolidFire API") + + +class SolidFireAPIStatusException(SolidFireAPIException): + message = _("Error in SolidFire API response: status=%(status)s") + + +class SolidFireAPIDataException(SolidFireAPIException): + message = _("Error in SolidFire API response: data=%(data)s") + + +class DuplicateVlan(Duplicate): + message = _("Detected existing vlan with id %(vlan)d") + + +class InstanceNotFound(NotFound): + message = _("Instance %(instance_id)s could not be found.") + + +class InvalidInstanceIDMalformed(Invalid): + message = _("Invalid id: %(val)s (expecting \"i-...\").") + + +class CouldNotFetchImage(CinderException): + message = _("Could not fetch image %(image)s") diff --git a/cinder/flags.py b/cinder/flags.py new file mode 100644 index 00000000000..3f1d9f5d889 --- /dev/null +++ b/cinder/flags.py @@ -0,0 +1,356 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Command-line flag library. + +Emulates gflags by wrapping cfg.ConfigOpts. + +The idea is to move fully to cfg eventually, and this wrapper is a +stepping stone. 
+ +""" + +import os +import socket +import sys + +from cinder.compat import flagfile +from cinder.openstack.common import cfg + + +class CinderConfigOpts(cfg.CommonConfigOpts): + + def __init__(self, *args, **kwargs): + super(CinderConfigOpts, self).__init__(*args, **kwargs) + self.disable_interspersed_args() + + def __call__(self, argv): + with flagfile.handle_flagfiles_managed(argv[1:]) as args: + return argv[:1] + super(CinderConfigOpts, self).__call__(args) + + +FLAGS = CinderConfigOpts() + + +class UnrecognizedFlag(Exception): + pass + + +def DECLARE(name, module_string, flag_values=FLAGS): + if module_string not in sys.modules: + __import__(module_string, globals(), locals()) + if name not in flag_values: + raise UnrecognizedFlag('%s not defined by %s' % (name, module_string)) + + +def _get_my_ip(): + """ + Returns the actual ip of the local machine. + + This code figures out what source address would be used if some traffic + were to be sent out to some well known address on the Internet. In this + case, a Google DNS server is used, but the specific address does not + matter much. No traffic is actually sent. + """ + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.error: + return "127.0.0.1" + + +log_opts = [ + cfg.StrOpt('logdir', + default=None, + help='Log output to a per-service log file in named directory'), + cfg.StrOpt('logfile', + default=None, + help='Log output to a named file'), + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + ] + +core_opts = [ + cfg.StrOpt('connection_type', + default=None, + help='Virtualization api connection type : libvirt, xenapi, ' + 'or fake'), + cfg.StrOpt('sql_connection', + default='sqlite:///$state_path/$sqlite_db', + help='The SQLAlchemy connection string used to connect to the ' + 'database'), + cfg.IntOpt('sql_connection_debug', + default=0, + help='Verbosity of SQL debugging information. 
0=None, ' + '100=Everything'), + cfg.StrOpt('api_paste_config', + default="api-paste.ini", + help='File name for the paste.deploy config for cinder-api'), + cfg.StrOpt('pybasedir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + help='Directory where the cinder python module is installed'), + cfg.StrOpt('bindir', + default='$pybasedir/bin', + help='Directory where cinder binaries are installed'), + cfg.StrOpt('state_path', + default='$pybasedir', + help="Top-level directory for maintaining cinder's state"), + cfg.StrOpt('lock_path', + default='$pybasedir', + help='Directory to use for lock files'), + ] + +debug_opts = [ + cfg.BoolOpt('fake_rabbit', + default=False, + help='If passed, use a fake RabbitMQ provider'), +] + +FLAGS.register_cli_opts(log_opts) +FLAGS.register_cli_opts(core_opts) +FLAGS.register_cli_opts(debug_opts) + +global_opts = [ + cfg.StrOpt('my_ip', + default=_get_my_ip(), + help='ip address of this host'), + cfg.ListOpt('region_list', + default=[], + help='list of region=fqdn pairs separated by commas'), + cfg.StrOpt('aws_access_key_id', + default='admin', + help='AWS Access ID'), + cfg.StrOpt('aws_secret_access_key', + default='admin', + help='AWS Access Key'), + cfg.StrOpt('glance_host', + default='$my_ip', + help='default glance hostname or ip'), + cfg.IntOpt('glance_port', + default=9292, + help='default glance port'), + cfg.ListOpt('glance_api_servers', + default=['$glance_host:$glance_port'], + help='A list of the glance api servers available to cinder ' + '([hostname|ip]:port)'), + cfg.StrOpt('scheduler_topic', + default='scheduler', + help='the topic scheduler nodes listen on'), + cfg.StrOpt('volume_topic', + default='volume', + help='the topic volume nodes listen on'), + cfg.StrOpt('rabbit_host', + default='localhost', + help='the RabbitMQ host'), + cfg.IntOpt('rabbit_port', + default=5672, + help='the RabbitMQ port'), + cfg.BoolOpt('rabbit_use_ssl', + default=False, + help='connect over SSL for RabbitMQ'), + cfg.StrOpt('rabbit_userid', + default='guest', + help='the RabbitMQ userid'), + cfg.StrOpt('rabbit_password', + default='guest', + help='the RabbitMQ password'), + cfg.StrOpt('rabbit_virtual_host', + default='/', + help='the RabbitMQ virtual host'), + cfg.IntOpt('rabbit_retry_interval', + default=1, + help='how frequently to retry connecting with RabbitMQ'), + cfg.IntOpt('rabbit_retry_backoff', + default=2, + help='how long to backoff for between retries when connecting ' + 'to RabbitMQ'), + cfg.IntOpt('rabbit_max_retries', + default=0, + help='maximum retries with trying to connect to RabbitMQ ' + '(the default of 0 implies an infinite retry count)'), + cfg.StrOpt('control_exchange', + default='cinder', + help='the main RabbitMQ exchange to connect to'), + cfg.BoolOpt('rabbit_durable_queues', + default=False, + help='use durable queues in RabbitMQ'), + cfg.BoolOpt('api_rate_limit', + default=True, + help='whether to rate limit the api'), + cfg.ListOpt('enabled_apis', + default=['osapi_volume'], + help='a list of APIs to enable by default'), + cfg.ListOpt('osapi_volume_ext_list', + default=[], + help='Specify list of extensions to load when using osapi_' + 'volume_extension option with cinder.api.openstack.' 
+                     'volume.contrib.select_extensions'),
+    cfg.MultiStrOpt('osapi_volume_extension',
+                    default=[
+                        'cinder.api.openstack.volume.contrib.standard_extensions'
+                    ],
+                    help='osapi volume extension to load'),
+    cfg.StrOpt('osapi_scheme',
+               default='http',
+               help='the protocol to use when connecting to the openstack api '
+                    'server (http, https)'),
+    cfg.StrOpt('osapi_path',
+               default='/v1.1/',
+               help='the path prefix used to call the openstack api server'),
+    cfg.StrOpt('osapi_compute_link_prefix',
+               default=None,
+               help='Base URL that will be presented to users in links '
+                    'to the OpenStack Compute API'),
+    cfg.IntOpt('osapi_max_limit',
+               default=1000,
+               help='the maximum number of items returned in a single '
+                    'response from a collection resource'),
+    cfg.StrOpt('metadata_host',
+               default='$my_ip',
+               help='the ip for the metadata api server'),
+    cfg.IntOpt('metadata_port',
+               default=8775,
+               help='the port for the metadata api server'),
+    cfg.StrOpt('default_project',
+               default='openstack',
+               help='the default project to use for openstack'),
+    cfg.StrOpt('default_image',
+               default='ami-11111',
+               help='default image to use, testing only'),
+    cfg.StrOpt('default_instance_type',
+               default='m1.small',
+               help='default instance type to use, testing only'),
+    cfg.StrOpt('null_kernel',
+               default='nokernel',
+               help='kernel image that indicates not to use a kernel, but to '
+                    'use a raw disk image instead'),
+    cfg.StrOpt('vpn_image_id',
+               default='0',
+               help='image id used when starting up a cloudpipe vpn server'),
+    cfg.StrOpt('vpn_key_suffix',
+               default='-vpn',
+               help='Suffix to add to project name for vpn key and secgroups'),
+    cfg.IntOpt('auth_token_ttl',
+               default=3600,
+               help='Seconds for auth tokens to linger'),
+    cfg.StrOpt('logfile_mode',
+               default='0644',
+               help='Default file mode used when creating log files'),
+    cfg.StrOpt('sqlite_db',
+               default='cinder.sqlite',
+               help='the filename to use with sqlite'),
+    cfg.BoolOpt('sqlite_synchronous',
+                default=True,
+                help='If passed, use synchronous mode for sqlite'),
+    cfg.IntOpt('sql_idle_timeout',
+               default=3600,
+               help='timeout before idle sql connections are reaped'),
+    cfg.IntOpt('sql_max_retries',
+               default=10,
+               help='maximum db connection retries during startup. '
+                    '(setting -1 implies an infinite retry count)'),
+    cfg.IntOpt('sql_retry_interval',
+               default=10,
+               help='interval between retries of opening a sql connection'),
+    cfg.StrOpt('volume_manager',
+               default='cinder.volume.manager.VolumeManager',
+               help='full class name for the Manager for volume'),
+    cfg.StrOpt('scheduler_manager',
+               default='cinder.scheduler.manager.SchedulerManager',
+               help='full class name for the Manager for scheduler'),
+    cfg.StrOpt('host',
+               default=socket.gethostname(),
+               help='Name of this node. This can be an opaque identifier. '
+                    'It is not necessarily a hostname, FQDN, or IP address.'),
+    cfg.StrOpt('node_availability_zone',
+               default='cinder',
+               help='availability zone of this node'),
+    cfg.StrOpt('notification_driver',
+               default='cinder.notifier.no_op_notifier',
+               help='Default driver for sending notifications'),
+    cfg.ListOpt('memcached_servers',
+                default=None,
+                help='Memcached servers or None for in process cache.'),
+    cfg.StrOpt('instance_usage_audit_period',
+               default='month',
+               help='time period to generate instance usages for. '
+                    'Time period must be hour, day, month or year'),
+    cfg.IntOpt('bandwith_poll_interval',
+               default=600,
+               help='interval to pull bandwidth usage info'),
+    cfg.BoolOpt('start_guests_on_host_boot',
+                default=False,
+                help='Whether to restart guests when the host reboots'),
+    cfg.BoolOpt('resume_guests_state_on_host_boot',
+                default=False,
+                help='Whether to start guests that were running before the '
+                     'host rebooted'),
+    cfg.StrOpt('default_ephemeral_format',
+               default=None,
+               help='The default format an ephemeral_volume will be '
+                    'formatted with on creation.'),
+    cfg.StrOpt('root_helper',
+               default='sudo',
+               help='Command prefix to use for running commands as root'),
+    cfg.BoolOpt('use_ipv6',
+                default=False,
+                help='use ipv6'),
+    cfg.BoolOpt('monkey_patch',
+                default=False,
+                help='Whether to log monkey patching'),
+    cfg.ListOpt('monkey_patch_modules',
+                default=[],
+                help='List of modules/decorators to monkey patch'),
+    cfg.BoolOpt('allow_resize_to_same_host',
+                default=False,
+                help='Allow destination machine to match source for resize. '
+                     'Useful when testing in single-host environments.'),
+    cfg.IntOpt('reclaim_instance_interval',
+               default=0,
+               help='Interval in seconds for reclaiming deleted instances'),
+    cfg.IntOpt('zombie_instance_updated_at_window',
+               default=172800,
+               help='Number of seconds after which zombie instances are '
+                    'cleaned up.'),
+    cfg.IntOpt('service_down_time',
+               default=60,
+               help='maximum time since last check-in for up service'),
+    cfg.StrOpt('default_schedule_zone',
+               default=None,
+               help='availability zone to use when user doesn\'t specify one'),
+    cfg.ListOpt('isolated_images',
+                default=[],
+                help='Images to run on isolated host'),
+    cfg.ListOpt('isolated_hosts',
+                default=[],
+                help='Host reserved for specific images'),
+    cfg.StrOpt('volume_api_class',
+               default='cinder.volume.api.API',
+               help='The full class name of the volume API class to use'),
+    cfg.StrOpt('auth_strategy',
+               default='noauth',
+               help='The strategy to use for auth. Supports noauth, keystone, '
+                    'and deprecated.'),
+]
+
+FLAGS.register_opts(global_opts)
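[Editor's note: an editorial sketch, not part of the original patch, of how
these flags are registered and read, assuming the usual cfg.ConfigOpts
behaviour (attribute access after parsing, $var interpolation). The
backup_topic option is hypothetical.]

    from cinder import flags
    from cinder.openstack.common import cfg

    FLAGS = flags.FLAGS

    # Register a new option the same way global_opts is registered above.
    FLAGS.register_opts([cfg.StrOpt('backup_topic',
                                    default='backup',
                                    help='the topic backup nodes listen on')])

    FLAGS(['cinder-volume'])      # parse argv (no overrides given here)
    print(FLAGS.backup_topic)     # -> 'backup'
    print(FLAGS.volume_topic)     # -> 'volume'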
diff --git a/cinder/locale/bs/LC_MESSAGES/nova.po b/cinder/locale/bs/LC_MESSAGES/nova.po
new file mode 100644
index 00000000000..f2171c73d93
--- /dev/null
+++ b/cinder/locale/bs/LC_MESSAGES/nova.po
@@ -0,0 +1,8201 @@
+# Bosnian translation for cinder
+# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011
+# This file is distributed under the same license as the cinder package.
+# FIRST AUTHOR , 2011.
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-01-19 20:22+0000\n" +"Last-Translator: yazar \n" +"Language-Team: Bosnian \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." 
+msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." 
+msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." 
+msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." 
+msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be 
specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." 
+msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." 
+msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87
+msgid "VIF column not added to fixed_ips table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97
+#, python-format
+msgid "join list for moving mac_addresses |%s|"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39
+#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60
+#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61
+#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99
+msgid "foreign key constraint couldn't be added"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58
+msgid "foreign key constraint couldn't be dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34
+msgid "priority column not added to networks table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41
+#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42
+#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56
+#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68
+msgid "foreign key constraint couldn't be removed"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34
+msgid "progress column not added to instances table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97
+#, python-format
+msgid ""
+"Could not cast flavorid to integer: %s. Set flavorid to an integer-like "
+"string to downgrade."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69
+msgid "instance_info_caches table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41
+msgid "progress column not added to compute_nodes table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76
+msgid "dns_domains table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60
+msgid "quota_classes table not dropped"
+msgstr ""
+
+#: cinder/image/glance.py:147
+msgid "Connection error contacting glance server, retrying"
+msgstr ""
+
+#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104
+msgid "Maximum attempts reached"
+msgstr ""
+
+#: cinder/image/glance.py:278
+#, python-format
+msgid "Creating image in Glance. Metadata passed in %s"
+msgstr ""
+
+#: cinder/image/glance.py:281
+#, python-format
+msgid "Metadata after formatting for Glance %s"
+msgstr ""
+
+#: cinder/image/glance.py:289
+#, python-format
+msgid "Metadata returned from Glance formatted for Base %s"
+msgstr ""
+
+#: cinder/image/glance.py:331 cinder/image/glance.py:335
+msgid "Not the image owner"
+msgstr ""
+
+#: cinder/image/glance.py:410
+#, python-format
+msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s"
+msgstr ""
+
+#: cinder/image/s3.py:309
+#, python-format
+msgid "Failed to download %(image_location)s to %(image_path)s"
+msgstr ""
+
+#: cinder/image/s3.py:328
+#, python-format
+msgid "Failed to decrypt %(image_location)s to %(image_path)s"
+msgstr ""
+
+#: cinder/image/s3.py:340
+#, python-format
+msgid "Failed to untar %(image_location)s to %(image_path)s"
+msgstr ""
+
+#: cinder/image/s3.py:353
+#, python-format
+msgid "Failed to upload %(image_location)s to %(image_path)s"
+msgstr ""
+
+#: cinder/image/s3.py:379
+#, python-format
+msgid "Failed to decrypt private key: %s"
+msgstr ""
+
+#: cinder/image/s3.py:387
+#, python-format
+msgid "Failed to decrypt initialization vector: %s"
+msgstr ""
+
+#: cinder/image/s3.py:398
+#, python-format
+msgid "Failed to decrypt image file %(image_file)s: %(err)s"
+msgstr ""
+
+#: cinder/image/s3.py:410
+msgid "Unsafe filenames in image"
+msgstr ""
+
+#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34
+#, python-format
+msgid "Bad mac for to_global_ipv6: %s"
+msgstr ""
+
+#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36
+#, python-format
+msgid "Bad prefix for to_global_ipv6: %s"
+msgstr ""
+
+#: cinder/ipv6/account_identifier.py:42
+#, python-format
+msgid "Bad project_id for to_global_ipv6: %s"
+msgstr ""
+
+#: cinder/network/ldapdns.py:321
+msgid "This driver only supports type 'a' entries."
+msgstr ""
+
+#: cinder/network/linux_net.py:166
+#, python-format
+msgid "Attempted to remove chain %s which does not exist"
+msgstr ""
+
+#: cinder/network/linux_net.py:192
+#, python-format
+msgid "Unknown chain: %r"
+msgstr ""
+
+#: cinder/network/linux_net.py:215
+#, python-format
+msgid ""
+"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
+"%(top)r"
+msgstr ""
+
+#: cinder/network/linux_net.py:335
+msgid "IPTablesManager.apply completed successfully"
+msgstr ""
+
+#: cinder/network/linux_net.py:694
+#, python-format
+msgid "Hupping dnsmasq threw %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:696
+#, python-format
+msgid "Pid %d is stale, relaunching dnsmasq"
+msgstr ""
+
+#: cinder/network/linux_net.py:756
+#, python-format
+msgid "Killing radvd threw %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:758
+#, python-format
+msgid "Pid %d is stale, relaunching radvd"
+msgstr ""
+
+#: cinder/network/linux_net.py:967
+#, python-format
+msgid "Starting VLAN interface %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:999
+#, python-format
+msgid "Starting Bridge interface for %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:1142
+#, python-format
+msgid "Starting bridge %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:1149
+#, python-format
+msgid "Done starting bridge %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:1167
+#, python-format
+msgid "Failed unplugging gateway interface '%s'"
+msgstr ""
+
+#: cinder/network/linux_net.py:1170
+#, python-format
+msgid "Unplugged gateway interface '%s'"
+msgstr ""
+
+#: cinder/network/manager.py:291
+#, python-format
+msgid "Fixed ip %(fixed_ip_id)s not found"
+msgstr ""
+
+#: cinder/network/manager.py:300 cinder/network/manager.py:496
+#, python-format
+msgid "Interface %(interface)s not found"
+msgstr ""
+
+#: cinder/network/manager.py:315
+#, python-format
+msgid "floating IP allocation for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:353
+#, python-format
+msgid "floating IP deallocation for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:386
+#, python-format
+msgid "Address |%(address)s| is not allocated"
+msgstr ""
+
+#: cinder/network/manager.py:390
+#, python-format
+msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
+msgstr ""
+
+#: cinder/network/manager.py:402
+#, python-format
+msgid "Quota exceeded for %s, tried to allocate address"
+msgstr ""
+
+#: cinder/network/manager.py:614
+#, python-format
+msgid ""
+"Database inconsistency: DNS domain |%s| is registered in the Cinder db but "
+"not visible to either the floating or instance DNS driver. It will be "
+"ignored."
+msgstr ""
+
+#: cinder/network/manager.py:660
+#, python-format
+msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
+msgstr ""
+
+#: cinder/network/manager.py:670
+#, python-format
+msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgstr ""
+
+#: cinder/network/manager.py:778
+#, python-format
+msgid "Disassociated %s stale fixed ip(s)"
+msgstr ""
+
+#: cinder/network/manager.py:782
+msgid "setting network host"
+msgstr ""
+
+#: cinder/network/manager.py:896
+#, python-format
+msgid "network allocations for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:901
+#, python-format
+msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|"
+msgstr ""
+
+#: cinder/network/manager.py:930
+#, python-format
+msgid "network deallocation for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:1152
+#, python-format
+msgid ""
+"instance-dns-zone is |%(domain)s|, which is in availability zone "
+"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record"
+" will be created."
+msgstr ""
+
+#: cinder/network/manager.py:1227
+#, python-format
+msgid "Unable to release %s because vif doesn't exist."
+msgstr ""
+
+#: cinder/network/manager.py:1244
+#, python-format
+msgid "Leased IP |%(address)s|"
+msgstr ""
+
+#: cinder/network/manager.py:1248
+#, python-format
+msgid "IP %s leased that is not associated"
+msgstr ""
+
+#: cinder/network/manager.py:1256
+#, python-format
+msgid "IP |%s| leased that isn't allocated"
+msgstr ""
+
+#: cinder/network/manager.py:1261
+#, python-format
+msgid "Released IP |%(address)s|"
+msgstr ""
+
+#: cinder/network/manager.py:1265
+#, python-format
+msgid "IP %s released that is not associated"
+msgstr ""
+
+#: cinder/network/manager.py:1268
+#, python-format
+msgid "IP %s released that was not leased"
+msgstr ""
+
+#: cinder/network/manager.py:1331
+msgid "cidr already in use"
+msgstr ""
+
+#: cinder/network/manager.py:1334
+#, python-format
+msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
+msgstr ""
+
+#: cinder/network/manager.py:1345
+#, python-format
+msgid ""
+"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
+"(%(smaller)s)"
+msgstr ""
+
+#: cinder/network/manager.py:1404
+msgid "Network already exists!"
+msgstr ""
+
+#: cinder/network/manager.py:1423
+#, python-format
+msgid "Network must be disassociated from project %s before deletion"
+msgstr ""
+
+#: cinder/network/manager.py:1832
+msgid ""
+"The sum of the number of networks and the VLAN start cannot be greater "
+"than 4094"
+msgstr ""
+
+#: cinder/network/manager.py:1839
+#, python-format
+msgid ""
+"The network range is not big enough to fit %(num_networks)s networks. "
+"Network size is %(network_size)s"
+msgstr ""
+
+#: cinder/network/minidns.py:65
+msgid "This driver only supports type 'a'"
+msgstr ""
+
+#: cinder/network/quantum/client.py:154
+msgid "Tenant ID not set"
+msgstr ""
+
+#: cinder/network/quantum/client.py:180
+#, python-format
+msgid "Quantum Client Request: %(method)s %(action)s"
+msgstr ""
+
+#: cinder/network/quantum/client.py:196
+#, python-format
+msgid "Quantum entity not found: %s"
+msgstr ""
+
+#: cinder/network/quantum/client.py:206
+#, python-format
+msgid "Server %(status_code)s error: %(data)s"
+msgstr ""
+
+#: cinder/network/quantum/client.py:210
+#, python-format
+msgid "Unable to connect to server. Got error: %s"
+msgstr ""
+
+#: cinder/network/quantum/client.py:228
+#, python-format
+msgid "Unable to deserialize object of type '%s'"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:162
+msgid "QuantumManager does not use the 'multi_host' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:166
+msgid "QuantumManager requires that only one network be created per call"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:176
+msgid "QuantumManager does not use the 'vlan_start' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:182
+msgid "QuantumManager does not use the 'vpn_start' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:186
+msgid "QuantumManager does not use the 'bridge' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:190
+msgid "QuantumManager does not use the 'bridge_interface' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:195
+msgid "QuantumManager requires a valid (.1) gateway address."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:204
+#, python-format
+msgid ""
+"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with"
+" net-id '%(quantum_net_id)s'"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:301
+#, python-format
+msgid "network allocations for instance %s"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:588
+#, python-format
+msgid ""
+"port deallocation failed for instance: |%(instance_id)s|, port_id: "
+"|%(port_id)s|"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:606
+#, python-format
+msgid ""
+"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: "
+"|%(vif_uuid)s|"
+msgstr ""
+
+#: cinder/network/quantum/melange_connection.py:96
+#, python-format
+msgid "Server returned error: %s"
+msgstr ""
+
+#: cinder/netwo